repo_name: string (7-71 chars) | file_path: string (5-118 chars) | context: list | import_statement: string (45-12.5k chars) | token_num: int64 (641-99.4k) | cropped_code: string (44-17k chars) | all_code: string (43-754k chars) | next_line: string (2-330 chars) | gold_snippet_index: int64 (0-68) | created_at: string (25 chars) | level: string (9 classes)
---|---|---|---|---|---|---|---|---|---|---|
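Each row pairs a cropped source file with the cross-file context it imports and the ground-truth next line. Below is a minimal sketch of how such a record might be scored for next-line prediction, assuming the rows are available as JSON objects with the columns listed above; the file name, the `complete_fn` argument, and the exact-match metric are illustrative assumptions, not part of the dataset itself.

```python
import json

def evaluate_next_line(path, complete_fn):
    """Exact-match accuracy of `complete_fn` on next-line prediction records."""
    correct, total = 0, 0
    with open(path) as f:
        for raw in f:
            row = json.loads(raw)
            # `import_statement` carries the cross-file imports and `cropped_code`
            # the in-file context; `next_line` is the ground-truth continuation.
            prompt = row["import_statement"] + "\n" + row["cropped_code"]
            prediction = complete_fn(prompt)
            correct += int(prediction.strip() == row["next_line"].strip())
            total += 1
    return correct / max(total, 1)

# e.g. a trivial baseline: evaluate_next_line("records.jsonl", lambda prompt: "pass")
```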
dazhangyu123/ACMIL | Step3_WSI_classification.py | [
{
"identifier": "save_model",
"path": "utils/utils.py",
"snippet": "def save_model(conf, epoch, model, optimizer, is_best=False, is_last=False):\n to_save = {\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'epoch': epoch,\n 'config': conf,\n }\n\n checkpoint_path = os.path.join(conf.ckpt_dir, 'checkpoint-%s.pth' % epoch)\n\n # record the checkpoint with best validation accuracy\n if is_best:\n checkpoint_path = os.path.join(conf.ckpt_dir, 'checkpoint-best.pth')\n\n if is_last:\n checkpoint_path = os.path.join(conf.ckpt_dir, 'checkpoint-last.pth')\n\n torch.save(to_save, checkpoint_path)"
},
{
"identifier": "Struct",
"path": "utils/utils.py",
"snippet": "class Struct:\n def __init__(self, **entries):\n self.__dict__.update(entries)"
},
{
"identifier": "set_seed",
"path": "utils/utils.py",
"snippet": "def set_seed(seed):\n # Set random seed for PyTorch\n torch.manual_seed(seed)\n\n # Set random seed for CUDA if available\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n # Set random seed for NumPy\n np.random.seed(seed)\n\n # Set random seed for random module\n random.seed(seed)\n\n # Set random seed for CuDNN if available\n if torch.backends.cudnn.enabled:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False"
},
{
"identifier": "Wandb_Writer",
"path": "utils/utils.py",
"snippet": "class Wandb_Writer(object):\n\n def __init__(self, project_name='wsi_classification', group_name='baseline', mode='online', name=0):\n self.wandb = wandb.init(project=project_name, group=group_name, entity=\"dazhangyu123\", save_code=True, mode=mode, name='seed%d'%name)\n\n def log(self, var_name, var, commit=True):\n self.wandb.log({var_name: var}, commit=commit)\n\n def summary(self, var_name, var):\n self.wandb.run.summary[var_name] = var"
},
{
"identifier": "build_HDF5_feat_dataset",
"path": "datasets/datasets.py",
"snippet": "def build_HDF5_feat_dataset(file_path, conf):\r\n if conf.dataset == 'camelyon':\r\n train_split, train_names, val_split, val_names, test_split, test_names = split_dataset_camelyon(file_path, conf)\r\n train_split, train_names = generate_fewshot_dataset(train_split, train_names, num_shots=conf.n_shot)\r\n return HDF5_feat_dataset2(train_split, train_names), HDF5_feat_dataset2(val_split, val_names), HDF5_feat_dataset2(test_split, test_names)\r\n elif conf.dataset == 'bracs':\r\n train_split, train_names, val_split, val_names, test_split, test_names = split_dataset_bracs(file_path, conf)\r\n train_split, train_names = generate_fewshot_dataset(train_split, train_names, num_shots=conf.n_shot)\r\n return HDF5_feat_dataset2(train_split, train_names), HDF5_feat_dataset2(val_split, val_names), HDF5_feat_dataset2(test_split, test_names)\r\n elif conf.dataset == 'lct':\r\n train_split, train_names, val_split, val_names, test_split, test_names = split_dataset_lct(file_path, conf)\r\n # save_dir = 'splits/%s' % conf.dataset\r\n # os.makedirs(save_dir, exist_ok=True)\r\n # json.dump({'train_names': train_names, 'val_names': val_names, 'test_names': test_names},\r\n # open(os.path.join(save_dir, 'split_%s.json' % conf.seed), 'w'))\r\n # sys.exit()\r\n train_split, train_names = generate_fewshot_dataset(train_split, train_names, num_shots=conf.n_shot)\r\n return HDF5_feat_dataset2(train_split, train_names), HDF5_feat_dataset2(val_split, val_names), HDF5_feat_dataset2(test_split, test_names)\r"
},
{
"identifier": "TransformWrapper",
"path": "architecture/transformer.py",
"snippet": "class TransformWrapper(nn.Module):\n def __init__(self, conf):\n super(TransformWrapper, self).__init__()\n self.dimreduction = DimReduction(conf.D_feat, conf.D_inner)\n self.sub_attention = nn.ModuleList()\n for i in range(conf.n_token):\n self.sub_attention.append(MutiHeadAttention(conf.D_inner, 8, n_masked_patch=conf.n_masked_patch, mask_drop=conf.mask_drop))\n self.bag_attention = MutiHeadAttention1(conf.D_inner, 8)\n self.q = nn.Parameter(torch.zeros((1, conf.n_token, conf.D_inner)))\n nn.init.normal_(self.q, std=1e-6)\n self.n_class = conf.n_class\n\n self.classifier = nn.ModuleList()\n for i in range(conf.n_token):\n self.classifier.append(Classifier_1fc(conf.D_inner, conf.n_class, 0.0))\n self.n_token = conf.n_token\n self.Slide_classifier = Classifier_1fc(conf.D_inner, conf.n_class, 0.0)\n\n def forward(self, input, use_attention_mask=True):\n input = self.dimreduction(input)\n q = self.q\n k = input\n v = input\n outputs = []\n attns = []\n for i in range(self.n_token):\n feat_i, attn_i = self.sub_attention[i](q[:, i].unsqueeze(0), k, v, use_attention_mask=use_attention_mask)\n outputs.append(self.classifier[i](feat_i))\n attns.append(attn_i)\n\n attns = torch.cat(attns, 1)\n feat_bag = self.bag_attention(v, attns.softmax(dim=-1).mean(1, keepdim=True))\n\n return torch.cat(outputs, dim=0), self.Slide_classifier(feat_bag), attns"
},
{
"identifier": "AttnMIL",
"path": "architecture/transformer.py",
"snippet": "class AttnMIL(nn.Module):\n def __init__(self, conf, D=128, droprate=0):\n super(AttnMIL, self).__init__()\n self.dimreduction = DimReduction(conf.feat_d, conf.D_inner)\n self.attention = Attention_Gated(conf.D_inner, D, 1)\n self.classifier = Classifier_1fc(conf.D_inner, conf.n_class, droprate)\n\n def forward(self, x): ## x: N x L\n x = x[0]\n med_feat = self.dimreduction(x)\n A = self.attention(med_feat) ## K x N\n\n A_out = A\n A = F.softmax(A, dim=1) # softmax over N\n afeat = torch.mm(A, med_feat) ## K x L\n outputs = self.classifier(afeat)\n return outputs, A_out.unsqueeze(0)"
},
{
"identifier": "TransMIL",
"path": "architecture/transMIL.py",
"snippet": "class TransMIL(nn.Module):\r\n def __init__(self, conf):\r\n super(TransMIL, self).__init__()\r\n self.pos_layer = PPEG(dim=conf.D_inner)\r\n self._fc1 = nn.Sequential(nn.Linear(conf.D_feat, conf.D_inner), nn.ReLU())\r\n self.cls_token = nn.Parameter(torch.randn(1, 1, conf.D_inner))\r\n self.n_classes = conf.n_class\r\n self.layer1 = TransLayer(dim=conf.D_inner)\r\n self.layer2 = TransLayer(dim=conf.D_inner)\r\n self.norm = nn.LayerNorm(conf.D_inner)\r\n self._fc2 = nn.Linear(conf.D_inner, conf.n_class)\r\n\r\n def forward(self, input):\r\n h = self._fc1(input) # [B, n, 512]\r\n\r\n # ---->pad\r\n H = h.shape[1]\r\n _H, _W = int(np.ceil(np.sqrt(H))), int(np.ceil(np.sqrt(H)))\r\n add_length = _H * _W - H\r\n h = torch.cat([h, h[:, :add_length, :]], dim=1) # [B, N, 512]\r\n\r\n # ---->cls_token\r\n B = h.shape[0]\r\n cls_tokens = self.cls_token.expand(B, -1, -1).cuda()\r\n h = torch.cat((cls_tokens, h), dim=1)\r\n\r\n # ---->Translayer x1\r\n h = self.layer1(h) # [B, N, 512]\r\n\r\n # ---->PPEG\r\n h = self.pos_layer(h, _H, _W) # [B, N, 512]\r\n\r\n # ---->Translayer x2\r\n h = self.layer2(h) # [B, N, 512]\r\n\r\n # ---->cls_token\r\n h = self.norm(h)[:, 0]\r\n\r\n # ---->predict\r\n logits = self._fc2(h) # [B, n_classes]\r\n # Y_hat = torch.argmax(logits, dim=1)\r\n # Y_prob = F.softmax(logits, dim=1)\r\n # results_dict = {'logits': logits, 'Y_prob': Y_prob, 'Y_hat': Y_hat}\r\n return logits\r"
},
{
"identifier": "train_one_epoch",
"path": "engine.py",
"snippet": "def train_one_epoch(net, criterion, data_loader, optimizer, device, epoch, conf, log_writer=None):\r\n \"\"\"\r\n Trains the given network for one epoch according to given criterions (loss functions)\r\n \"\"\"\r\n\r\n # Set the network to training mode\r\n net.train()\r\n metric_logger = MetricLogger(delimiter=\" \")\r\n metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value:.6f}'))\r\n header = 'Epoch: [{}]'.format(epoch)\r\n print_freq = 100\r\n\r\n for data_it, data in enumerate(metric_logger.log_every(data_loader, print_freq, header)):\r\n # for data_it, data in enumerate(data_loader, start=epoch * len(data_loader)):\r\n # Move input batch onto GPU if eager execution is enabled (default), else leave it on CPU\r\n # Data is a dict with keys `input` (patches) and `{task_name}` (labels for given task)\r\n image_patches = data['input'].to(device, dtype=torch.float32)\r\n labels = data['label'].to(device)\r\n coords = data['coords']\r\n\r\n # # Calculate and set new learning rate\r\n adjust_learning_rate(optimizer, epoch + data_it / len(data_loader), conf)\r\n optimizer.zero_grad()\r\n\r\n if conf.arch == 'dsmil':\r\n loss_forward_and_backward_dsmil(net, image_patches, labels, criterion, conf,\r\n device, optimizer, metric_logger, log_writer)\r\n elif conf.arch in ('clam_sb', 'clam_mb'):\r\n loss_forward_and_backward_clam(net, image_patches, labels, criterion, conf,\r\n device, optimizer, metric_logger, log_writer)\r\n elif conf.arch == 'bmil_spvis':\r\n loss_forward_and_backward_bmil(net, image_patches, coords, labels, criterion, conf,\r\n device, optimizer, metric_logger, log_writer)\r\n else:\r\n loss_forward_and_backward(net, image_patches, labels, criterion, conf,\r\n device, optimizer, metric_logger, log_writer)\r\n\r\n optimizer.step()\r"
},
{
"identifier": "evaluate",
"path": "engine.py",
"snippet": "@torch.no_grad()\r\ndef evaluate(net, criterion, data_loader, device, conf, header):\r\n # Set the network to evaluation mode\r\n net.eval()\r\n\r\n y_pred = []\r\n y_true = []\r\n\r\n metric_logger = MetricLogger(delimiter=\" \")\r\n\r\n for data in metric_logger.log_every(data_loader, 100, header):\r\n image_patches = data['input'].to(device, dtype=torch.float32)\r\n labels = data['label'].to(device)\r\n coords = data['coords']\r\n\r\n if conf.arch == 'dsmil':\r\n # Compute loss\r\n ins_preds, bag_preds, attn = net(image_patches)\r\n max_preds, _ = torch.max(ins_preds, 0, keepdim=True)\r\n loss = 0.5 * criterion(max_preds, labels) \\\r\n + 0.5 * criterion(bag_preds, labels)\r\n pred = 0.5 * torch.softmax(max_preds, dim=-1) \\\r\n + 0.5 * torch.softmax(bag_preds, dim=-1)\r\n elif conf.arch == 'bmil_spvis':\r\n coords_array = coords.numpy()[0]\r\n output, Y_prob, Y_hat, _, _ = net(image_patches, coords_array, coords_array[:, 1].max(),\r\n coords_array[:, 0].max(), validation=True)\r\n loss = criterion(output, labels)\r\n pred = torch.softmax(output, dim=-1)\r\n elif conf.arch in ('clam_sb', 'clam_mb'):\r\n output = net(image_patches)\r\n loss = criterion(output, labels)\r\n pred = torch.softmax(output, dim=-1)\r\n else:\r\n # Compute loss\r\n output = net(image_patches)\r\n loss = criterion(output, labels)\r\n pred = torch.softmax(output, dim=-1)\r\n\r\n acc1 = accuracy(pred, labels, topk=(1,))[0]\r\n\r\n metric_logger.update(loss=loss.item())\r\n metric_logger.meters['acc1'].update(acc1.item(), n=labels.shape[0])\r\n\r\n y_pred.append(pred)\r\n y_true.append(labels)\r\n\r\n y_pred = torch.cat(y_pred, dim=0)\r\n y_true = torch.cat(y_true, dim=0)\r\n\r\n AUROC_metric = torchmetrics.AUROC(num_classes=conf.n_class, average='macro').to(device)\r\n AUROC_metric(y_pred, y_true)\r\n auroc = AUROC_metric.compute().item()\r\n F1_metric = torchmetrics.F1Score(num_classes=conf.n_class, average='macro').to(device)\r\n F1_metric(y_pred, y_true)\r\n f1_score = F1_metric.compute().item()\r\n\r\n print('* Acc@1 {top1.global_avg:.3f} loss {losses.global_avg:.3f} auroc {AUROC:.3f} f1_score {F1:.3f}'\r\n .format(top1=metric_logger.acc1, losses=metric_logger.loss, AUROC=auroc, F1=f1_score))\r\n\r\n return auroc, metric_logger.acc1.global_avg, f1_score, metric_logger.loss.global_avg\r"
},
{
"identifier": "MILNet",
"path": "architecture/dsmil.py",
"snippet": "class MILNet(nn.Module):\r\n def __init__(self, i_classifier, b_classifier):\r\n super(MILNet, self).__init__()\r\n self.i_classifier = i_classifier\r\n self.b_classifier = b_classifier\r\n\r\n def forward(self, x, is_train=True):\r\n feats, classes = self.i_classifier(x[0])\r\n # print(feats)\r\n prediction_bag, A, B = self.b_classifier(feats, classes, is_train=is_train)\r\n return classes, prediction_bag, A\r"
},
{
"identifier": "FCLayer",
"path": "architecture/dsmil.py",
"snippet": "class FCLayer(nn.Module):\r\n def __init__(self, in_size, out_size=1):\r\n super(FCLayer, self).__init__()\r\n self.fc = nn.Sequential(nn.Linear(in_size, out_size))\r\n\r\n def forward(self, feats):\r\n x = self.fc(feats)\r\n return feats, x\r"
},
{
"identifier": "BClassifier",
"path": "architecture/dsmil.py",
"snippet": "class BClassifier(nn.Module):\r\n def __init__(self, conf, dropout_v=0.0, nonlinear=True, passing_v=False,\r\n confounder_path=False): # K, L, N\r\n super(BClassifier, self).__init__()\r\n self.n_masked_patch = conf.n_masked_patch\r\n input_size=conf.D_feat\r\n output_class=conf.n_class\r\n if nonlinear:\r\n self.q = nn.Sequential(nn.Linear(input_size, conf.D_inner), nn.ReLU(), nn.Linear(conf.D_inner, 128), nn.Tanh())\r\n else:\r\n self.q = nn.Linear(input_size, conf.D_inner)\r\n if passing_v:\r\n self.v = nn.Sequential(\r\n nn.Dropout(dropout_v),\r\n nn.Linear(input_size, input_size),\r\n nn.ReLU()\r\n )\r\n else:\r\n self.v = nn.Identity()\r\n\r\n ### 1D convolutional layer that can handle multiple class (including binary)\r\n self.fcc = nn.Conv1d(output_class, output_class, kernel_size=input_size)\r\n self.confounder_path = None\r\n if confounder_path:\r\n self.confounder_path = confounder_path\r\n conf_list = []\r\n for i in confounder_path:\r\n conf_list.append(torch.from_numpy(np.load(i)).float())\r\n conf_tensor = torch.cat(conf_list,\r\n 0) # [ k, C, K] k-means, c classes , K-dimension, should concatenate at centers k\r\n conf_tensor_dim = conf_tensor.shape[-1]\r\n self.register_buffer(\"confounder_feat\", conf_tensor)\r\n joint_space_dim = 128\r\n dropout_v = 0.1\r\n self.confounder_W_q = nn.Linear(input_size, joint_space_dim)\r\n self.confounder_W_k = nn.Linear(conf_tensor_dim, joint_space_dim)\r\n self.fcc = nn.Conv1d(output_class, output_class, kernel_size=input_size + conf_tensor_dim)\r\n # self.classifier = nn.Linear(self.L*self.K+in_size, out_size)\r\n self.dropout = nn.Dropout(dropout_v)\r\n\r\n\r\n def forward(self, feats, c, is_train=True): # N x K, N x C\r\n device = feats.device\r\n V = self.v(feats) # N x V, unsorted\r\n Q = self.q(feats).view(feats.shape[0], -1) # N x Q, unsorted\r\n # handle multiple classes without for loop\r\n _, m_indices = torch.sort(c, 0,\r\n descending=True) # sort class scores along the instance dimension, m_indices in shape N x C\r\n # print(m_indices.shape)\r\n m_feats = torch.index_select(feats, dim=0,\r\n index=m_indices[0, :]) # select critical instances, m_feats in shape C x K\r\n q_max = self.q(m_feats) # compute queries of critical instances, q_max in shape C x Q\r\n A = torch.mm(Q, q_max.transpose(0,\r\n 1)) # compute inner product of Q to each entry of q_max, A in shape N x C, each column contains unnormalized attention scores\r\n A = A / torch.sqrt(torch.tensor(Q.shape[1], dtype=torch.float32, device=device)) # normalize attention scores, A in shape N x C,\r\n A = A.transpose(0, 1)\r\n\r\n if self.n_masked_patch > 0 and is_train:\r\n # Get the indices of the top-k largest values\r\n q, c = A.shape\r\n n_masked_patch = min(self.n_masked_patch, c)\r\n _, indices = torch.topk(A, n_masked_patch, dim=-1)\r\n indices = indices.reshape(q, -1)\r\n rand_selected = torch.argsort(torch.rand(*indices.shape), dim=-1)[:, :int(n_masked_patch * 0.5)]\r\n masked_indices = indices[torch.arange(indices.shape[0]).unsqueeze(-1), rand_selected]\r\n random_mask = torch.ones(q, c).to(A.device)\r\n random_mask.scatter_(-1, masked_indices, 0)\r\n A = A.masked_fill(random_mask.reshape(q, -1) == 0, -1e9)\r\n\r\n A_out = A\r\n A = F.softmax(A, dim=-1)\r\n B = torch.mm(A, V) # compute bag representation, B in shape C x V\r\n B = B.view(1, B.shape[0], B.shape[1]) # 1 x C x V\r\n # cls-specific confounder\r\n if self.confounder_path:\r\n if 'agnostic' in self.confounder_path[0]:\r\n device = B.device\r\n bag_q = self.confounder_W_q(B.squeeze(0)) # bs x 
C x V -- C x V\r\n conf_k = self.confounder_W_k(self.dropout(self.confounder_feat)) # k x V\r\n A = torch.mm(conf_k, bag_q.transpose(0, 1)) # k * C\r\n A = F.softmax(A / torch.sqrt(torch.tensor(conf_k.shape[1], dtype=torch.float32, device=device)),\r\n 0) # normalize attention scores, A in shape N x C,\r\n conf_feats = torch.mm(A.transpose(0, 1),\r\n self.confounder_feat) # compute bag representation, B in shape C x V\r\n B = torch.cat((B, conf_feats.unsqueeze(0)), dim=-1)\r\n elif self.confounder_path: #### cls-agnostic\r\n device = B.device\r\n bag_q = self.confounder_W_q(B.squeeze(0)).unsqueeze(0) # bs x C x V --- C x V ----bs x C x Q\r\n conf_k = self.confounder_W_k(self.confounder_feat.view(-1, B.shape[-1])) # k x C x K ---- C*k x K\r\n conf_k = conf_k.view(self.confounder_feat.shape[0], self.confounder_feat.shape[1],\r\n bag_q.shape[-1]) # C*k x K ---k x C x Q\r\n A = torch.einsum('kcq, bcq -> kcb ', conf_k, bag_q)\r\n # A = torch.mm(conf_k, bag_q.transpose(0, 1))\r\n A = F.softmax(A / torch.sqrt(torch.tensor(conf_k.shape[-1], dtype=torch.float32, device=device)), 0) #\r\n # conf_feats = torch.mm(A.transpose(0, 1), self.confounder_feat) # compute bag representation, B in shape C x V\r\n conf_feats = torch.einsum(' kcb ,kcq-> bcq ', A, self.confounder_feat)\r\n B = torch.cat((B, conf_feats), dim=2)\r\n C = self.fcc(B) # 1 x C x 1\r\n C = C.view(1, -1)\r\n return C, A_out, B\r"
},
{
"identifier": "probabilistic_MIL_Bayes_spvis",
"path": "architecture/bmil.py",
"snippet": "class probabilistic_MIL_Bayes_spvis(nn.Module):\r\n def __init__(self, conf, size_arg=\"small\", top_k=1):\r\n super(probabilistic_MIL_Bayes_spvis, self).__init__()\r\n\r\n # self.size_dict = {\"small\": [1024, 512, 256], \"big\": [1024, 512, 384]}\r\n self.size_dict = {\"small\": [conf.feat_d, 512, 256], \"big\": [conf.feat_d, 512, 384]}\r\n size = self.size_dict[size_arg]\r\n\r\n ard_init = -4.\r\n self.linear1 = nn.Linear(size[0], size[1])\r\n self.linear2a = LinearVDO(size[1], size[2], ard_init=ard_init)\r\n self.linear2b = LinearVDO(size[1], size[2], ard_init=ard_init)\r\n self.linear3 = LinearVDO(size[2], 2, ard_init=ard_init)\r\n\r\n self.gaus_smoothing = GaussianSmoothing(1, 3, 0.5)\r\n\r\n self.classifiers = LinearVDO(size[1], conf.n_class, ard_init=-3.)\r\n\r\n self.dp_0 = nn.Dropout(0.25)\r\n self.dp_a = nn.Dropout(0.25)\r\n self.dp_b = nn.Dropout(0.25)\r\n\r\n self.prior_mu = torch.tensor([-5., 0.])\r\n self.prior_logvar = torch.tensor([-1., 3.])\r\n\r\n initialize_weights(self)\r\n self.top_k = top_k\r\n self.patch_size = conf.patch_size\r\n\r\n def reparameterize(self, mu, logvar):\r\n std = torch.exp(0.5 * logvar)\r\n eps = torch.randn_like(std)\r\n return mu + eps * std\r\n\r\n def kl_logistic_normal(self, mu_pr, mu_pos, logvar_pr, logvar_pos):\r\n return (logvar_pr - logvar_pos) / 2. + (logvar_pos ** 2 + (mu_pr - mu_pos) ** 2) / (2. * logvar_pr ** 2) - 0.5\r\n\r\n def relocate(self):\r\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n self.linear1 = self.linear1.to(device)\r\n self.linear2a = self.linear2a.to(device)\r\n self.linear2b = self.linear2b.to(device)\r\n self.linear3 = self.linear3.to(device)\r\n\r\n self.dp_0 = self.dp_0.to(device)\r\n self.dp_a = self.dp_a.to(device)\r\n self.dp_b = self.dp_b.to(device)\r\n self.gaus_smoothing = self.gaus_smoothing.to(device)\r\n\r\n self.prior_mu = self.prior_mu.to(device)\r\n self.prior_logvar = self.prior_logvar.to(device)\r\n\r\n self.classifiers = self.classifiers.to(device)\r\n\r\n def forward(self, h, coords, height, width, slide_label=None, validation=False):\r\n h = h[0]\r\n device = h.device\r\n h = F.relu(self.dp_0(self.linear1(h)))\r\n\r\n feat_a = self.dp_a(torch.sigmoid(self.linear2a(h)))\r\n feat_b = self.dp_b(torch.tanh(self.linear2b(h)))\r\n feat = feat_a.mul(feat_b)\r\n params = self.linear3(feat)\r\n\r\n coords = coords // self.patch_size\r\n asign = lambda coord: coord[:, 0] + coord[:, 1] * (width // self.patch_size)\r\n coords = asign(coords)\r\n coords = torch.from_numpy(coords).to(device)\r\n\r\n mu = torch.zeros([1, (height // self.patch_size + 1) * (width // self.patch_size + 1)]).to(device)\r\n logvar = torch.zeros([1, (height // self.patch_size + 1) * (width // self.patch_size + 1)]).to(device)\r\n\r\n mu[:, coords.long()] = params[:, 0]\r\n logvar[:, coords.long()] = params[:, 1]\r\n\r\n mu = mu.view(1, height // self.patch_size + 1, width // self.patch_size + 1)\r\n logvar = logvar.view(1, height // self.patch_size + 1, width // self.patch_size + 1)\r\n\r\n if not validation:\r\n mu_pr = self.prior_mu[slide_label.item()].expand_as(mu)\r\n logvar_pr = self.prior_logvar[slide_label.item()]\r\n kl_div = self.kl_logistic_normal(mu_pr, mu, logvar_pr, logvar)\r\n else:\r\n kl_div = None\r\n\r\n # # no branch\r\n mu = F.pad(mu, (1, 1, 1, 1), mode='constant', value=0)\r\n mu = torch.unsqueeze(mu, dim=0)\r\n mu = self.gaus_smoothing(mu)\r\n\r\n gaus_samples = self.reparameterize(mu, logvar)\r\n gaus_samples = torch.squeeze(gaus_samples, dim=0)\r\n\r\n A = 
F.sigmoid(gaus_samples)\r\n A = A.view(1, -1)\r\n\r\n patch_A = torch.index_select(A, dim=1, index=coords)\r\n M = torch.mm(patch_A, h) / patch_A.sum()\r\n\r\n logits = self.classifiers(M)\r\n\r\n y_probs = F.softmax(logits, dim=1)\r\n top_instance_idx = torch.topk(y_probs[:, 1], self.top_k, dim=0)[1].view(1, )\r\n top_instance = torch.index_select(logits, dim=0, index=top_instance_idx)\r\n Y_hat = torch.topk(top_instance, 1, dim=1)[1]\r\n Y_prob = F.softmax(top_instance, dim=1)\r\n\r\n if not validation:\r\n return top_instance, Y_prob, Y_hat, kl_div, y_probs, patch_A.view((1, -1))\r\n else:\r\n return top_instance, Y_prob, Y_hat, y_probs, patch_A.view((1, -1))\r"
},
{
"identifier": "CLAM_SB",
"path": "architecture/clam.py",
"snippet": "class CLAM_SB(nn.Module):\r\n def __init__(self, conf, gate=True, size_arg=\"small\", k_sample=8, dropout=True,\r\n instance_loss_fn=nn.CrossEntropyLoss()):\r\n super(CLAM_SB, self).__init__()\r\n n_classes = conf.n_class\r\n self.size_dict = {\"small\": [conf.D_feat, conf.D_inner, 128], \"big\": [conf.D_feat, 512, 384]}\r\n size = self.size_dict[size_arg]\r\n fc = [nn.Linear(size[0], size[1]), nn.ReLU()]\r\n if dropout:\r\n fc.append(nn.Dropout(0.25))\r\n if gate:\r\n attention_net = Attn_Net_Gated(L=size[1], D=size[2], dropout=dropout, n_classes=conf.n_token)\r\n else:\r\n attention_net = Attn_Net(L=size[1], D=size[2], dropout=dropout, n_classes=conf.n_token)\r\n fc.append(attention_net)\r\n self.attention_net = nn.Sequential(*fc)\r\n self.classifiers = nn.Linear(size[1], n_classes)\r\n instance_classifiers = [nn.Linear(size[1], 2) for i in range(n_classes)]\r\n self.instance_classifiers = nn.ModuleList(instance_classifiers)\r\n self.k_sample = k_sample\r\n self.instance_loss_fn = instance_loss_fn\r\n self.n_classes = n_classes\r\n self.subtyping = False\r\n if conf.n_class > 2:\r\n self.subtyping = True\r\n self.n_masked_patch = conf.n_masked_patch\r\n\r\n initialize_weights(self)\r\n\r\n def relocate(self):\r\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n self.attention_net = self.attention_net.to(device)\r\n self.classifiers = self.classifiers.to(device)\r\n self.instance_classifiers = self.instance_classifiers.to(device)\r\n\r\n @staticmethod\r\n def create_positive_targets(length, device):\r\n return torch.full((length,), 1, device=device).long()\r\n\r\n @staticmethod\r\n def create_negative_targets(length, device):\r\n return torch.full((length,), 0, device=device).long()\r\n\r\n # instance-level evaluation for in-the-class attention branch\r\n def inst_eval(self, A, h, classifier):\r\n device = h.device\r\n if len(A.shape) == 1:\r\n A = A.view(1, -1)\r\n top_p_ids = torch.topk(A, self.k_sample)[1][-1]\r\n top_p = torch.index_select(h, dim=0, index=top_p_ids)\r\n top_n_ids = torch.topk(-A, self.k_sample, dim=1)[1][-1]\r\n top_n = torch.index_select(h, dim=0, index=top_n_ids)\r\n p_targets = self.create_positive_targets(self.k_sample, device)\r\n n_targets = self.create_negative_targets(self.k_sample, device)\r\n\r\n all_targets = torch.cat([p_targets, n_targets], dim=0)\r\n all_instances = torch.cat([top_p, top_n], dim=0)\r\n logits = classifier(all_instances)\r\n all_preds = torch.topk(logits, 1, dim=1)[1].squeeze(1)\r\n instance_loss = self.instance_loss_fn(logits, all_targets)\r\n return instance_loss, all_preds, all_targets\r\n\r\n # instance-level evaluation for out-of-the-class attention branch\r\n def inst_eval_out(self, A, h, classifier):\r\n device = h.device\r\n if len(A.shape) == 1:\r\n A = A.view(1, -1)\r\n top_p_ids = torch.topk(A, self.k_sample)[1][-1]\r\n top_p = torch.index_select(h, dim=0, index=top_p_ids)\r\n p_targets = self.create_negative_targets(self.k_sample, device)\r\n logits = classifier(top_p)\r\n p_preds = torch.topk(logits, 1, dim=1)[1].squeeze(1)\r\n instance_loss = self.instance_loss_fn(logits, p_targets)\r\n return instance_loss, p_preds, p_targets\r\n\r\n def forward(self, h, label=None, instance_eval=False, return_features=False, attention_only=False, is_train=True):\r\n A, h = self.attention_net(h[0]) # NxK\r\n A = torch.transpose(A, -1, -2) # KxN\r\n if attention_only:\r\n return A\r\n\r\n\r\n if self.n_masked_patch > 0 and is_train:\r\n # Get the indices of the top-k largest values\r\n b, q, c = 
A.shape\r\n n_masked_patch = min(self.n_masked_patch, c)\r\n _, indices = torch.topk(A, n_masked_patch, dim=-1)\r\n indices = indices.reshape(b * q, -1)\r\n rand_selected = torch.argsort(torch.rand(*indices.shape), dim=-1)[:,:int(n_masked_patch * 0.5)]\r\n masked_indices = indices[torch.arange(indices.shape[0]).unsqueeze(-1), rand_selected]\r\n random_mask = torch.ones(b*q, c).to(A.device)\r\n random_mask.scatter_(-1, masked_indices, 0)\r\n A = A.masked_fill(random_mask.reshape(b, q, -1) == 0, -1e9)\r\n\r\n\r\n A_raw = A\r\n A = F.softmax(A, dim=-1) # softmax over N\r\n\r\n if instance_eval:\r\n total_inst_loss = 0.0\r\n all_preds = []\r\n all_targets = []\r\n inst_labels = F.one_hot(label, num_classes=self.n_classes).squeeze() # binarize label\r\n for i in range(len(self.instance_classifiers)):\r\n inst_label = inst_labels[i].item()\r\n classifier = self.instance_classifiers[i]\r\n if inst_label == 1: # in-the-class:\r\n instance_loss, preds, targets = self.inst_eval(A, h, classifier)\r\n all_preds.extend(preds.cpu().numpy())\r\n all_targets.extend(targets.cpu().numpy())\r\n else: # out-of-the-class\r\n if self.subtyping:\r\n instance_loss, preds, targets = self.inst_eval_out(A, h, classifier)\r\n all_preds.extend(preds.cpu().numpy())\r\n all_targets.extend(targets.cpu().numpy())\r\n else:\r\n continue\r\n total_inst_loss += instance_loss\r\n\r\n if self.subtyping:\r\n total_inst_loss /= len(self.instance_classifiers)\r\n\r\n M = torch.mm(A, h)\r\n logits = self.classifiers(M)\r\n if instance_eval:\r\n return logits, total_inst_loss\r\n else:\r\n return logits\r\n # Y_hat = torch.topk(logits, 1, dim=1)[1]\r\n # Y_prob = F.softmax(logits, dim=1)\r\n # if instance_eval:\r\n # results_dict = {'instance_loss': total_inst_loss, 'inst_labels': np.array(all_targets),\r\n # 'inst_preds': np.array(all_preds)}\r\n # else:\r\n # results_dict = {}\r\n # if return_features:\r\n # results_dict.update({'features': M})\r\n # return logits, Y_prob, Y_hat, A_raw, results_dict\r"
},
{
"identifier": "CLAM_MB",
"path": "architecture/clam.py",
"snippet": "class CLAM_MB(CLAM_SB):\r\n def __init__(self, conf, gate=True, size_arg=\"small\", k_sample=8, dropout=True,\r\n instance_loss_fn=nn.CrossEntropyLoss()):\r\n nn.Module.__init__(self)\r\n n_classes = conf.n_class\r\n self.size_dict = {\"small\": [conf.D_feat, conf.D_inner, 128], \"big\": [conf.D_feat, 512, 384]}\r\n size = self.size_dict[size_arg]\r\n fc = [nn.Linear(size[0], size[1]), nn.ReLU()]\r\n if dropout:\r\n fc.append(nn.Dropout(0.25))\r\n if gate:\r\n attention_net = Attn_Net_Gated(L=size[1], D=size[2], dropout=dropout, n_classes=n_classes)\r\n else:\r\n attention_net = Attn_Net(L=size[1], D=size[2], dropout=dropout, n_classes=n_classes)\r\n fc.append(attention_net)\r\n self.attention_net = nn.Sequential(*fc)\r\n bag_classifiers = [nn.Linear(size[1], 1) for i in\r\n range(n_classes)] # use an indepdent linear layer to predict each class\r\n self.classifiers = nn.ModuleList(bag_classifiers)\r\n instance_classifiers = [nn.Linear(size[1], 2) for i in range(n_classes)]\r\n self.instance_classifiers = nn.ModuleList(instance_classifiers)\r\n self.k_sample = k_sample\r\n self.instance_loss_fn = instance_loss_fn\r\n self.n_classes = n_classes\r\n self.subtyping = False\r\n if conf.n_class > 2:\r\n self.subtyping = True\r\n initialize_weights(self)\r\n\r\n def forward(self, h, label=None, instance_eval=False, return_features=False, attention_only=False):\r\n device = h.device\r\n h = h[0]\r\n A, h = self.attention_net(h) # NxK\r\n A = torch.transpose(A, 1, 0) # KxN\r\n if attention_only:\r\n return A\r\n A_raw = A\r\n A = softmax_one(A, dim=1) # softmax over N\r\n\r\n if instance_eval:\r\n total_inst_loss = 0.0\r\n all_preds = []\r\n all_targets = []\r\n inst_labels = F.one_hot(label, num_classes=self.n_classes).squeeze() # binarize label\r\n for i in range(len(self.instance_classifiers)):\r\n inst_label = inst_labels[i].item()\r\n classifier = self.instance_classifiers[i]\r\n if inst_label == 1: # in-the-class:\r\n instance_loss, preds, targets = self.inst_eval(A[i], h, classifier)\r\n all_preds.extend(preds.cpu().numpy())\r\n all_targets.extend(targets.cpu().numpy())\r\n else: # out-of-the-class\r\n if self.subtyping:\r\n instance_loss, preds, targets = self.inst_eval_out(A[i], h, classifier)\r\n all_preds.extend(preds.cpu().numpy())\r\n all_targets.extend(targets.cpu().numpy())\r\n else:\r\n continue\r\n total_inst_loss += instance_loss\r\n\r\n if self.subtyping:\r\n total_inst_loss /= len(self.instance_classifiers)\r\n\r\n M = torch.mm(A, h)\r\n logits = torch.empty(1, self.n_classes).float().to(device)\r\n for c in range(self.n_classes):\r\n logits[0, c] = self.classifiers[c](M[c])\r\n if instance_eval:\r\n return logits, total_inst_loss\r\n else:\r\n return logits\r"
},
{
"identifier": "mean_max",
"path": "modules/mean_max.py",
"snippet": "def initialize_weights(module):\n def __init__(self,conf,dropout=True,act='relu',test=False):\n def forward(self,x):\n def __init__(self,conf,dropout=True,act='relu',test=False):\n def forward(self,x):\nclass MeanMIL(nn.Module):\nclass MaxMIL(nn.Module):"
}
] | import sys
import os
import yaml
import argparse
import torch
from pprint import pprint
from torch import nn
from torch.utils.data import DataLoader
from utils.utils import save_model, Struct, set_seed, Wandb_Writer
from datasets.datasets import build_HDF5_feat_dataset
from architecture.transformer import TransformWrapper, AttnMIL
from architecture.transMIL import TransMIL
from engine import train_one_epoch, evaluate
from architecture.dsmil import MILNet, FCLayer, BClassifier
from architecture.bmil import probabilistic_MIL_Bayes_spvis
from architecture.clam import CLAM_SB, CLAM_MB
from modules import mean_max
| 10,059 |
# !/usr/bin/env python
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def get_arguments():
parser = argparse.ArgumentParser('Patch classification training', add_help=False)
parser.add_argument('--config', dest='config', default='config/camelyon_medical_ssl_config.yml',
help='settings of Tip-Adapter in yaml format')
parser.add_argument(
"--eval-only", action="store_true", help="evaluation only"
)
parser.add_argument(
"--seed", type=int, default=1, help="set the random seed to ensure reproducibility"
)
parser.add_argument('--wandb_mode', default='disabled', choices=['offline', 'online', 'disabled'],
help='the model of wandb')
parser.add_argument(
"--n_shot", type=int, default=-1, help="number of wsi images"
)
parser.add_argument(
"--w_loss", type=float, default=1.0, help="number of query token"
)
parser.add_argument(
"--arch", type=str, default='transmil', choices=['transmil', 'clam_sb', 'clam_mb', 'attnmil',
'selfattn', 'dsmil', 'bmil_spvis', 'meanmil', 'maxmil'], help="number of query token"
)
parser.add_argument(
"--n_token", type=int, default=1, help="number of query token"
)
parser.add_argument(
"--n_masked_patch", type=int, default=0, help="whether use adversarial mask"
)
args = parser.parse_args()
return args
def main():
# Load config file
args = get_arguments()
# get config
with open(args.config, "r") as ymlfile:
c = yaml.load(ymlfile, Loader=yaml.FullLoader)
c.update(vars(args))
conf = Struct(**c)
group_name = 'ds_%s_%s_arch_%s_%sepochs' % (conf.dataset, conf.pretrain, conf.arch, conf.train_epoch)
log_writer = Wandb_Writer(group_name=group_name, mode=args.wandb_mode, name=args.seed)
conf.ckpt_dir = log_writer.wandb.dir[:-5] + 'saved_models'
if conf.wandb_mode == 'disabled':
conf.ckpt_dir = os.path.join(conf.ckpt_dir, group_name, str(args.seed))
os.makedirs(conf.ckpt_dir, exist_ok=True)
print("Used config:");
pprint(vars(conf));
# Prepare dataset
set_seed(args.seed)
# define datasets and dataloaders
train_data, val_data, test_data = build_HDF5_feat_dataset(os.path.join(conf.data_dir, 'patch_feats_pretrain_%s.h5'%conf.pretrain), conf)
train_loader = DataLoader(train_data, batch_size=conf.B, shuffle=True,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=True)
val_loader = DataLoader(val_data, batch_size=conf.B, shuffle=False,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=False)
test_loader = DataLoader(test_data, batch_size=conf.B, shuffle=False,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=False)
# define network
if conf.arch == 'transmil':
net = TransMIL(conf)
elif conf.arch == 'selfattn':
net = TransformWrapper(conf)
elif conf.arch == 'clam_sb':
net = CLAM_SB(conf).to(device)
elif conf.arch == 'clam_mb':
|
# !/usr/bin/env python
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def get_arguments():
parser = argparse.ArgumentParser('Patch classification training', add_help=False)
parser.add_argument('--config', dest='config', default='config/camelyon_medical_ssl_config.yml',
help='settings of Tip-Adapter in yaml format')
parser.add_argument(
"--eval-only", action="store_true", help="evaluation only"
)
parser.add_argument(
"--seed", type=int, default=1, help="set the random seed to ensure reproducibility"
)
parser.add_argument('--wandb_mode', default='disabled', choices=['offline', 'online', 'disabled'],
help='the model of wandb')
parser.add_argument(
"--n_shot", type=int, default=-1, help="number of wsi images"
)
parser.add_argument(
"--w_loss", type=float, default=1.0, help="number of query token"
)
parser.add_argument(
"--arch", type=str, default='transmil', choices=['transmil', 'clam_sb', 'clam_mb', 'attnmil',
'selfattn', 'dsmil', 'bmil_spvis', 'meanmil', 'maxmil'], help="number of query token"
)
parser.add_argument(
"--n_token", type=int, default=1, help="number of query token"
)
parser.add_argument(
"--n_masked_patch", type=int, default=0, help="whether use adversarial mask"
)
args = parser.parse_args()
return args
def main():
# Load config file
args = get_arguments()
# get config
with open(args.config, "r") as ymlfile:
c = yaml.load(ymlfile, Loader=yaml.FullLoader)
c.update(vars(args))
conf = Struct(**c)
group_name = 'ds_%s_%s_arch_%s_%sepochs' % (conf.dataset, conf.pretrain, conf.arch, conf.train_epoch)
log_writer = Wandb_Writer(group_name=group_name, mode=args.wandb_mode, name=args.seed)
conf.ckpt_dir = log_writer.wandb.dir[:-5] + 'saved_models'
if conf.wandb_mode == 'disabled':
conf.ckpt_dir = os.path.join(conf.ckpt_dir, group_name, str(args.seed))
os.makedirs(conf.ckpt_dir, exist_ok=True)
print("Used config:");
pprint(vars(conf));
# Prepare dataset
set_seed(args.seed)
# define datasets and dataloaders
train_data, val_data, test_data = build_HDF5_feat_dataset(os.path.join(conf.data_dir, 'patch_feats_pretrain_%s.h5'%conf.pretrain), conf)
train_loader = DataLoader(train_data, batch_size=conf.B, shuffle=True,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=True)
val_loader = DataLoader(val_data, batch_size=conf.B, shuffle=False,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=False)
test_loader = DataLoader(test_data, batch_size=conf.B, shuffle=False,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=False)
# define network
if conf.arch == 'transmil':
net = TransMIL(conf)
elif conf.arch == 'selfattn':
net = TransformWrapper(conf)
elif conf.arch == 'clam_sb':
net = CLAM_SB(conf).to(device)
elif conf.arch == 'clam_mb':
| net = CLAM_MB(conf).to(device)
| 15 | 2023-11-12 14:07:34+00:00 | 12k |
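In the row above, `gold_snippet_index` is 15, which (0-indexed) points at the `CLAM_MB` entry of `context`, i.e. the cross-file definition needed to produce the ground-truth `next_line` (`net = CLAM_MB(conf).to(device)`). A small helper to make that lookup explicit, assuming `context` is parsed as a list of dicts with `identifier`, `path`, and `snippet` keys as shown above:

```python
def gold_context(row):
    """Return the context snippet that gold_snippet_index refers to."""
    return row["context"][row["gold_snippet_index"]]

# For the record above: gold_context(row)["identifier"] == "CLAM_MB"
# and gold_context(row)["path"] == "architecture/clam.py".
```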
zhang-tao-whu/DVIS_Plus | dvis_Plus/data_video/ytvis_eval.py | [
{
"identifier": "YTVOS",
"path": "dvis_Plus/data_video/datasets/ytvis_api/ytvos.py",
"snippet": "class YTVOS:\n def __init__(self, annotation_file=None):\n \"\"\"\n Constructor of Microsoft COCO helper class for reading and visualizing annotations.\n :param annotation_file (str): location of annotation file\n :param image_folder (str): location to the folder that hosts images.\n :return:\n \"\"\"\n # load dataset\n self.dataset,self.anns,self.cats,self.vids = dict(),dict(),dict(),dict()\n self.vidToAnns, self.catToVids = defaultdict(list), defaultdict(list)\n if not annotation_file == None:\n print('loading annotations into memory...')\n tic = time.time()\n dataset = json.load(open(annotation_file, 'r'))\n assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))\n print('Done (t={:0.2f}s)'.format(time.time()- tic))\n self.dataset = dataset\n self.createIndex()\n\n def createIndex(self):\n # create index\n print('creating index...')\n anns, cats, vids = {}, {}, {}\n vidToAnns,catToVids = defaultdict(list),defaultdict(list)\n if 'annotations' in self.dataset and self.dataset['annotations'] is not None:\n for ann in self.dataset['annotations']:\n vidToAnns[ann['video_id']].append(ann)\n anns[ann['id']] = ann\n\n if 'videos' in self.dataset:\n for vid in self.dataset['videos']:\n vids[vid['id']] = vid\n\n if 'categories' in self.dataset:\n for cat in self.dataset['categories']:\n cats[cat['id']] = cat\n\n if 'annotations' in self.dataset and self.dataset['annotations'] is not None and 'categories' in self.dataset:\n for ann in self.dataset['annotations']:\n catToVids[ann['category_id']].append(ann['video_id'])\n\n print('index created!')\n\n # create class members\n self.anns = anns\n self.vidToAnns = vidToAnns\n self.catToVids = catToVids\n self.vids = vids\n self.cats = cats\n\n def info(self):\n \"\"\"\n Print information about the annotation file.\n :return:\n \"\"\"\n for key, value in self.dataset['info'].items():\n print('{}: {}'.format(key, value))\n\n def getAnnIds(self, vidIds=[], catIds=[], areaRng=[], iscrowd=None):\n \"\"\"\n Get ann ids that satisfy given filter conditions. default skips that filter\n :param vidIds (int array) : get anns for given vids\n catIds (int array) : get anns for given cats\n areaRng (float array) : get anns for given area range (e.g. [0 inf])\n iscrowd (boolean) : get anns for given crowd label (False or True)\n :return: ids (int array) : integer array of ann ids\n \"\"\"\n vidIds = vidIds if _isArrayLike(vidIds) else [vidIds]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(vidIds) == len(catIds) == len(areaRng) == 0:\n anns = self.dataset['annotations']\n else:\n if not len(vidIds) == 0:\n lists = [self.vidToAnns[vidId] for vidId in vidIds if vidId in self.vidToAnns]\n anns = list(itertools.chain.from_iterable(lists))\n else:\n anns = self.dataset['annotations']\n anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]\n anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['avg_area'] > areaRng[0] and ann['avg_area'] < areaRng[1]]\n if not iscrowd == None:\n ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]\n else:\n ids = [ann['id'] for ann in anns]\n return ids\n\n def getCatIds(self, catNms=[], supNms=[], catIds=[]):\n \"\"\"\n filtering parameters. 
default skips that filter.\n :param catNms (str array) : get cats for given cat names\n :param supNms (str array) : get cats for given supercategory names\n :param catIds (int array) : get cats for given cat ids\n :return: ids (int array) : integer array of cat ids\n \"\"\"\n catNms = catNms if _isArrayLike(catNms) else [catNms]\n supNms = supNms if _isArrayLike(supNms) else [supNms]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(catNms) == len(supNms) == len(catIds) == 0:\n cats = self.dataset['categories']\n else:\n cats = self.dataset['categories']\n cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]\n cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]\n cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]\n ids = [cat['id'] for cat in cats]\n return ids\n\n def getVidIds(self, vidIds=[], catIds=[]):\n '''\n Get vid ids that satisfy given filter conditions.\n :param vidIds (int array) : get vids for given ids\n :param catIds (int array) : get vids with all given cats\n :return: ids (int array) : integer array of vid ids\n '''\n vidIds = vidIds if _isArrayLike(vidIds) else [vidIds]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(vidIds) == len(catIds) == 0:\n ids = self.vids.keys()\n else:\n ids = set(vidIds)\n for i, catId in enumerate(catIds):\n if i == 0 and len(ids) == 0:\n ids = set(self.catToVids[catId])\n else:\n ids &= set(self.catToVids[catId])\n return list(ids)\n\n def loadAnns(self, ids=[]):\n \"\"\"\n Load anns with the specified ids.\n :param ids (int array) : integer ids specifying anns\n :return: anns (object array) : loaded ann objects\n \"\"\"\n if _isArrayLike(ids):\n return [self.anns[id] for id in ids]\n elif type(ids) == int:\n return [self.anns[ids]]\n\n def loadCats(self, ids=[]):\n \"\"\"\n Load cats with the specified ids.\n :param ids (int array) : integer ids specifying cats\n :return: cats (object array) : loaded cat objects\n \"\"\"\n if _isArrayLike(ids):\n return [self.cats[id] for id in ids]\n elif type(ids) == int:\n return [self.cats[ids]]\n\n def loadVids(self, ids=[]):\n \"\"\"\n Load anns with the specified ids.\n :param ids (int array) : integer ids specifying vid\n :return: vids (object array) : loaded vid objects\n \"\"\"\n if _isArrayLike(ids):\n return [self.vids[id] for id in ids]\n elif type(ids) == int:\n return [self.vids[ids]]\n\n\n def loadRes(self, resFile):\n \"\"\"\n Load result file and return a result api object.\n :param resFile (str) : file name of result file\n :return: res (obj) : result api object\n \"\"\"\n res = YTVOS()\n res.dataset['videos'] = [img for img in self.dataset['videos']]\n\n print('Loading and preparing results...')\n tic = time.time()\n if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode):\n anns = json.load(open(resFile))\n elif type(resFile) == np.ndarray:\n anns = self.loadNumpyAnnotations(resFile)\n else:\n anns = resFile\n assert type(anns) == list, 'results in not an array of objects'\n annsVidIds = [ann['video_id'] for ann in anns]\n assert set(annsVidIds) == (set(annsVidIds) & set(self.getVidIds())), \\\n 'Results do not correspond to current coco set'\n if 'segmentations' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n ann['areas'] = []\n if not 'bboxes' in ann:\n ann['bboxes'] = []\n for seg in ann['segmentations']:\n # now only support compressed RLE format as 
segmentation results\n if seg:\n ann['areas'].append(maskUtils.area(seg))\n if len(ann['bboxes']) < len(ann['areas']):\n ann['bboxes'].append(maskUtils.toBbox(seg))\n else:\n ann['areas'].append(None)\n if len(ann['bboxes']) < len(ann['areas']):\n ann['bboxes'].append(None)\n ann['id'] = id+1\n l = [a for a in ann['areas'] if a]\n if len(l)==0:\n ann['avg_area'] = 0\n else:\n ann['avg_area'] = np.array(l).mean() \n ann['iscrowd'] = 0\n print('DONE (t={:0.2f}s)'.format(time.time()- tic))\n\n res.dataset['annotations'] = anns\n res.createIndex()\n return res\n\n def annToRLE(self, ann, frameId):\n \"\"\"\n Convert annotation which can be polygons, uncompressed RLE to RLE.\n :return: binary mask (numpy 2D array)\n \"\"\"\n t = self.vids[ann['video_id']]\n h, w = t['height'], t['width']\n segm = ann['segmentations'][frameId]\n if type(segm) == list:\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, h, w)\n rle = maskUtils.merge(rles)\n elif type(segm['counts']) == list:\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, h, w)\n else:\n # rle\n rle = segm\n return rle\n\n def annToMask(self, ann, frameId):\n \"\"\"\n Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.\n :return: binary mask (numpy 2D array)\n \"\"\"\n rle = self.annToRLE(ann, frameId)\n m = maskUtils.decode(rle)\n return m"
},
{
"identifier": "YTVOSeval",
"path": "dvis_Plus/data_video/datasets/ytvis_api/ytvoseval.py",
"snippet": "class YTVOSeval:\n # Interface for evaluating video instance segmentation on the YouTubeVIS dataset.\n #\n # The usage for YTVOSeval is as follows:\n # cocoGt=..., cocoDt=... # load dataset and results\n # E = YTVOSeval(cocoGt,cocoDt); # initialize YTVOSeval object\n # E.params.recThrs = ...; # set parameters as desired\n # E.evaluate(); # run per image evaluation\n # E.accumulate(); # accumulate per image results\n # E.summarize(); # display summary metrics of results\n # For example usage see evalDemo.m and http://mscoco.org/.\n #\n # The evaluation parameters are as follows (defaults in brackets):\n # imgIds - [all] N img ids to use for evaluation\n # catIds - [all] K cat ids to use for evaluation\n # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation\n # recThrs - [0:.01:1] R=101 recall thresholds for evaluation\n # areaRng - [...] A=4 object area ranges for evaluation\n # maxDets - [1 10 100] M=3 thresholds on max detections per image\n # iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'\n # iouType replaced the now DEPRECATED useSegm parameter.\n # useCats - [1] if true use category labels for evaluation\n # Note: if useCats=0 category labels are ignored as in proposal scoring.\n # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.\n #\n # evaluate(): evaluates detections on every image and every category and\n # concats the results into the \"evalImgs\" with fields:\n # dtIds - [1xD] id for each of the D detections (dt)\n # gtIds - [1xG] id for each of the G ground truths (gt)\n # dtMatches - [TxD] matching gt id at each IoU or 0\n # gtMatches - [TxG] matching dt id at each IoU or 0\n # dtScores - [1xD] confidence of each dt\n # gtIgnore - [1xG] ignore flag for each gt\n # dtIgnore - [TxD] ignore flag for each dt at each IoU\n #\n # accumulate(): accumulates the per-image, per-category evaluation\n # results in \"evalImgs\" into the dictionary \"eval\" with fields:\n # params - parameters used for evaluation\n # date - date evaluation was performed\n # counts - [T,R,K,A,M] parameter dimensions (see above)\n # precision - [TxRxKxAxM] precision for every evaluation setting\n # recall - [TxKxAxM] max recall for every evaluation setting\n # Note: precision and recall==-1 for settings with no gt objects.\n #\n # See also coco, mask, pycocoDemo, pycocoEvalDemo\n #\n # Microsoft COCO Toolbox. version 2.0\n # Data, paper, and tutorials available at: http://mscoco.org/\n # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.\n # Licensed under the Simplified BSD License [see coco/license.txt]\n def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):\n '''\n Initialize CocoEval using coco APIs for gt and dt\n :param cocoGt: coco object with ground truth annotations\n :param cocoDt: coco object with detection results\n :return: None\n '''\n if not iouType:\n print('iouType not specified. 
use default iouType segm')\n self.cocoGt = cocoGt # ground truth COCO API\n self.cocoDt = cocoDt # detections COCO API\n self.params = {} # evaluation parameters\n self.evalVids = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements\n self.eval = {} # accumulated evaluation results\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n self.params = Params(iouType=iouType) # parameters\n self._paramsEval = {} # parameters for evaluation\n self.stats = [] # result summarization\n self.ious = {} # ious between all gts and dts\n if not cocoGt is None:\n self.params.vidIds = sorted(cocoGt.getVidIds())\n self.params.catIds = sorted(cocoGt.getCatIds())\n\n\n def _prepare(self):\n '''\n Prepare ._gts and ._dts for evaluation based on params\n :return: None\n '''\n def _toMask(anns, coco):\n # modify ann['segmentation'] by reference\n for ann in anns:\n for i, a in enumerate(ann['segmentations']):\n if a:\n rle = coco.annToRLE(ann, i)\n ann['segmentations'][i] = rle\n l = [a for a in ann['areas'] if a]\n if len(l)==0:\n ann['avg_area'] = 0\n else:\n ann['avg_area'] = np.array(l).mean() \n p = self.params\n if p.useCats:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(vidIds=p.vidIds, catIds=p.catIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(vidIds=p.vidIds, catIds=p.catIds))\n else:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(vidIds=p.vidIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(vidIds=p.vidIds))\n\n # convert ground truth to mask if iouType == 'segm'\n if p.iouType == 'segm':\n _toMask(gts, self.cocoGt)\n _toMask(dts, self.cocoDt)\n # set ignore flag\n for gt in gts:\n gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0\n gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']\n if p.iouType == 'keypoints':\n gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n for gt in gts:\n self._gts[gt['video_id'], gt['category_id']].append(gt)\n for dt in dts:\n self._dts[dt['video_id'], dt['category_id']].append(dt)\n self.evalVids = defaultdict(list) # per-image per-category evaluation results\n self.eval = {} # accumulated evaluation results\n\n def evaluate(self):\n '''\n Run per image evaluation on given images and store results (a list of dict) in self.evalVids\n :return: None\n '''\n tic = time.time()\n print('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if not p.useSegm is None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n print('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType))\n print('Evaluate annotation type *{}*'.format(p.iouType))\n p.vidIds = list(np.unique(p.vidIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params=p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n if p.iouType == 'segm' or p.iouType == 'bbox':\n computeIoU = self.computeIoU\n elif p.iouType == 'keypoints':\n computeIoU = self.computeOks\n self.ious = {(vidId, catId): computeIoU(vidId, catId) \\\n for vidId in p.vidIds\n for catId in catIds}\n\n evaluateVid = self.evaluateVid\n maxDet = p.maxDets[-1]\n \n \n self.evalImgs = [evaluateVid(vidId, catId, areaRng, maxDet)\n for catId in catIds\n for areaRng in p.areaRng\n for vidId in p.vidIds\n ]\n self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format(toc-tic))\n\n def computeIoU(self, vidId, catId):\n p = self.params\n if p.useCats:\n gt = self._gts[vidId,catId]\n dt = self._dts[vidId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[vidId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[vidId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentations'] for g in gt]\n d = [d['segmentations'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bboxes'] for g in gt]\n d = [d['bboxes'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n #ious = maskUtils.iou(d,g,iscrowd)\n def iou_seq(d_seq, g_seq):\n i = .0\n u = .0\n for d, g in zip(d_seq, g_seq):\n if d and g:\n i += maskUtils.area(maskUtils.merge([d, g], True))\n u += maskUtils.area(maskUtils.merge([d, g], False))\n elif not d and g:\n u += maskUtils.area(g)\n elif d and not g:\n u += maskUtils.area(d)\n if not u > .0:\n print(\"Mask sizes in video {} and category {} may not match!\".format(vidId, catId))\n iou = i / u if u > .0 else .0\n return iou\n ious = np.zeros([len(d), len(g)])\n for i, j in np.ndindex(ious.shape):\n ious[i, j] = iou_seq(d[i], g[j])\n #print(vidId, catId, ious.shape, ious)\n return ious\n\n def computeOks(self, imgId, catId):\n p = self.params\n # dimention here should be Nxm\n gts = self._gts[imgId, catId]\n dts = self._dts[imgId, catId]\n inds = np.argsort([-d['score'] for d in dts], kind='mergesort')\n dts = [dts[i] for i in inds]\n if len(dts) > p.maxDets[-1]:\n dts = dts[0:p.maxDets[-1]]\n # if len(gts) == 0 and len(dts) == 0:\n if len(gts) == 0 or len(dts) == 0:\n return []\n ious = np.zeros((len(dts), len(gts)))\n sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0\n vars = (sigmas * 2)**2\n k = len(sigmas)\n # compute oks between each detection and ground truth object\n for j, gt in enumerate(gts):\n # create bounds for ignore regions(double the gt bbox)\n g = np.array(gt['keypoints'])\n xg = g[0::3]; yg = g[1::3]; vg = g[2::3]\n k1 = np.count_nonzero(vg > 0)\n bb = gt['bbox']\n x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2\n y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2\n for i, dt in enumerate(dts):\n d = np.array(dt['keypoints'])\n xd = d[0::3]; yd = d[1::3]\n if k1>0:\n # measure the per-keypoint distance if keypoints visible\n dx = xd - xg\n dy = yd - yg\n 
else:\n # measure minimum distance to keypoints in (x0,y0) & (x1,y1)\n z = np.zeros((k))\n dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)\n dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)\n e = (dx**2 + dy**2) / vars / (gt['avg_area']+np.spacing(1)) / 2\n if k1 > 0:\n e=e[vg > 0]\n ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]\n return ious\n\n def evaluateVid(self, vidId, catId, aRng, maxDet):\n '''\n perform evaluation for single category and image\n :return: dict (single image results)\n '''\n p = self.params\n if p.useCats:\n gt = self._gts[vidId,catId]\n dt = self._dts[vidId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[vidId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[vidId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return None\n\n for g in gt:\n if g['ignore'] or (g['avg_area']<aRng[0] or g['avg_area']>aRng[1]):\n g['_ignore'] = 1\n else:\n g['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')\n gt = [gt[i] for i in gtind]\n dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in dtind[0:maxDet]]\n iscrowd = [int(o['iscrowd']) for o in gt]\n # load computed ious\n ious = self.ious[vidId, catId][:, gtind] if len(self.ious[vidId, catId]) > 0 else self.ious[vidId, catId]\n\n T = len(p.iouThrs)\n G = len(gt)\n D = len(dt)\n gtm = np.zeros((T,G))\n dtm = np.zeros((T,D))\n gtIg = np.array([g['_ignore'] for g in gt])\n dtIg = np.zeros((T,D))\n if not len(ious)==0:\n for tind, t in enumerate(p.iouThrs):\n for dind, d in enumerate(dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for gind, g in enumerate(gt):\n # if this gt already matched, and not a crowd, continue\n if gtm[tind,gind]>0 and not iscrowd[gind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and gtIg[m]==0 and gtIg[gind]==1:\n break\n # continue to next gt unless better match made\n if ious[dind,gind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious[dind,gind]\n m=gind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n dtIg[tind,dind] = gtIg[m]\n dtm[tind,dind] = gt[m]['id']\n gtm[tind,m] = d['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([d['avg_area']<aRng[0] or d['avg_area']>aRng[1] for d in dt]).reshape((1, len(dt)))\n dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))\n # store results for given image and category\n return {\n 'video_id': vidId,\n 'category_id': catId,\n 'aRng': aRng,\n 'maxDet': maxDet,\n 'dtIds': [d['id'] for d in dt],\n 'gtIds': [g['id'] for g in gt],\n 'dtMatches': dtm,\n 'gtMatches': gtm,\n 'dtScores': [d['score'] for d in dt],\n 'gtIgnore': gtIg,\n 'dtIgnore': dtIg,\n }\n\n def accumulate(self, p = None):\n '''\n Accumulate per image evaluation results and store the result in self.eval\n :param p: input params for evaluation\n :return: None\n '''\n print('Accumulating evaluation results...')\n tic = time.time()\n if not self.evalImgs:\n print('Please run evaluate() first')\n # allows input customized parameters\n if p is None:\n p = self.params\n p.catIds = p.catIds if p.useCats == 1 else [-1]\n T = len(p.iouThrs)\n R = len(p.recThrs)\n K = len(p.catIds) if p.useCats else 1\n A = len(p.areaRng)\n M = len(p.maxDets)\n precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,K,A,M))\n scores = 
-np.ones((T,R,K,A,M))\n\n # create dictionary for future indexing\n _pe = self._paramsEval\n catIds = _pe.catIds if _pe.useCats else [-1]\n setK = set(catIds)\n setA = set(map(tuple, _pe.areaRng))\n setM = set(_pe.maxDets)\n setI = set(_pe.vidIds)\n # get inds to evaluate\n k_list = [n for n, k in enumerate(p.catIds) if k in setK]\n m_list = [m for n, m in enumerate(p.maxDets) if m in setM]\n a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]\n i_list = [n for n, i in enumerate(p.vidIds) if i in setI]\n I0 = len(_pe.vidIds)\n A0 = len(_pe.areaRng)\n # retrieve E at each category, area range, and max number of detections\n for k, k0 in enumerate(k_list):\n Nk = k0*A0*I0\n for a, a0 in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = [self.evalImgs[Nk + Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-dtScores, kind='mergesort')\n dtScoresSorted = dtScores[inds]\n\n dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]\n dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]\n gtIg = np.concatenate([e['gtIgnore'] for e in E])\n npig = np.count_nonzero(gtIg==0 )\n if npig == 0:\n continue\n tps = np.logical_and( dtm, np.logical_not(dtIg) )\n fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n\n if nd:\n recall[t,k,a,m] = rc[-1]\n else:\n recall[t,k,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = dtScoresSorted[pi]\n except:\n pass\n precision[t,:,k,a,m] = np.array(q)\n scores[t,:,k,a,m] = np.array(ss)\n self.eval = {\n 'params': p,\n 'counts': [T, R, K, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format( toc-tic))\n\n def summarize(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n '''\n def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Average Precision' if ap == 1 else 'Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == 
p.iouThrs)[0]\n s = s[t]\n s = s[:,:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n def _summarizeDets():\n stats = np.zeros((12,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])\n stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])\n stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])\n stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])\n stats[6] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, maxDets=self.params.maxDets[1])\n stats[8] = _summarize(0, maxDets=self.params.maxDets[2])\n stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])\n stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])\n return stats\n def _summarizeKps():\n stats = np.zeros((10,))\n stats[0] = _summarize(1, maxDets=20)\n stats[1] = _summarize(1, maxDets=20, iouThr=.5)\n stats[2] = _summarize(1, maxDets=20, iouThr=.75)\n stats[3] = _summarize(1, maxDets=20, areaRng='medium')\n stats[4] = _summarize(1, maxDets=20, areaRng='large')\n stats[5] = _summarize(0, maxDets=20)\n stats[6] = _summarize(0, maxDets=20, iouThr=.5)\n stats[7] = _summarize(0, maxDets=20, iouThr=.75)\n stats[8] = _summarize(0, maxDets=20, areaRng='medium')\n stats[9] = _summarize(0, maxDets=20, areaRng='large')\n return stats\n if not self.eval:\n raise Exception('Please run accumulate() first')\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n elif iouType == 'keypoints':\n summarize = _summarizeKps\n self.stats = summarize()\n\n def __str__(self):\n self.summarize()"
}
] | import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
import pycocotools.mask as mask_util
import torch
import detectron2.utils.comm as comm
from collections import OrderedDict
from .datasets.ytvis_api.ytvos import YTVOS
from .datasets.ytvis_api.ytvoseval import YTVOSeval
from tabulate import tabulate
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.evaluation import DatasetEvaluator
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table | 10,681 | # Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/sukjunhwang/IFC
class YTVISEvaluator(DatasetEvaluator):
"""
Evaluate AR for object proposals, AP for instance detection/segmentation, AP
for keypoint detection outputs using COCO's metrics.
See http://cocodataset.org/#detection-eval and
http://cocodataset.org/#keypoints-eval to understand its metrics.
In addition to COCO, this evaluator is able to support any bounding box detection,
instance segmentation, or keypoint detection dataset.
"""
def __init__(
self,
dataset_name,
tasks=None,
distributed=True,
output_dir=None,
*,
use_fast_impl=True,
):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have either the following corresponding metadata:
"json_file": the path to the COCO format annotation
Or it must be in detectron2's standard dataset format
so it can be converted to COCO format automatically.
tasks (tuple[str]): tasks that can be evaluated under the given
configuration. A task is one of "bbox", "segm", "keypoints".
By default, will infer this automatically from predictions.
distributed (True): if True, will collect results from all ranks and run evaluation
in the main process.
Otherwise, will only evaluate the results in the current process.
output_dir (str): optional, an output directory to dump all
results predicted on the dataset. The dump contains two files:
1. "instances_predictions.pth" a file in torch serialization
format that contains all the raw original predictions.
2. "coco_instances_results.json" a json file in COCO's result
format.
use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
Although the results should be very close to the official implementation in COCO
API, it is still recommended to compute results with the official API for use in
papers. The faster implementation also uses more RAM.
"""
self._logger = logging.getLogger(__name__)
self._distributed = distributed
self._output_dir = output_dir
self._use_fast_impl = use_fast_impl
if tasks is not None and isinstance(tasks, CfgNode):
self._logger.warning(
"COCO Evaluator instantiated using config, this is deprecated behavior."
" Please pass in explicit arguments instead."
)
            self._tasks = None # Inferring it from predictions should be better
else:
self._tasks = tasks
self._cpu_device = torch.device("cpu")
self._metadata = MetadataCatalog.get(dataset_name)
json_file = PathManager.get_local_path(self._metadata.json_file)
with contextlib.redirect_stdout(io.StringIO()):
| # Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/sukjunhwang/IFC
class YTVISEvaluator(DatasetEvaluator):
"""
Evaluate AR for object proposals, AP for instance detection/segmentation, AP
for keypoint detection outputs using COCO's metrics.
See http://cocodataset.org/#detection-eval and
http://cocodataset.org/#keypoints-eval to understand its metrics.
In addition to COCO, this evaluator is able to support any bounding box detection,
instance segmentation, or keypoint detection dataset.
"""
def __init__(
self,
dataset_name,
tasks=None,
distributed=True,
output_dir=None,
*,
use_fast_impl=True,
):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have either the following corresponding metadata:
"json_file": the path to the COCO format annotation
Or it must be in detectron2's standard dataset format
so it can be converted to COCO format automatically.
tasks (tuple[str]): tasks that can be evaluated under the given
configuration. A task is one of "bbox", "segm", "keypoints".
By default, will infer this automatically from predictions.
distributed (True): if True, will collect results from all ranks and run evaluation
in the main process.
Otherwise, will only evaluate the results in the current process.
output_dir (str): optional, an output directory to dump all
results predicted on the dataset. The dump contains two files:
1. "instances_predictions.pth" a file in torch serialization
format that contains all the raw original predictions.
2. "coco_instances_results.json" a json file in COCO's result
format.
use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
Although the results should be very close to the official implementation in COCO
API, it is still recommended to compute results with the official API for use in
papers. The faster implementation also uses more RAM.
"""
self._logger = logging.getLogger(__name__)
self._distributed = distributed
self._output_dir = output_dir
self._use_fast_impl = use_fast_impl
if tasks is not None and isinstance(tasks, CfgNode):
self._logger.warning(
"COCO Evaluator instantiated using config, this is deprecated behavior."
" Please pass in explicit arguments instead."
)
            self._tasks = None # Inferring it from predictions should be better
else:
self._tasks = tasks
self._cpu_device = torch.device("cpu")
self._metadata = MetadataCatalog.get(dataset_name)
json_file = PathManager.get_local_path(self._metadata.json_file)
with contextlib.redirect_stdout(io.StringIO()): | self._ytvis_api = YTVOS(json_file) | 0 | 2023-11-14 10:55:11+00:00 | 12k |
ej0cl6/TextEE | TextEE/models/AMRIE/E2Emodel.py | [
{
"identifier": "Graph",
"path": "TextEE/models/AMRIE/graph.py",
"snippet": "class Graph(object):\n def __init__(self, entities, triggers, relations, roles, vocabs, mentions=None):\n \"\"\"\n :param entities (list): A list of entities represented as a tuple of\n (start_offset, end_offset, label_idx). end_offset = the index of the end\n token + 1.\n :param triggers (list): A list of triggers represented as a tuple of\n (start_offset, end_offset, label_idx). end_offset = the index of the end\n token + 1.\n :param relations (list): A list of relations represented as a tuple of\n (entity_idx_1, entity_idx_2, label_idx). As we do not consider the\n direction of relations (list), it is better to have entity_idx_1 <\n entity_idx2.\n :param roles: A list of roles represented as a tuple of (trigger_idx_1,\n entity_idx_2, label_idx).\n :param vocabs (dict): Label type vocabularies.\n \"\"\"\n self.entities = entities\n self.triggers = triggers\n self.relations = relations\n self.roles = roles\n self.vocabs = vocabs\n self.mentions = [] if mentions is None else mentions\n\n self.entity_num = len(entities)\n self.trigger_num = len(triggers)\n self.relation_num = len(relations)\n self.role_num = len(roles)\n self.graph_local_score = 0.0\n\n # subscores\n self.entity_scores = []\n self.trigger_scores = []\n self.relation_scores = []\n self.role_scores = []\n\n def __eq__(self, other):\n if isinstance(other, Graph):\n equal = (self.entities == other.entities and\n self.triggers == other.triggers and\n self.relations == other.relations and\n self.roles == other.roles and\n self.mentions == other.mentions)\n return equal\n return False\n\n\n def to_dict(self):\n \"\"\"Convert a graph to a dict object\n :return (dict): A dictionary representing the graph, where label indices\n have been replaced with label strings.\n \"\"\"\n entity_itos = {i: s for s, i in self.vocabs['entity_type'].items()}\n trigger_itos = {i: s for s, i in self.vocabs['event_type'].items()}\n relation_itos = {i: s for s, i in self.vocabs['relation_type'].items()}\n role_itos = {i: s for s, i in self.vocabs['role_type'].items()}\n mention_itos = {i: s for s, i in self.vocabs['mention_type'].items()}\n\n entities = [[i, j, entity_itos[k], mention_itos[l]] for (i, j, k), (_, _, l) in zip(self.entities, self.mentions)]\n triggers = [[i, j, trigger_itos[k]] for (i, j, k) in self.triggers]\n relations = [[i, j, relation_itos[k]] for (i, j, k) in self.relations]\n roles = [[i, j, role_itos[k]] for (i, j, k) in self.roles]\n\n return {\n 'entities': entities,\n 'triggers': triggers,\n 'relations': relations,\n 'roles': roles,\n }\n\n def __str__(self):\n return str(self.to_dict())\n\n def copy(self):\n \"\"\"Make a copy of the graph\n :return (Graph): a copy of the current graph.\n \"\"\"\n graph = Graph(\n entities=self.entities.copy(),\n triggers=self.triggers.copy(),\n relations=self.relations.copy(),\n roles=self.roles.copy(),\n mentions=self.mentions.copy(),\n vocabs=self.vocabs\n )\n graph.graph_local_score = self.graph_local_score\n graph.entity_scores = self.entity_scores\n graph.trigger_scores = self.trigger_scores\n graph.relation_scores = self.relation_scores\n graph.role_scores = self.role_scores\n return graph\n\n def clean(self, relation_directional=False, symmetric_relations=None):\n\n entities = [(i, j, k, l) for (i, j, k), l in zip(self.entities, self.entity_scores)]\n triggers = [(i, j, k, l) for (i, j, k), l in zip(self.triggers, self.trigger_scores)]\n relations = [(i, j, k, l) for (i, j, k), l in zip(self.relations, self.relation_scores)]\n roles = [(i, j, k, l) for (i, j, k), l in 
zip(self.roles, self.role_scores)]\n\n # clean relations\n if relation_directional and symmetric_relations:\n relation_itos = {i: s for s, i in self.vocabs['relation_type'].items()}\n # relations = []\n relations_tmp = []\n # for i, j, k in self.relations:\n for i, j, k, l in relations:\n if relation_itos[k] not in symmetric_relations:\n # relations.append((i, j, k))\n relations_tmp.append((i, j, k, l))\n else:\n if j < i:\n i, j = j, i\n relations_tmp.append((i, j, k, l))\n # self.relations = relations\n relations = relations_tmp\n\n self.entities = [(i, j, k) for i, j, k, _ in entities]\n self.entity_scores = [l for _, _, _, l in entities]\n self.triggers = [(i, j, k) for i, j, k, _ in triggers]\n self.trigger_scores = [l for _, _, _, l in triggers]\n self.relations = [(i, j, k) for i, j, k, _ in relations]\n self.relation_scores = [l for _, _, _, l in relations]\n self.roles = [(i, j, k) for i, j, k, _ in roles]\n self.role_scores = [l for _, _, _, l in roles]\n\n def add_entity(self, start, end, label, score=0, score_norm=0):\n \"\"\"Add an entity mention to the graph.\n :param start (int): Start token offset of the entity mention.\n :param end (int): End token offset of the entity mention + 1.\n :param label (int): Index of the entity type label.\n :param score (float): Label score.\n \"\"\"\n self.entities.append((start, end, label))\n self.entity_num = len(self.entities)\n self.graph_local_score += score\n self.entity_scores.append(score_norm)\n\n def add_trigger(self, start, end, label, score=0, score_norm=0):\n \"\"\"Add an event trigger to the graph.\n :param start (int): Start token offset of the trigger.\n :param end (int): End token offset of the trigger + 1.\n :param label (int): Index of the event type label.\n :param score (float): Label score.\n \"\"\"\n self.triggers.append((start, end, label))\n self.trigger_num = len(self.triggers)\n self.graph_local_score += score\n self.trigger_scores.append(score_norm)\n\n def add_relation(self, idx1, idx2, label, score=0, score_norm=0):\n \"\"\"Add a relation edge to the graph.\n :param idx1 (int): Index of the entity node 1.\n :param idx2 (int): Index of the entity node 2.\n :param label (int): Index of the relation type label.\n :param score (float): Label score.\n \"\"\"\n # assert idx1 < self.entity_num and idx2 < self.entity_num\n if label:\n self.relations.append((idx1, idx2, label))\n self.relation_scores.append(score_norm)\n self.relation_num = len(self.relations)\n self.graph_local_score += score\n\n def add_role(self, idx1, idx2, label, score=0, score_norm=0):\n \"\"\"Add an event-argument link edge to the graph.\n :param idx1 (int): Index of the trigger node.\n :param idx2 (int): Index of the entity node.\n :param label (int): Index of the role label.\n :param score (float): Label score.\n \"\"\"\n # assert idx1 < self.trigger_num and idx2 < self.entity_num\n # self.roles.append((idx1, idx2, label))\n if label:\n self.roles.append((idx1, idx2, label))\n self.role_scores.append(score_norm)\n self.role_num = len(self.roles)\n self.graph_local_score += score\n\n @staticmethod\n def empty_graph(vocabs):\n \"\"\"Create a graph without any node and edge.\n :param vocabs (dict): Vocabulary object.\n \"\"\"\n return Graph([], [], [], [], vocabs)\n\n def to_label_idxs(self, max_entity_num, max_trigger_num,\n relation_directional=False,\n symmetric_relation_idxs=None):\n \"\"\"Generate label index tensors (which are actually list objects not\n Pytorch tensors) to gather calculated scores.\n :param max_entity_num: Max entity number 
of the batch.\n :param max_trigger_num: Max trigger number of the batch.\n :return: Index and mask tensors.\n \"\"\"\n entity_idxs = [i[-1] for i in self.entities] + [0] * (max_entity_num - self.entity_num)\n entity_mask = [1] * self.entity_num + [0] * (max_entity_num - self.entity_num)\n\n trigger_idxs = [i[-1] for i in self.triggers] + [0] * (max_trigger_num - self.trigger_num)\n trigger_mask = [1] * self.trigger_num + [0] * (max_trigger_num - self.trigger_num)\n\n relation_idxs = [0] * max_entity_num * max_entity_num\n relation_mask = [1 if i < self.entity_num and j < self.entity_num and i != j else 0\n for i in range(max_entity_num) for j in range(max_entity_num)]\n for i, j, relation in self.relations:\n relation_idxs[i * max_entity_num + j] = relation\n if not relation_directional:\n relation_idxs[j * max_entity_num + i] = relation\n if relation_directional and symmetric_relation_idxs and relation in symmetric_relation_idxs:\n relation_idxs[j * max_entity_num + i] = relation\n \n\n role_idxs = [0] * max_trigger_num * max_entity_num\n for i, j, role in self.roles:\n role_idxs[i * max_entity_num + j] = role\n role_mask = [1 if i < self.trigger_num and j < self.entity_num else 0\n for i in range(max_trigger_num) for j in range(max_entity_num)]\n\n return (\n entity_idxs, entity_mask, trigger_idxs, trigger_mask,\n relation_idxs, relation_mask, role_idxs, role_mask,\n )"
},
{
"identifier": "generate_global_feature_vector",
"path": "TextEE/models/AMRIE/global_feature.py",
"snippet": "def generate_global_feature_vector(graph,\n global_feature_maps,\n features=None):\n role_role_map = global_feature_maps['role_role']\n role_role_vec = np.zeros(len((role_role_map)))\n role_entity_map = global_feature_maps['role_entity']\n role_entity_vec = np.zeros(len(role_entity_map))\n event_role_num_map = global_feature_maps['event_role_num']\n event_role_num_vec = np.zeros(len(event_role_num_map))\n multi_role_map = global_feature_maps['multi_role']\n multi_role_vec = np.zeros(len(multi_role_map))\n event_role_event_role_map = global_feature_maps['event_role_event_role']\n event_role_event_role_vec = np.zeros(len(event_role_event_role_map))\n relation_entity_entity_map = global_feature_maps['relation_entity_entity']\n relation_entity_entity_vec = np.zeros(len(relation_entity_entity_map))\n relation_entity_map = global_feature_maps['relation_entity']\n relation_entity_vec = np.zeros(len(relation_entity_map))\n relation_role_role_map = global_feature_maps['relation_role_role']\n relation_role_role_vec = np.zeros(len(relation_role_role_map))\n multi_relation_map = global_feature_maps['multi_relation']\n multi_relation_vec = np.zeros(len(multi_relation_map))\n relation_relation_map = global_feature_maps['relation_relation']\n relation_relation_vec = np.zeros(len(relation_relation_map))\n multi_event_map = global_feature_maps['multi_event']\n multi_event_vec = np.zeros(len(multi_event_map))\n\n # event argument role related features\n entity_roles = [[] for _ in range(graph.entity_num)]\n entity_event_role = [[] for _ in range(graph.entity_num)]\n event_role_count = [Counter() for _ in range(graph.trigger_num)]\n for trigger_idx, entity_idx, role in graph.roles:\n entity_roles[entity_idx].append(role)\n entity_event_role[entity_idx].append(\n (graph.triggers[trigger_idx][-1], role))\n event_role_count[trigger_idx][role] += 1\n # 3. role entity\n role_entity = role * 100 + graph.entities[entity_idx][-1]\n if role_entity in role_entity_map:\n role_entity_vec[role_entity_map[role_entity]] += 1\n # 1. role role\n for roles in entity_roles:\n for role1, role2 in itertools.combinations(roles, 2):\n key = role1 * 100 + role2 if role1 < role2 \\\n else role2 * 100 + role1\n if key in role_role_map:\n role_role_vec[role_role_map[key]] += 1\n # 2. event role num & 4. multiple role\n for event, role_count in enumerate(event_role_count):\n for role, count in role_count.items():\n # to reduce the number of features, we treat numbers > 2 as 2\n key = graph.triggers[event][-1] * 1000 + role * 10 + min(count, 2)\n if key in event_role_num_map:\n event_role_num_vec[event_role_num_map[key]] += 1\n if count > 1 and role in multi_role_map:\n multi_role_vec[multi_role_map[role]] += 1\n # 5. 
event role event role\n for event_role_pairs in entity_event_role:\n for (event1, role1), (event2, role2) in itertools.combinations(\n event_role_pairs, 2):\n if event1 < event2:\n key = event1 * 1000000 + role1 * 10000 + event2 * 100 + role2\n else:\n key = event2 * 1000000 + role2 * 10000 + event1 * 100 + role1\n if key in event_role_event_role_map:\n event_role_event_role_vec[event_role_event_role_map[key]] += 1\n\n # relation related features\n entity_role_unique = [set(x) for x in entity_roles]\n entity_relation_count = [Counter() for _ in range(graph.entity_num)]\n for entity_idx1, entity_idx2, relation in graph.relations:\n entity_relation_count[entity_idx1][relation] += 1\n entity_relation_count[entity_idx2][relation] += 1\n entity1 = graph.entities[entity_idx1][-1]\n entity2 = graph.entities[entity_idx2][-1]\n # 6. relation entity entity\n if entity1 < entity2:\n key = relation * 10000 + entity1 * 100 + entity2\n else:\n key = relation * 10000 + entity2 * 100 + entity1\n if key in relation_entity_entity_map:\n relation_entity_entity_vec[relation_entity_entity_map[key]] += 1\n # 7. relation entity\n key1 = relation * 100 + entity1\n key2 = relation * 100 + entity2\n if key1 in relation_entity_map:\n relation_entity_vec[relation_entity_map[key1]] += 1\n if key2 in relation_entity_map:\n relation_entity_vec[relation_entity_map[key2]] += 1\n # 8. relation role role\n roles1 = entity_role_unique[entity_idx1]\n roles2 = entity_role_unique[entity_idx2]\n for role1 in roles1:\n for role2 in roles2:\n if role1 < role2:\n key = relation * 10000 + role1 * 100 + role2\n else:\n key = relation * 10000 + role2 * 100 + role1\n if key in relation_role_role_map:\n relation_role_role_vec[relation_role_role_map[key]] += 1\n # 9. multiple relation & 10. relation relation\n for relation_count in entity_relation_count:\n relations = []\n for relation, count in relation_count.items():\n relations.append(relation)\n if count > 1:\n relations.append(relation)\n if relation in multi_relation_map:\n multi_relation_vec[multi_relation_map[relation]] += 1\n for relation1, relation2 in itertools.combinations(relations, 2):\n if relation1 < relation2:\n key = relation1 * 100 + relation2\n else:\n key = relation2 * 100 + relation1\n if key in relation_relation_map:\n relation_relation_vec[relation_relation_map[key]] += 1\n\n # 11. 
multiple event\n trigger_count = Counter()\n for _, _, trigger in graph.triggers:\n trigger_count[trigger] += 1\n for trigger, count in trigger_count.items():\n if count > 1 and trigger in multi_event_map:\n multi_event_vec[multi_event_map[trigger]] = 1\n\n feature_vector = np.concatenate(\n [role_role_vec, event_role_num_vec, role_entity_vec,\n multi_role_vec, event_role_event_role_vec, relation_entity_entity_vec,\n relation_entity_vec, relation_role_role_vec,\n multi_relation_vec, relation_relation_vec, multi_event_vec]\n )\n\n if features:\n vectors = {\n 'role_role': role_role_vec,\n 'event_role_num': event_role_num_vec,\n 'role_entity': role_entity_vec,\n 'multi_role': multi_role_vec,\n 'event_role_event_role': event_role_event_role_vec,\n 'relation_entity_entity': relation_entity_entity_vec,\n 'relation_entity': relation_entity_vec,\n 'relation_role_role': relation_role_role_vec,\n 'multi_relation': multi_relation_vec,\n 'relation_relation': relation_relation_vec,\n 'multi_event': multi_event_vec\n }\n feature_vector = np.concatenate([vectors[k] for k in features])\n else:\n feature_vector = np.concatenate(\n [role_role_vec, event_role_num_vec, role_entity_vec,\n multi_role_vec, event_role_event_role_vec, relation_entity_entity_vec,\n relation_entity_vec, relation_role_role_vec,\n multi_relation_vec, relation_relation_vec, multi_event_vec]\n )\n return feature_vector"
},
{
"identifier": "generate_global_feature_maps",
"path": "TextEE/models/AMRIE/global_feature.py",
"snippet": "def generate_global_feature_maps(vocabs, valid_patterns):\n \"\"\"\n Note that feature maps here refer to \"feature-index mappings\", not feature\n maps in CNNs.\n :param vocabs: vocabularies.\n :param valid_patterns: valid patterns (only event-role patterns are used).\n :return (dict): a dictionary of feature-index maps.\n \"\"\"\n event_type_vocab = vocabs['event_type']\n entity_type_vocab = vocabs['entity_type']\n role_type_vocab = vocabs['role_type']\n relation_type_vocab = vocabs['relation_type']\n event_role = valid_patterns['event_role']\n\n # 1. role role: the number of entities that act as <role_i> and <role_j>\n # arguments at the same time\n role_role_map = set()\n for role1 in role_type_vocab.values():\n for role2 in role_type_vocab.values():\n if role1 and role2:\n if role1 < role2:\n key = role1 * 1000 + role2\n else:\n key = role2 * 1000 + role1\n role_role_map.add(key)\n role_role_map = sorted(list(role_role_map))\n role_role_map = {k: i for i, k in enumerate(role_role_map)}\n\n # 2. event role num: the number of <event_type_i> events with <number>\n # <role_j> arguments\n event_role_num_map = list()\n for event in event_type_vocab.values():\n for role in role_type_vocab.values():\n if event and role:\n key = event * 100000 + role * 100\n event_role_num_map.append(key + 1)\n event_role_num_map.append(key + 2)\n event_role_num_map.sort()\n event_role_num_map = {k: i for i, k in enumerate(event_role_num_map)}\n\n # 3. role entity: the number of occurrences of <entity_type_i> and <role_j>\n # combination\n role_entity_map = list()\n for role in role_type_vocab.values():\n for entity in entity_type_vocab.values():\n if role and entity:\n role_entity_map.append(role * 1000 + entity)\n role_entity_map.sort()\n role_entity_map = {k: i for i, k in enumerate(role_entity_map)}\n\n # 4. multiple role\n multi_role_map = [role for role in role_type_vocab.values() if role]\n multi_role_map.sort()\n multi_role_map = {k: i for i, k in enumerate(multi_role_map)}\n\n # 5. event role event role: the number of entities that act as a <role_i>\n # argument of an <event_type_j> event and a <role_k> argument of an\n # <event_type_l> event at the same time\n event_role_event_role_map = set()\n for event_role1 in event_role:\n for event_role2 in event_role:\n event1 = event_role1 // 1000\n event2 = event_role2 // 1000\n role1 = event_role1 % 1000\n role2 = event_role2 % 1000\n if event1 < event2:\n key = event1 * 1000000000 + role1 * 1000000 + event2 * 1000 + role2\n else:\n key = event2 * 1000000000 + role2 * 1000000 + event1 * 1000 + role1\n event_role_event_role_map.add(key)\n event_role_event_role_map = sorted(list(event_role_event_role_map))\n event_role_event_role_map = {k: i for i, k in enumerate(event_role_event_role_map)}\n\n # 6. relation entity entity: the number of occurrences of <entity_type_i>,\n # <entity_type_j>, and <relation_type_k> combination\n relation_entity_entity_map = set()\n for relation in relation_type_vocab.values():\n for entity1 in entity_type_vocab.values():\n for entity2 in entity_type_vocab.values():\n if relation and entity1 and entity2:\n key = relation * 1000000\n if entity1 < entity2:\n key += entity1 * 1000 + entity2\n else:\n key += entity2 * 1000 + entity1\n relation_entity_entity_map.add(key)\n relation_entity_entity_map = sorted(list(relation_entity_entity_map))\n relation_entity_entity_map = {k: i for i, k in enumerate(relation_entity_entity_map)}\n\n # 7. 
relation entity: the number of occurrences of <entity_type_i> and\n # <relation_type_j> combination\n relation_entity_map = [relation * 1000 + entity\n for relation in relation_type_vocab.values()\n for entity in entity_type_vocab.values()\n if relation and entity]\n relation_entity_map.sort()\n relation_entity_map = {k: i for i, k in enumerate(relation_entity_map)}\n\n # 8. relation role role: the number of occurrences of a <relation_type_i>\n # relation between a <role_j> argument and a <role_k> argument of the same\n # event\n relation_role_role_map = set()\n for relation in relation_type_vocab.values():\n for role1 in role_type_vocab.values():\n for role2 in role_type_vocab.values():\n if relation and role1 and role2:\n key = relation * 1000000\n if role1 < role2:\n key += role1 * 1000 + role2\n else:\n key += role2 * 1000 + role1\n relation_role_role_map.add(key)\n relation_role_role_map = sorted(list(relation_role_role_map))\n relation_role_role_map = {k: i for i, k in enumerate(relation_role_role_map)}\n\n # 9. multiple relation: the number of entities that have a <relation_type_i>\n # relation with multiple entities\n multi_relation_map = [relation for relation in relation_type_vocab.values()\n if relation]\n multi_relation_map.sort()\n multi_relation_map = {k: i for i, k in enumerate((multi_relation_map))}\n\n # 10. relation relation: the number of entities involving in <relation_type_i>\n # and <relation_type_j> relations simultaneously\n relation_relation_map = set()\n for relation1 in relation_type_vocab.values():\n for relation2 in relation_type_vocab.values():\n if relation1 and relation2:\n key = relation1 * 1000 + relation2 if relation1 < relation2 \\\n else relation2 * 1000 + relation1\n relation_relation_map.add(key)\n relation_relation_map = sorted(list(relation_relation_map))\n relation_relation_map = {k: i for i, k in enumerate(relation_relation_map)}\n\n # 11. multiple event: whether a graph contains more than one <event_type_i>\n # event\n multi_event_map = [event for event in event_type_vocab.values() if event]\n multi_event_map.sort()\n multi_event_map = {k: i for i, k in enumerate(multi_event_map)}\n\n return {\n 'role_role': role_role_map,\n 'event_role_num': event_role_num_map,\n 'role_entity': role_entity_map,\n 'multi_role': multi_role_map,\n 'event_role_event_role': event_role_event_role_map,\n 'relation_entity_entity': relation_entity_entity_map,\n 'relation_entity': relation_entity_map,\n 'relation_role_role': relation_role_role_map,\n 'multi_relation': multi_relation_map,\n 'relation_relation': relation_relation_map,\n 'multi_event': multi_event_map\n }"
},
{
"identifier": "normalize_score",
"path": "TextEE/models/AMRIE/util.py",
"snippet": "def normalize_score(scores):\n min_score, max_score = min(scores), max(scores)\n if min_score == max_score:\n return [0] * len(scores)\n return [(s - min_score) / (max_score - min_score) for s in scores]"
},
{
"identifier": "FinalGNN",
"path": "TextEE/models/AMRIE/gnn.py",
"snippet": "class FinalGNN(nn.Module):\n \n def __init__(self, bert_dim, edge_dim, edge_type_num, layers, lamda, device):\n super(FinalGNN, self).__init__()\n self.bert_dim = bert_dim\n self.edge_dim = edge_dim\n self.nlayers = layers\n self.edge_type_num = edge_type_num\n self.device = device\n self.gnn = MultiGAT(bert_dim, edge_dim, bert_dim, layers, lamda)\n # for i in range(len(self.gnn.gats)):\n # self.gnn.gats[i].attn_fc.to(device)\n # self.gnn.gats[i].edge_fc.to(device)\n # self.gnn.gats[i].fc.to(device)\n self.edge_embeds = nn.Embedding(edge_type_num, edge_dim)\n\n def forward(self, g, amr_emb):\n # amr_emb: (max_seq_len, bert_dim)\n\n edge_idx = g.edata['type'].squeeze(1)\n edge_embed = self.edge_embeds(edge_idx)\n nodes_out = self.gnn(g, amr_emb, edge_embed)\n return nodes_out"
}
] | import torch
import torch.nn as nn
import dgl
import numpy as np
from .graph import Graph
from transformers import BertModel, RobertaModel, XLMRobertaModel, AutoModel, BertConfig, RobertaConfig, XLMRobertaConfig
from .global_feature import generate_global_feature_vector, generate_global_feature_maps
from .util import normalize_score
from .gnn import FinalGNN | 9,709 |
def calc_conf_score_(self, logits, labels):
batch_size, _, _ = logits.size()
logits_t = logits.transpose(1, 0)
scores = [[] for _ in range(batch_size)]
pre_labels = [self.start] * batch_size
for i, logit in enumerate(logits_t):
logit_exp = logit.unsqueeze(-1).expand(batch_size,
self.label_size,
self.label_size)
trans_exp = self.transition.unsqueeze(0).expand(batch_size,
self.label_size,
self.label_size)
score = logit_exp + trans_exp
score = score.view(-1, self.label_size * self.label_size) \
.softmax(1)
for j in range(batch_size):
cur_label = labels[j][i]
cur_score = score[j][cur_label * self.label_size + pre_labels[j]]
scores[j].append(cur_score)
pre_labels[j] = cur_label
return scores
class AMRIEE2EModel(nn.Module):
def __init__(self,
config,
vocabs,
valid_patterns=None):
super().__init__()
self.if_local = 0
# vocabularies
self.vocabs = vocabs
self.entity_label_stoi = vocabs['entity_label']
self.trigger_label_stoi = vocabs['trigger_label']
self.mention_type_stoi = vocabs['mention_type']
self.entity_type_stoi = vocabs['entity_type']
self.event_type_stoi = vocabs['event_type']
self.relation_type_stoi = vocabs['relation_type']
self.role_type_stoi = vocabs['role_type']
self.entity_label_itos = {i:s for s, i in self.entity_label_stoi.items()}
self.trigger_label_itos = {i:s for s, i in self.trigger_label_stoi.items()}
self.entity_type_itos = {i: s for s, i in self.entity_type_stoi.items()}
self.event_type_itos = {i: s for s, i in self.event_type_stoi.items()}
self.relation_type_itos = {i: s for s, i in self.relation_type_stoi.items()}
self.role_type_itos = {i: s for s, i in self.role_type_stoi.items()}
self.entity_label_num = len(self.entity_label_stoi)
self.trigger_label_num = len(self.trigger_label_stoi)
self.mention_type_num = len(self.mention_type_stoi)
self.entity_type_num = len(self.entity_type_stoi)
self.event_type_num = len(self.event_type_stoi)
self.relation_type_num = len(self.relation_type_stoi)
self.role_type_num = len(self.role_type_stoi)
self.valid_relation_entity = set()
self.valid_event_role = set()
self.valid_role_entity = set()
if valid_patterns:
self.valid_event_role = valid_patterns['event_role']
self.valid_relation_entity = valid_patterns['relation_entity']
self.valid_role_entity = valid_patterns['role_entity']
self.relation_directional = config.relation_directional
self.symmetric_relations = config.symmetric_relations
self.symmetric_relation_idxs = {self.relation_type_stoi[r]
for r in self.symmetric_relations}
# BERT encoder
self.pretrained_model_name = config.pretrained_model_name
self.cache_dir = config.cache_dir
if self.pretrained_model_name.startswith('bert-'):
self.bert = BertModel.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir,
output_hidden_states=True)
self.bert_config = BertConfig.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir)
elif self.pretrained_model_name.startswith('roberta-'):
self.bert = RobertaModel.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir,
output_hidden_states=True)
self.bert_config = RobertaConfig.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir)
elif self.pretrained_model_name.startswith('xlm-'):
self.bert = XLMRobertaModel.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir,
output_hidden_states=True)
self.bert_config = XLMRobertaConfig.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir)
else:
raise ValueError
self.bert_dim = self.bert_config.hidden_size
self.extra_bert = config.extra_bert
self.use_extra_bert = config.use_extra_bert
if self.use_extra_bert:
self.bert_dim *= 2
# print(self.use_extra_bert)
# print(bert_config)
# self.bert = BertModel(bert_config)
self.bert_dropout = nn.Dropout(p=config.bert_dropout)
self.multi_piece = config.multi_piece_strategy
# local classifiers
self.use_entity_type = config.use_entity_type
self.binary_dim = self.bert_dim * 2
linear_bias = config.linear_bias
linear_dropout = config.linear_dropout
entity_hidden_num = config.entity_hidden_num
mention_hidden_num = config.mention_hidden_num
event_hidden_num = config.event_hidden_num
relation_hidden_num = config.relation_hidden_num
role_hidden_num = config.role_hidden_num
self.edge_type_num = config.edge_type_num
self.edge_type_dim = config.edge_type_dim
self.use_graph_encoder = config.use_graph_encoder
gnn_layers = config.gnn_layers
self.lamda = config.lamda
role_input_dim = self.binary_dim + (self.entity_type_num if self.use_entity_type else 0)
self.device = config.gpu_device
# print(self.bert_dim)
if self.use_graph_encoder:
if not self.if_local:
|
def log_sum_exp(tensor, dim=0, keepdim: bool = False):
"""LogSumExp operation used by CRF."""
m, _ = tensor.max(dim, keepdim=keepdim)
if keepdim:
stable_vec = tensor - m
else:
stable_vec = tensor - m.unsqueeze(dim)
return m + (stable_vec.exp().sum(dim, keepdim=keepdim)).log()
def sequence_mask(lens, max_len=None):
"""Generate a sequence mask tensor from sequence lengths, used by CRF."""
batch_size = lens.size(0)
if max_len is None:
max_len = lens.max().item()
ranges = torch.arange(0, max_len, device=lens.device).long()
ranges = ranges.unsqueeze(0).expand(batch_size, max_len)
lens_exp = lens.unsqueeze(1).expand_as(ranges)
mask = ranges < lens_exp
return mask
def token_lens_to_offsets(token_lens):
"""Map token lengths to first word piece indices, used by the sentence
encoder.
:param token_lens (list): token lengths (word piece numbers)
:return (list): first word piece indices (offsets)
"""
max_token_num = max([len(x) for x in token_lens])
offsets = []
for seq_token_lens in token_lens:
seq_offsets = [0]
for l in seq_token_lens[:-1]:
seq_offsets.append(seq_offsets[-1] + l)
offsets.append(seq_offsets + [-1] * (max_token_num - len(seq_offsets)))
return offsets
def token_lens_to_idxs(token_lens):
"""Map token lengths to a word piece index matrix (for torch.gather) and a
mask tensor.
For example (only show a sequence instead of a batch):
token lengths: [1,1,1,3,1]
=>
indices: [[0,0,0], [1,0,0], [2,0,0], [3,4,5], [6,0,0]]
masks: [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0],
[0.33, 0.33, 0.33], [1.0, 0.0, 0.0]]
Next, we use torch.gather() to select vectors of word pieces for each token,
and average them as follows (incomplete code):
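    (illustrative sketch only; `bert_outputs` is assumed to be the word-piece
    encoder output of shape [batch_size, piece_num, bert_dim], with idxs/masks
    converted to tensors and expanded along the hidden dimension)
        outputs = torch.gather(bert_outputs, 1, idxs) * masks
        outputs = outputs.view(batch_size, token_num, token_len, bert_dim)
        outputs = outputs.sum(2)  # masks hold 1/token_len, so the sum is an average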
:param token_lens (list): token lengths.
    :return: an index matrix and a mask tensor.
"""
max_token_num = max([len(x) for x in token_lens])
max_token_len = max([max(x) for x in token_lens])
idxs, masks = [], []
for seq_token_lens in token_lens:
seq_idxs, seq_masks = [], []
offset = 0
for token_len in seq_token_lens:
seq_idxs.extend([i + offset for i in range(token_len)]
+ [-1] * (max_token_len - token_len))
seq_masks.extend([1.0 / token_len] * token_len
+ [0.0] * (max_token_len - token_len))
offset += token_len
seq_idxs.extend([-1] * max_token_len * (max_token_num - len(seq_token_lens)))
seq_masks.extend([0.0] * max_token_len * (max_token_num - len(seq_token_lens)))
idxs.append(seq_idxs)
masks.append(seq_masks)
return idxs, masks, max_token_num, max_token_len
def graphs_to_node_idxs(graphs):
"""
:param graphs (list): A list of Graph objects.
:return: entity/trigger index matrix, mask tensor, max number, and max length
"""
entity_idxs, entity_masks = [], []
trigger_idxs, trigger_masks = [], []
max_entity_num = max(max(graph.entity_num for graph in graphs), 1)
max_trigger_num = max(max(graph.trigger_num for graph in graphs), 1)
max_entity_len = max(max([e[1] - e[0] for e in graph.entities] + [1])
for graph in graphs)
max_trigger_len = max(max([t[1] - t[0] for t in graph.triggers] + [1])
for graph in graphs)
for graph in graphs:
seq_entity_idxs, seq_entity_masks = [], []
seq_trigger_idxs, seq_trigger_masks = [], []
for entity in graph.entities:
entity_len = entity[1] - entity[0]
seq_entity_idxs.extend([i for i in range(entity[0], entity[1])])
seq_entity_idxs.extend([0] * (max_entity_len - entity_len))
seq_entity_masks.extend([1.0 / entity_len] * entity_len)
seq_entity_masks.extend([0.0] * (max_entity_len - entity_len))
seq_entity_idxs.extend([0] * max_entity_len * (max_entity_num - graph.entity_num))
seq_entity_masks.extend([0.0] * max_entity_len * (max_entity_num - graph.entity_num))
entity_idxs.append(seq_entity_idxs)
entity_masks.append(seq_entity_masks)
for trigger in graph.triggers:
trigger_len = trigger[1] - trigger[0]
seq_trigger_idxs.extend([i for i in range(trigger[0], trigger[1])])
seq_trigger_idxs.extend([0] * (max_trigger_len - trigger_len))
seq_trigger_masks.extend([1.0 / trigger_len] * trigger_len)
seq_trigger_masks.extend([0.0] * (max_trigger_len - trigger_len))
seq_trigger_idxs.extend([0] * max_trigger_len * (max_trigger_num - graph.trigger_num))
seq_trigger_masks.extend([0.0] * max_trigger_len * (max_trigger_num - graph.trigger_num))
trigger_idxs.append(seq_trigger_idxs)
trigger_masks.append(seq_trigger_masks)
return (
entity_idxs, entity_masks, max_entity_num, max_entity_len,
trigger_idxs, trigger_masks, max_trigger_num, max_trigger_len,
)
def graphs_to_label_idxs(graphs, max_entity_num=-1, max_trigger_num=-1,
relation_directional=False,
symmetric_relation_idxs=None):
"""Convert a list of graphs to label index and mask matrices
:param graphs (list): A list of Graph objects.
:param max_entity_num (int) Max entity number (default = -1).
:param max_trigger_num (int) Max trigger number (default = -1).
"""
if max_entity_num == -1:
max_entity_num = max(max([g.entity_num for g in graphs]), 1)
if max_trigger_num == -1:
max_trigger_num = max(max([g.trigger_num for g in graphs]), 1)
(
batch_entity_idxs, batch_entity_mask,
batch_trigger_idxs, batch_trigger_mask,
batch_relation_idxs, batch_relation_mask,
batch_role_idxs, batch_role_mask
) = [[] for _ in range(8)]
for graph in graphs:
(
entity_idxs, entity_mask, trigger_idxs, trigger_mask,
relation_idxs, relation_mask, role_idxs, role_mask,
) = graph.to_label_idxs(max_entity_num, max_trigger_num,
relation_directional=relation_directional,
symmetric_relation_idxs=symmetric_relation_idxs)
batch_entity_idxs.append(entity_idxs)
batch_entity_mask.append(entity_mask)
batch_trigger_idxs.append(trigger_idxs)
batch_trigger_mask.append(trigger_mask)
batch_relation_idxs.append(relation_idxs)
batch_relation_mask.append(relation_mask)
batch_role_idxs.append(role_idxs)
batch_role_mask.append(role_mask)
return (
batch_entity_idxs, batch_entity_mask,
batch_trigger_idxs, batch_trigger_mask,
batch_relation_idxs, batch_relation_mask,
batch_role_idxs, batch_role_mask
)
def generate_pairwise_idxs(num1, num2):
"""Generate all pairwise combinations among entity mentions (relation) or
event triggers and entity mentions (argument role).
For example, if there are 2 triggers and 3 mentions in a sentence, num1 = 2,
and num2 = 3. We generate the following vector:
idxs = [0, 2, 0, 3, 0, 4, 1, 2, 1, 3, 1, 4]
Suppose `trigger_reprs` and `entity_reprs` are trigger/entity representation
tensors. We concatenate them using:
te_reprs = torch.cat([entity_reprs, entity_reprs], dim=1)
After that we select vectors from `te_reprs` using (incomplete code) to obtain
pairwise combinations of all trigger and entity vectors.
te_reprs = torch.gather(te_reprs, 1, idxs)
te_reprs = te_reprs.view(batch_size, -1, 2 * bert_dim)
:param num1: trigger number (argument role) or entity number (relation)
:param num2: entity number (relation)
:return (list): a list of indices
"""
idxs = []
for i in range(num1):
for j in range(num2):
idxs.append(i)
idxs.append(j + num1)
return idxs
def tag_paths_to_spans(paths, token_nums, vocab):
"""Convert predicted tag paths to a list of spans (entity mentions or event
triggers).
:param paths: predicted tag paths.
:return (list): a list (batch) of lists (sequence) of spans.
"""
batch_mentions = []
itos = {i: s for s, i in vocab.items()}
for i, path in enumerate(paths):
mentions = []
cur_mention = None
path = path.tolist()[:token_nums[i].item()]
for j, tag in enumerate(path):
tag = itos[tag]
if tag == 'O':
prefix = tag = 'O'
else:
prefix, tag = tag.split('-', 1)
if prefix == 'B':
if cur_mention:
mentions.append(cur_mention)
cur_mention = [j, j + 1, tag]
elif prefix == 'I':
if cur_mention is None:
# treat it as B-*
cur_mention = [j, j + 1, tag]
elif cur_mention[-1] == tag:
cur_mention[1] = j + 1
else:
# treat it as B-*
mentions.append(cur_mention)
cur_mention = [j, j + 1, tag]
else:
if cur_mention:
mentions.append(cur_mention)
cur_mention = None
if cur_mention:
mentions.append(cur_mention)
batch_mentions.append(mentions)
return batch_mentions
class Linears(nn.Module):
"""Multiple linear layers with Dropout."""
def __init__(self, dimensions, activation='relu', dropout_prob=0.0, bias=True):
super().__init__()
assert len(dimensions) > 1
self.layers = nn.ModuleList([nn.Linear(dimensions[i], dimensions[i + 1], bias=bias)
for i in range(len(dimensions) - 1)])
self.activation = getattr(torch, activation)
self.dropout = nn.Dropout(dropout_prob)
def forward(self, inputs):
for i, layer in enumerate(self.layers):
if i > 0:
inputs = self.activation(inputs)
inputs = self.dropout(inputs)
inputs = layer(inputs)
return inputs
class CRF(nn.Module):
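    # Linear-chain CRF over BIO-style tag sequences (used on top of the token-level
    # entity/trigger classifiers).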
def __init__(self, label_vocab, bioes=False):
super(CRF, self).__init__()
self.label_vocab = label_vocab
self.label_size = len(label_vocab) + 2
# self.same_type = self.map_same_types()
self.bioes = bioes
self.start = self.label_size - 2
self.end = self.label_size - 1
transition = torch.randn(self.label_size, self.label_size)
self.transition = nn.Parameter(transition)
self.initialize()
def initialize(self):
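        # Forbid transitions that violate the BIO/BIOES scheme (and illegal moves
        # to/from the virtual start/end tags) by assigning them a large negative score.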
self.transition.data[:, self.end] = -100.0
self.transition.data[self.start, :] = -100.0
for label, label_idx in self.label_vocab.items():
if label.startswith('I-') or label.startswith('E-'):
self.transition.data[label_idx, self.start] = -100.0
if label.startswith('B-') or label.startswith('I-'):
self.transition.data[self.end, label_idx] = -100.0
for label_from, label_from_idx in self.label_vocab.items():
if label_from == 'O':
label_from_prefix, label_from_type = 'O', 'O'
else:
label_from_prefix, label_from_type = label_from.split('-', 1)
for label_to, label_to_idx in self.label_vocab.items():
if label_to == 'O':
label_to_prefix, label_to_type = 'O', 'O'
else:
label_to_prefix, label_to_type = label_to.split('-', 1)
if self.bioes:
is_allowed = any(
[
label_from_prefix in ['O', 'E', 'S']
and label_to_prefix in ['O', 'B', 'S'],
label_from_prefix in ['B', 'I']
and label_to_prefix in ['I', 'E']
and label_from_type == label_to_type
]
)
else:
is_allowed = any(
[
label_to_prefix in ['B', 'O'],
label_from_prefix in ['B', 'I']
and label_to_prefix == 'I'
and label_from_type == label_to_type
]
)
if not is_allowed:
self.transition.data[
label_to_idx, label_from_idx] = -100.0
def pad_logits(self, logits):
"""Pad the linear layer output with <SOS> and <EOS> scores.
:param logits: Linear layer output (no non-linear function).
"""
batch_size, seq_len, _ = logits.size()
pads = logits.new_full((batch_size, seq_len, 2), -100.0,
requires_grad=False)
logits = torch.cat([logits, pads], dim=2)
return logits
def calc_binary_score(self, labels, lens):
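        # Transition ("binary") scores of the gold label sequence, with the virtual
        # start/end tags prepended/appended.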
batch_size, seq_len = labels.size()
# A tensor of size batch_size * (seq_len + 2)
labels_ext = labels.new_empty((batch_size, seq_len + 2))
labels_ext[:, 0] = self.start
labels_ext[:, 1:-1] = labels
mask = sequence_mask(lens + 1, max_len=(seq_len + 2)).long()
pad_stop = labels.new_full((1,), self.end, requires_grad=False)
pad_stop = pad_stop.unsqueeze(-1).expand(batch_size, seq_len + 2)
labels_ext = (1 - mask) * pad_stop + mask * labels_ext
labels = labels_ext
trn = self.transition
trn_exp = trn.unsqueeze(0).expand(batch_size, self.label_size,
self.label_size)
lbl_r = labels[:, 1:]
lbl_rexp = lbl_r.unsqueeze(-1).expand(*lbl_r.size(), self.label_size)
# score of jumping to a tag
trn_row = torch.gather(trn_exp, 1, lbl_rexp)
lbl_lexp = labels[:, :-1].unsqueeze(-1)
trn_scr = torch.gather(trn_row, 2, lbl_lexp)
trn_scr = trn_scr.squeeze(-1)
mask = sequence_mask(lens + 1).float()
trn_scr = trn_scr * mask
score = trn_scr
return score
def calc_unary_score(self, logits, labels, lens):
"""Checked"""
labels_exp = labels.unsqueeze(-1)
scores = torch.gather(logits, 2, labels_exp).squeeze(-1)
mask = sequence_mask(lens).float()
scores = scores * mask
return scores
def calc_gold_score(self, logits, labels, lens):
"""Checked"""
unary_score = self.calc_unary_score(logits, labels, lens).sum(
1).squeeze(-1)
binary_score = self.calc_binary_score(labels, lens).sum(1).squeeze(-1)
return unary_score + binary_score
def calc_norm_score(self, logits, lens):
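        # Forward algorithm: log partition function over all possible label sequences.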
batch_size, _, _ = logits.size()
alpha = logits.new_full((batch_size, self.label_size), -100.0)
alpha[:, self.start] = 0
lens_ = lens.clone()
logits_t = logits.transpose(1, 0)
for logit in logits_t:
logit_exp = logit.unsqueeze(-1).expand(batch_size,
self.label_size,
self.label_size)
alpha_exp = alpha.unsqueeze(1).expand(batch_size,
self.label_size,
self.label_size)
trans_exp = self.transition.unsqueeze(0).expand_as(alpha_exp)
mat = logit_exp + alpha_exp + trans_exp
alpha_nxt = log_sum_exp(mat, 2).squeeze(-1)
mask = (lens_ > 0).float().unsqueeze(-1).expand_as(alpha)
alpha = mask * alpha_nxt + (1 - mask) * alpha
lens_ = lens_ - 1
alpha = alpha + self.transition[self.end].unsqueeze(0).expand_as(alpha)
norm = log_sum_exp(alpha, 1).squeeze(-1)
return norm
def loglik(self, logits, labels, lens):
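        # Sequence log-likelihood: gold path score minus the log partition function.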
norm_score = self.calc_norm_score(logits, lens)
gold_score = self.calc_gold_score(logits, labels, lens)
return gold_score - norm_score
def viterbi_decode(self, logits, lens):
"""Borrowed from pytorch tutorial
Arguments:
logits: [batch_size, seq_len, n_labels] FloatTensor
lens: [batch_size] LongTensor
"""
batch_size, _, n_labels = logits.size()
vit = logits.new_full((batch_size, self.label_size), -100.0)
vit[:, self.start] = 0
c_lens = lens.clone()
logits_t = logits.transpose(1, 0)
pointers = []
for logit in logits_t:
vit_exp = vit.unsqueeze(1).expand(batch_size, n_labels, n_labels)
trn_exp = self.transition.unsqueeze(0).expand_as(vit_exp)
vit_trn_sum = vit_exp + trn_exp
vt_max, vt_argmax = vit_trn_sum.max(2)
vt_max = vt_max.squeeze(-1)
vit_nxt = vt_max + logit
pointers.append(vt_argmax.squeeze(-1).unsqueeze(0))
mask = (c_lens > 0).float().unsqueeze(-1).expand_as(vit_nxt)
vit = mask * vit_nxt + (1 - mask) * vit
mask = (c_lens == 1).float().unsqueeze(-1).expand_as(vit_nxt)
vit += mask * self.transition[self.end].unsqueeze(
0).expand_as(vit_nxt)
c_lens = c_lens - 1
pointers = torch.cat(pointers)
scores, idx = vit.max(1)
paths = [idx.unsqueeze(1)]
for argmax in reversed(pointers):
idx_exp = idx.unsqueeze(-1)
idx = torch.gather(argmax, 1, idx_exp)
idx = idx.squeeze(-1)
paths.insert(0, idx.unsqueeze(1))
paths = torch.cat(paths[1:], 1)
scores = scores.squeeze(-1)
return scores, paths
def calc_conf_score_(self, logits, labels):
batch_size, _, _ = logits.size()
logits_t = logits.transpose(1, 0)
scores = [[] for _ in range(batch_size)]
pre_labels = [self.start] * batch_size
for i, logit in enumerate(logits_t):
logit_exp = logit.unsqueeze(-1).expand(batch_size,
self.label_size,
self.label_size)
trans_exp = self.transition.unsqueeze(0).expand(batch_size,
self.label_size,
self.label_size)
score = logit_exp + trans_exp
score = score.view(-1, self.label_size * self.label_size) \
.softmax(1)
for j in range(batch_size):
cur_label = labels[j][i]
cur_score = score[j][cur_label * self.label_size + pre_labels[j]]
scores[j].append(cur_score)
pre_labels[j] = cur_label
return scores
class AMRIEE2EModel(nn.Module):
def __init__(self,
config,
vocabs,
valid_patterns=None):
super().__init__()
self.if_local = 0
# vocabularies
self.vocabs = vocabs
self.entity_label_stoi = vocabs['entity_label']
self.trigger_label_stoi = vocabs['trigger_label']
self.mention_type_stoi = vocabs['mention_type']
self.entity_type_stoi = vocabs['entity_type']
self.event_type_stoi = vocabs['event_type']
self.relation_type_stoi = vocabs['relation_type']
self.role_type_stoi = vocabs['role_type']
self.entity_label_itos = {i:s for s, i in self.entity_label_stoi.items()}
self.trigger_label_itos = {i:s for s, i in self.trigger_label_stoi.items()}
self.entity_type_itos = {i: s for s, i in self.entity_type_stoi.items()}
self.event_type_itos = {i: s for s, i in self.event_type_stoi.items()}
self.relation_type_itos = {i: s for s, i in self.relation_type_stoi.items()}
self.role_type_itos = {i: s for s, i in self.role_type_stoi.items()}
self.entity_label_num = len(self.entity_label_stoi)
self.trigger_label_num = len(self.trigger_label_stoi)
self.mention_type_num = len(self.mention_type_stoi)
self.entity_type_num = len(self.entity_type_stoi)
self.event_type_num = len(self.event_type_stoi)
self.relation_type_num = len(self.relation_type_stoi)
self.role_type_num = len(self.role_type_stoi)
self.valid_relation_entity = set()
self.valid_event_role = set()
self.valid_role_entity = set()
if valid_patterns:
self.valid_event_role = valid_patterns['event_role']
self.valid_relation_entity = valid_patterns['relation_entity']
self.valid_role_entity = valid_patterns['role_entity']
self.relation_directional = config.relation_directional
self.symmetric_relations = config.symmetric_relations
self.symmetric_relation_idxs = {self.relation_type_stoi[r]
for r in self.symmetric_relations}
# BERT encoder
self.pretrained_model_name = config.pretrained_model_name
self.cache_dir = config.cache_dir
if self.pretrained_model_name.startswith('bert-'):
self.bert = BertModel.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir,
output_hidden_states=True)
self.bert_config = BertConfig.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir)
elif self.pretrained_model_name.startswith('roberta-'):
self.bert = RobertaModel.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir,
output_hidden_states=True)
self.bert_config = RobertaConfig.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir)
elif self.pretrained_model_name.startswith('xlm-'):
self.bert = XLMRobertaModel.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir,
output_hidden_states=True)
self.bert_config = XLMRobertaConfig.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir)
else:
raise ValueError
self.bert_dim = self.bert_config.hidden_size
self.extra_bert = config.extra_bert
self.use_extra_bert = config.use_extra_bert
if self.use_extra_bert:
self.bert_dim *= 2
# print(self.use_extra_bert)
# print(bert_config)
# self.bert = BertModel(bert_config)
self.bert_dropout = nn.Dropout(p=config.bert_dropout)
self.multi_piece = config.multi_piece_strategy
# local classifiers
self.use_entity_type = config.use_entity_type
self.binary_dim = self.bert_dim * 2
linear_bias = config.linear_bias
linear_dropout = config.linear_dropout
entity_hidden_num = config.entity_hidden_num
mention_hidden_num = config.mention_hidden_num
event_hidden_num = config.event_hidden_num
relation_hidden_num = config.relation_hidden_num
role_hidden_num = config.role_hidden_num
self.edge_type_num = config.edge_type_num
self.edge_type_dim = config.edge_type_dim
self.use_graph_encoder = config.use_graph_encoder
gnn_layers = config.gnn_layers
self.lamda = config.lamda
role_input_dim = self.binary_dim + (self.entity_type_num if self.use_entity_type else 0)
self.device = config.gpu_device
# print(self.bert_dim)
if self.use_graph_encoder:
if not self.if_local: | self.graph_encoder = FinalGNN(self.bert_dim, self.edge_type_dim, self.edge_type_num, gnn_layers, self.lamda, config.gpu_device) | 4 | 2023-11-15 21:32:56+00:00 | 12k |
chaiNNer-org/spandrel | src/spandrel/architectures/GRLIR/arch/grl.py | [
{
"identifier": "to_2tuple",
"path": "src/spandrel/architectures/__arch_helpers/timm/helpers.py",
"snippet": "def to_2tuple(x: T | Iterable[T]) -> tuple[T, T]:\n if isinstance(x, str):\n return x, x # type: ignore\n if isinstance(x, collections.abc.Iterable):\n return tuple(x) # type: ignore\n return x, x"
},
{
"identifier": "trunc_normal_",
"path": "src/spandrel/architectures/__arch_helpers/timm/weight_init.py",
"snippet": "def trunc_normal_(\n tensor: torch.Tensor, mean=0.0, std=1.0, a=-2.0, b=2.0\n) -> torch.Tensor:\n r\"\"\"\n Fills the input Tensor with values drawn from a truncated\n normal distribution. The values are effectively drawn from the\n normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`\n with values outside :math:`[a, b]` redrawn until they are within\n the bounds. The method used for generating the random values works\n best when :math:`a \\leq \\text{mean} \\leq b`.\n\n NOTE: this impl is similar to the PyTorch `trunc_normal_`, the bounds [a, b] are\n applied while sampling the normal with mean/std applied, therefore a, b args\n should be adjusted to match the range of mean, std args.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n mean: the mean of the normal distribution\n std: the standard deviation of the normal distribution\n a: the minimum cutoff value\n b: the maximum cutoff value\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.trunc_normal_(w)\n \"\"\"\n return _no_grad_trunc_normal_(tensor, mean, std, a, b)"
},
{
"identifier": "GRLConfig",
"path": "src/spandrel/architectures/GRLIR/arch/config.py",
"snippet": "class GRLConfig:\n out_proj_type: Literal[\"linear\", \"conv2d\"] = \"linear\"\n \"\"\"\n Type of the output projection in the self-attention modules.\n \"\"\"\n local_connection: bool = False\n \"\"\"\n Whether to enable the local modelling module (two convs followed by Channel attention). For GRL base model, this is used.\n \"\"\"\n euclidean_dist: bool = False\n \"\"\"\n use Euclidean distance or inner product as the similarity metric. An ablation study.\n \"\"\"\n double_window: bool = False\n stripe_square: bool = False\n separable_conv_act: bool = False\n use_buffer: bool = False\n \"\"\"\n Whether to use buffer.\n False: the attention masks, tables, and indices are pre-computed. Huge GPU memory consumption when the window size is large.\n True:\n use_efficient_buffer=False: buffers are not shared. computed for each layer during forward pass. Slow forward pass.\n use_efficient_buffer=True: pre-computed and shared buffers. Small GPU memory consumption, fast forward pass. Need to allocate buffers manually.\n \"\"\"\n use_efficient_buffer: bool = False\n \"\"\"\n Whether to use efficient buffer.\n \"\"\""
},
{
"identifier": "EfficientMixAttnTransformerBlock",
"path": "src/spandrel/architectures/GRLIR/arch/mixed_attn_block_efficient.py",
"snippet": "class EfficientMixAttnTransformerBlock(nn.Module):\n r\"\"\"Mix attention transformer block with shared QKV projection and output projection for mixed attention modules.\n Args:\n dim (int): Number of input channels.\n input_resolution (tuple[int]): Input resulotion.\n num_heads (int): Number of attention heads.\n window_size (int): Window size.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n drop (float, optional): Dropout rate. Default: 0.0\n attn_drop (float, optional): Attention dropout rate. Default: 0.0\n drop_path (float, optional): Stochastic depth rate. Default: 0.0\n act_layer (nn.Module, optional): Activation layer. Default: nn.GELU\n norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n pretrained_stripe_size (int): Window size in pre-training.\n attn_type (str, optional): Attention type. Default: cwhv.\n c: residual blocks\n w: window attention\n h: horizontal stripe attention\n v: vertical stripe attention\n \"\"\"\n\n def __init__(\n self,\n dim: int,\n input_resolution: tuple[int, int],\n num_heads_w: int,\n num_heads_s: int,\n window_size: tuple[int, int] | int = 7,\n window_shift=False,\n stripe_size=[8, 8],\n stripe_groups=[None, None],\n stripe_shift=False,\n stripe_type=\"H\",\n mlp_ratio=4.0,\n qkv_bias=True,\n qkv_proj_type=\"linear\",\n anchor_proj_type=\"separable_conv\",\n anchor_one_stage=True,\n anchor_window_down_factor=1,\n drop=0.0,\n attn_drop=0.0,\n drop_path=0.0,\n act_layer=nn.GELU,\n norm_layer=nn.LayerNorm,\n pretrained_window_size=[0, 0],\n pretrained_stripe_size=[0, 0],\n res_scale=1.0,\n args: GRLConfig = None, # type: ignore\n ):\n super().__init__()\n self.dim = dim\n self.input_resolution = input_resolution\n self.num_heads_w = num_heads_w\n self.num_heads_s = num_heads_s\n self.window_size = window_size\n self.window_shift = window_shift\n self.stripe_shift = stripe_shift\n self.stripe_type = stripe_type\n self.args = args\n if self.stripe_type == \"W\":\n self.stripe_size = stripe_size[::-1]\n self.stripe_groups = stripe_groups[::-1]\n else:\n self.stripe_size = stripe_size\n self.stripe_groups = stripe_groups\n self.mlp_ratio = mlp_ratio\n self.res_scale = res_scale\n\n self.attn = MixedAttention(\n dim,\n input_resolution,\n num_heads_w,\n num_heads_s,\n window_size,\n window_shift,\n self.stripe_size,\n self.stripe_groups,\n stripe_shift,\n qkv_bias,\n qkv_proj_type,\n anchor_proj_type,\n anchor_one_stage,\n anchor_window_down_factor,\n attn_drop,\n drop,\n pretrained_window_size,\n pretrained_stripe_size,\n args,\n )\n self.norm1 = norm_layer(dim)\n if self.args.local_connection:\n self.conv = CAB(dim)\n\n self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()\n\n self.mlp = Mlp(\n in_features=dim,\n hidden_features=int(dim * mlp_ratio),\n act_layer=act_layer,\n drop=drop,\n )\n self.norm2 = norm_layer(dim)\n\n def _get_table_index_mask(self, all_table_index_mask):\n table_index_mask = {\n \"table_w\": all_table_index_mask[\"table_w\"],\n \"index_w\": all_table_index_mask[\"index_w\"],\n }\n if self.stripe_type == \"W\":\n table_index_mask[\"table_s\"] = all_table_index_mask[\"table_sv\"]\n table_index_mask[\"index_a2w\"] = all_table_index_mask[\"index_sv_a2w\"]\n table_index_mask[\"index_w2a\"] = all_table_index_mask[\"index_sv_w2a\"]\n else:\n table_index_mask[\"table_s\"] = all_table_index_mask[\"table_sh\"]\n table_index_mask[\"index_a2w\"] = 
all_table_index_mask[\"index_sh_a2w\"]\n table_index_mask[\"index_w2a\"] = all_table_index_mask[\"index_sh_w2a\"]\n if self.window_shift:\n table_index_mask[\"mask_w\"] = all_table_index_mask[\"mask_w\"]\n else:\n table_index_mask[\"mask_w\"] = None\n if self.stripe_shift:\n if self.stripe_type == \"W\":\n table_index_mask[\"mask_a2w\"] = all_table_index_mask[\"mask_sv_a2w\"]\n table_index_mask[\"mask_w2a\"] = all_table_index_mask[\"mask_sv_w2a\"]\n else:\n table_index_mask[\"mask_a2w\"] = all_table_index_mask[\"mask_sh_a2w\"]\n table_index_mask[\"mask_w2a\"] = all_table_index_mask[\"mask_sh_w2a\"]\n else:\n table_index_mask[\"mask_a2w\"] = None\n table_index_mask[\"mask_w2a\"] = None\n return table_index_mask\n\n def forward(self, x, x_size, all_table_index_mask):\n # Mixed attention\n table_index_mask = self._get_table_index_mask(all_table_index_mask)\n if self.args.local_connection:\n x = (\n x\n + self.res_scale\n * self.drop_path(self.norm1(self.attn(x, x_size, table_index_mask)))\n + self.conv(x, x_size)\n )\n else:\n x = x + self.res_scale * self.drop_path(\n self.norm1(self.attn(x, x_size, table_index_mask))\n )\n # FFN\n x = x + self.res_scale * self.drop_path(self.norm2(self.mlp(x)))\n\n return x\n\n def extra_repr(self) -> str:\n return (\n f\"dim={self.dim}, input_resolution={self.input_resolution}, num_heads=({self.num_heads_w}, {self.num_heads_s}), \"\n f\"window_size={self.window_size}, window_shift={self.window_shift}, \"\n f\"stripe_size={self.stripe_size}, stripe_groups={self.stripe_groups}, stripe_shift={self.stripe_shift}, self.stripe_type={self.stripe_type}, \"\n f\"mlp_ratio={self.mlp_ratio}, res_scale={self.res_scale}\"\n )\n\n def flops(self):\n pass"
},
{
"identifier": "get_stripe_info",
"path": "src/spandrel/architectures/GRLIR/arch/mixed_attn_block_efficient.py",
"snippet": "def get_stripe_info(\n stripe_size_in: list[int],\n stripe_groups_in: list[int | None],\n stripe_shift: bool,\n input_resolution: list[int] | tuple[int, int],\n):\n stripe_size: list[int] = []\n shift_size: list[int] = []\n for s, g, d in zip(stripe_size_in, stripe_groups_in, input_resolution):\n if g is None:\n stripe_size.append(s)\n shift_size.append(s // 2 if stripe_shift else 0)\n else:\n stripe_size.append(d // g)\n shift_size.append(0 if g == 1 else d // (g * 2))\n return stripe_size, shift_size"
},
{
"identifier": "bchw_to_blc",
"path": "src/spandrel/architectures/GRLIR/arch/ops.py",
"snippet": "def bchw_to_blc(x: torch.Tensor) -> torch.Tensor:\n \"\"\"Rearrange a tensor from the shape (B, C, H, W) to (B, L, C).\"\"\"\n return x.flatten(2).transpose(1, 2)"
},
{
"identifier": "blc_to_bchw",
"path": "src/spandrel/architectures/GRLIR/arch/ops.py",
"snippet": "def blc_to_bchw(x: torch.Tensor, x_size: tuple[int, int]) -> torch.Tensor:\n \"\"\"Rearrange a tensor from the shape (B, L, C) to (B, C, H, W).\"\"\"\n B, _L, C = x.shape\n return x.transpose(1, 2).view(B, C, *x_size)"
},
{
"identifier": "calculate_mask",
"path": "src/spandrel/architectures/GRLIR/arch/ops.py",
"snippet": "def calculate_mask(input_resolution, window_size, shift_size):\n \"\"\"\n Use case: 1)\n \"\"\"\n # calculate attention mask for SW-MSA\n if isinstance(shift_size, int):\n shift_size = to_2tuple(shift_size)\n mask_windows = _fill_window(input_resolution, window_size, shift_size)\n\n attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(\n attn_mask == 0, 0.0\n ) # nW, window_size**2, window_size**2\n\n return attn_mask"
},
{
"identifier": "calculate_mask_all",
"path": "src/spandrel/architectures/GRLIR/arch/ops.py",
"snippet": "def calculate_mask_all(\n input_resolution,\n window_size,\n shift_size,\n anchor_window_down_factor=1,\n window_to_anchor=True,\n):\n \"\"\"\n Use case: 3)\n \"\"\"\n # calculate attention mask for SW-MSA\n anchor_resolution = [s // anchor_window_down_factor for s in input_resolution]\n aws = [s // anchor_window_down_factor for s in window_size]\n anchor_shift = [s // anchor_window_down_factor for s in shift_size]\n\n # mask of window1: nW, Wh**Ww\n mask_windows = _fill_window(input_resolution, window_size, shift_size)\n # mask of window2: nW, AWh*AWw\n mask_anchor = _fill_window(anchor_resolution, aws, anchor_shift)\n\n if window_to_anchor:\n attn_mask = mask_windows.unsqueeze(2) - mask_anchor.unsqueeze(1)\n else:\n attn_mask = mask_anchor.unsqueeze(2) - mask_windows.unsqueeze(1)\n attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(\n attn_mask == 0, 0.0\n ) # nW, Wh**Ww, AWh*AWw\n\n return attn_mask"
},
{
"identifier": "get_relative_coords_table_all",
"path": "src/spandrel/architectures/GRLIR/arch/ops.py",
"snippet": "def get_relative_coords_table_all(\n window_size: tuple[int, int] | list[int],\n pretrained_window_size: tuple[int, int] | list[int] = [0, 0],\n anchor_window_down_factor=1,\n):\n \"\"\"\n Use case: 3)\n\n Support all window shapes.\n Args:\n window_size:\n pretrained_window_size:\n anchor_window_down_factor:\n\n Returns:\n\n \"\"\"\n # get relative_coords_table\n ws = window_size\n aws = [w // anchor_window_down_factor for w in window_size]\n pws = pretrained_window_size\n paws = [w // anchor_window_down_factor for w in pretrained_window_size]\n\n # positive table size: (Ww - 1) - (Ww - AWw) // 2\n ts_p = [w1 - 1 - (w1 - w2) // 2 for w1, w2 in zip(ws, aws)]\n # negative table size: -(AWw - 1) - (Ww - AWw) // 2\n ts_n = [-(w2 - 1) - (w1 - w2) // 2 for w1, w2 in zip(ws, aws)]\n pts = [w1 - 1 - (w1 - w2) // 2 for w1, w2 in zip(pws, paws)]\n\n # TODO: pretrained window size and pretrained anchor window size is only used here.\n # TODO: Investigate whether it is really important to use this setting when finetuning large window size\n # TODO: based on pretrained weights with small window size.\n\n coord_h = torch.arange(ts_n[0], ts_p[0] + 1, dtype=torch.float32)\n coord_w = torch.arange(ts_n[1], ts_p[1] + 1, dtype=torch.float32)\n table = torch.stack(torch.meshgrid([coord_h, coord_w], indexing=\"ij\")).permute(\n 1, 2, 0\n )\n table = table.contiguous().unsqueeze(0) # 1, Wh+AWh-1, Ww+AWw-1, 2\n if pts[0] > 0:\n table[:, :, :, 0] /= pts[0]\n table[:, :, :, 1] /= pts[1]\n else:\n table[:, :, :, 0] /= ts_p[0]\n table[:, :, :, 1] /= ts_p[1]\n table *= 8 # normalize to -8, 8\n table = torch.sign(table) * torch.log2(torch.abs(table) + 1.0) / np.log2(8)\n # 1, Wh+AWh-1, Ww+AWw-1, 2\n return table"
},
{
"identifier": "get_relative_position_index_simple",
"path": "src/spandrel/architectures/GRLIR/arch/ops.py",
"snippet": "def get_relative_position_index_simple(\n window_size: tuple[int, int] | list[int],\n anchor_window_down_factor=1,\n window_to_anchor=True,\n):\n \"\"\"\n Use case: 3)\n This is a simplified version of get_relative_position_index_all\n The start coordinate of anchor window is also (0, 0)\n get pair-wise relative position index for each token inside the window\n \"\"\"\n ws = window_size\n aws = [w // anchor_window_down_factor for w in window_size]\n\n coords = _get_meshgrid_coords((0, 0), window_size) # 2, Wh*Ww\n coords_anchor = _get_meshgrid_coords((0, 0), aws)\n # 2, AWh*AWw\n\n max_horizontal_diff = aws[1] + ws[1] - 1\n if window_to_anchor:\n offset = [w2 - 1 for w2 in aws]\n idx = coords_diff_odd(coords, coords_anchor, offset, max_horizontal_diff)\n else:\n offset = [w1 - 1 for w1 in ws]\n idx = coords_diff_odd(coords_anchor, coords, offset, max_horizontal_diff)\n return idx # Wh*Ww, AWh*AWw or AWh*AWw, Wh*Ww"
},
{
"identifier": "build_last_conv",
"path": "src/spandrel/architectures/GRLIR/arch/swin_v1_block.py",
"snippet": "def build_last_conv(conv_type: str, dim: int):\n if conv_type == \"1conv\":\n block = nn.Conv2d(dim, dim, 3, 1, 1)\n elif conv_type == \"3conv\":\n # to save parameters and memory\n block = nn.Sequential(\n nn.Conv2d(dim, dim // 4, 3, 1, 1),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Conv2d(dim // 4, dim // 4, 1, 1, 0),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Conv2d(dim // 4, dim, 3, 1, 1),\n )\n elif conv_type == \"1conv1x1\":\n block = nn.Conv2d(dim, dim, 1, 1, 0)\n elif conv_type == \"linear\":\n block = Linear(dim, dim)\n else:\n raise ValueError(f\"Unsupported conv_type {conv_type}\")\n return block"
},
{
"identifier": "Upsample",
"path": "src/spandrel/architectures/GRLIR/arch/upsample.py",
"snippet": "class Upsample(nn.Module):\n \"\"\"Upsample module.\n Args:\n scale (int): Scale factor. Supported scales: 2^n and 3.\n num_feat (int): Channel number of intermediate features.\n \"\"\"\n\n def __init__(self, scale, num_feat):\n super().__init__()\n m = []\n if (scale & (scale - 1)) == 0: # scale = 2^n\n for _ in range(int(math.log(scale, 2))):\n m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))\n m.append(nn.PixelShuffle(2))\n elif scale == 3:\n m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))\n m.append(nn.PixelShuffle(3))\n else:\n raise ValueError(\n f\"scale {scale} is not supported. \" \"Supported scales: 2^n and 3.\"\n )\n self.up = nn.Sequential(*m)\n\n def forward(self, x):\n return self.up(x)"
},
{
"identifier": "UpsampleOneStep",
"path": "src/spandrel/architectures/GRLIR/arch/upsample.py",
"snippet": "class UpsampleOneStep(nn.Module):\n \"\"\"UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)\n Used in lightweight SR to save parameters.\n Args:\n scale (int): Scale factor. Supported scales: 2^n and 3.\n num_feat (int): Channel number of intermediate features.\n \"\"\"\n\n def __init__(self, scale, num_feat, num_out_ch):\n super().__init__()\n self.num_feat = num_feat\n m = []\n m.append(nn.Conv2d(num_feat, (scale**2) * num_out_ch, 3, 1, 1))\n m.append(nn.PixelShuffle(scale))\n self.up = nn.Sequential(*m)\n\n def forward(self, x):\n return self.up(x)"
}
] | from typing import Literal
from ...__arch_helpers.timm.helpers import to_2tuple
from ...__arch_helpers.timm.weight_init import trunc_normal_
from .config import GRLConfig
from .mixed_attn_block_efficient import (
EfficientMixAttnTransformerBlock,
get_stripe_info,
)
from .ops import (
bchw_to_blc,
blc_to_bchw,
calculate_mask,
calculate_mask_all,
get_relative_coords_table_all,
get_relative_position_index_simple,
)
from .swin_v1_block import (
build_last_conv,
)
from .upsample import Upsample, UpsampleOneStep
from fairscale.nn import checkpoint_wrapper # type: ignore
import torch
import torch.nn as nn
import torch.nn.functional as F | 8,600 | self.stripe_size = stripe_size
self.stripe_groups = stripe_groups
self.pretrained_window_size = pretrained_window_size
self.pretrained_stripe_size = pretrained_stripe_size
self.anchor_window_down_factor = anchor_window_down_factor
# Head of the network. First convolution.
self.conv_first = nn.Conv2d(in_channels, embed_dim, 3, 1, 1)
# Body of the network
self.norm_start = norm_layer(embed_dim)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule
args = GRLConfig(
out_proj_type=out_proj_type,
local_connection=local_connection,
euclidean_dist=euclidean_dist,
)
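        # Pre-compute the relative-position tables, attention indices and shift masks once and
        # register them as non-persistent buffers: they follow .to(device) moves of the module
        # but are not written into the state_dict.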
for k, v in self.set_table_index_mask(self.input_resolution).items():
self.register_buffer(k, v, persistent=False)
self.layers = nn.ModuleList()
for i in range(len(depths)):
layer = TransformerStage(
dim=embed_dim,
input_resolution=self.input_resolution,
depth=depths[i],
num_heads_window=num_heads_window[i],
num_heads_stripe=num_heads_stripe[i],
window_size=self.window_size,
stripe_size=stripe_size,
stripe_groups=stripe_groups,
stripe_shift=stripe_shift,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qkv_proj_type=qkv_proj_type,
anchor_proj_type=anchor_proj_type,
anchor_one_stage=anchor_one_stage,
anchor_window_down_factor=anchor_window_down_factor,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[
sum(depths[:i]) : sum(
depths[: i + 1]
) # type: ignore
], # no impact on SR results
norm_layer=norm_layer,
pretrained_window_size=pretrained_window_size,
pretrained_stripe_size=pretrained_stripe_size,
conv_type=conv_type,
init_method=init_method,
fairscale_checkpoint=fairscale_checkpoint,
offload_to_cpu=offload_to_cpu,
args=args,
)
self.layers.append(layer)
self.norm_end = norm_layer(embed_dim)
# Tail of the network
self.conv_after_body = build_last_conv(conv_type, embed_dim)
#####################################################################################################
################################ 3, high quality image reconstruction ################################
if self.upsampler == "pixelshuffle":
# for classical SR
self.conv_before_upsample = nn.Sequential(
nn.Conv2d(embed_dim, num_out_feats, 3, 1, 1), nn.LeakyReLU(inplace=True)
)
self.upsample = Upsample(upscale, num_out_feats)
self.conv_last = nn.Conv2d(num_out_feats, out_channels, 3, 1, 1)
elif self.upsampler == "pixelshuffledirect":
# for lightweight SR (to save parameters)
self.upsample = UpsampleOneStep(
upscale,
embed_dim,
out_channels,
)
elif self.upsampler == "nearest+conv":
# for real-world SR (less artifacts)
assert self.upscale == 4, "only support x4 now."
self.conv_before_upsample = nn.Sequential(
nn.Conv2d(embed_dim, num_out_feats, 3, 1, 1), nn.LeakyReLU(inplace=True)
)
self.conv_up1 = nn.Conv2d(num_out_feats, num_out_feats, 3, 1, 1)
self.conv_up2 = nn.Conv2d(num_out_feats, num_out_feats, 3, 1, 1)
self.conv_hr = nn.Conv2d(num_out_feats, num_out_feats, 3, 1, 1)
self.conv_last = nn.Conv2d(num_out_feats, out_channels, 3, 1, 1)
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
else:
# for image denoising and JPEG compression artifact reduction
self.conv_last = nn.Conv2d(embed_dim, out_channels, 3, 1, 1)
self.apply(self._init_weights)
if init_method in ["l", "w"] or init_method.find("t") >= 0:
for layer in self.layers:
layer._init_weights()
def set_table_index_mask(self, x_size: tuple[int, int]):
"""
        Two use cases:
1) At initialization: set the shared buffers.
2) During forward pass: get the new buffers if the resolution of the input changes
"""
# ss - stripe_size, sss - stripe_shift_size
# ss ~= self.stripe_size
# sss ~= self.stripe_size / 2
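        # e.g. with stripe_size=[8, 8], stripe_groups=[None, None] and x_size=(64, 64):
        #   ss  = [8, 8]  (groups are None, so the configured stripe size is kept)
        #   sss = [4, 4]  (half of each stripe size, because stripe_shift=True is passed below)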
ss, sss = get_stripe_info(self.stripe_size, self.stripe_groups, True, x_size)
df = self.anchor_window_down_factor
table_w = get_relative_coords_table_all(
self.window_size, self.pretrained_window_size
)
table_sh = get_relative_coords_table_all(ss, self.pretrained_stripe_size, df)
table_sv = get_relative_coords_table_all(
ss[::-1], self.pretrained_stripe_size, df
)
| """
Efficient and Explicit Modelling of Image Hierarchies for Image Restoration
Image restoration transformers with global, regional, and local modelling
A clean version of the implementation.
Shared buffers are used for relative_coords_table, relative_position_index, and attn_mask.
"""
from __future__ import annotations
class TransformerStage(nn.Module):
"""Transformer stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads_window (list[int]): Number of window attention heads in different layers.
num_heads_stripe (list[int]): Number of stripe attention heads in different layers.
stripe_size (list[int]): Stripe size. Default: [8, 8]
stripe_groups (list[int]): Number of stripe groups. Default: [None, None].
stripe_shift (bool): whether to shift the stripes. This is used as an ablation study.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qkv_proj_type (str): QKV projection type. Default: linear. Choices: linear, separable_conv.
anchor_proj_type (str): Anchor projection type. Default: avgpool. Choices: avgpool, maxpool, conv2d, separable_conv, patchmerging.
anchor_one_stage (bool): Whether to use one operator or multiple progressive operators to reduce feature map resolution. Default: True.
anchor_window_down_factor (int): The downscale factor used to get the anchors.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
pretrained_window_size (list[int]): pretrained window size. This is actually not used. Default: [0, 0].
pretrained_stripe_size (list[int]): pretrained stripe size. This is actually not used. Default: [0, 0].
conv_type: The convolutional block before residual connection.
init_method: initialization method of the weight parameters used to train large scale models.
Choices: n, normal -- Swin V1 init method.
l, layernorm -- Swin V2 init method. Zero the weight and bias in the post layer normalization layer.
r, res_rescale -- EDSR rescale method. Rescale the residual blocks with a scaling factor 0.1
w, weight_rescale -- MSRResNet rescale method. Rescale the weight parameter in residual blocks with a scaling factor 0.1
t, trunc_normal_ -- nn.Linear, trunc_normal; nn.Conv2d, weight_rescale
fairscale_checkpoint (bool): Whether to use fairscale checkpoint.
offload_to_cpu (bool): used by fairscale_checkpoint
args:
out_proj_type (str): Type of the output projection in the self-attention modules. Default: linear. Choices: linear, conv2d.
local_connection (bool): Whether to enable the local modelling module (two convs followed by Channel attention). For GRL base model, this is used. "local_connection": local_connection,
euclidean_dist (bool): use Euclidean distance or inner product as the similarity metric. An ablation study.
"""
def __init__(
self,
dim: int,
input_resolution: tuple[int, int],
depth: int,
num_heads_window: int,
num_heads_stripe: int,
window_size: tuple[int, int],
stripe_size,
stripe_groups,
stripe_shift,
mlp_ratio=4.0,
qkv_bias=True,
qkv_proj_type="linear",
anchor_proj_type="avgpool",
anchor_one_stage=True,
anchor_window_down_factor=1,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
norm_layer=nn.LayerNorm,
pretrained_window_size=[0, 0],
pretrained_stripe_size=[0, 0],
conv_type="1conv",
init_method="",
fairscale_checkpoint=False,
offload_to_cpu=False,
args: GRLConfig = None, # type: ignore
):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.init_method = init_method
self.blocks = nn.ModuleList()
for i in range(depth):
block = EfficientMixAttnTransformerBlock(
dim=dim,
input_resolution=input_resolution,
num_heads_w=num_heads_window,
num_heads_s=num_heads_stripe,
window_size=window_size,
window_shift=i % 2 == 0,
stripe_size=stripe_size,
stripe_groups=stripe_groups,
stripe_type="H" if i % 2 == 0 else "W",
stripe_shift=i % 4 in [2, 3] if stripe_shift else False,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qkv_proj_type=qkv_proj_type,
anchor_proj_type=anchor_proj_type,
anchor_one_stage=anchor_one_stage,
anchor_window_down_factor=anchor_window_down_factor,
drop=drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
pretrained_window_size=pretrained_window_size,
pretrained_stripe_size=pretrained_stripe_size,
res_scale=0.1 if init_method == "r" else 1.0,
args=args,
)
# print(fairscale_checkpoint, offload_to_cpu)
if fairscale_checkpoint:
block = checkpoint_wrapper(block, offload_to_cpu=offload_to_cpu)
self.blocks.append(block)
self.conv = build_last_conv(conv_type, dim)
def _init_weights(self):
for n, m in self.named_modules():
if self.init_method == "w":
if isinstance(m, (nn.Linear, nn.Conv2d)) and n.find("cpb_mlp") < 0:
                    print("nn.Linear and nn.Conv2d weight initialization")
m.weight.data *= 0.1
elif self.init_method == "l":
if isinstance(m, nn.LayerNorm):
print("nn.LayerNorm initialization")
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 0)
elif self.init_method.find("t") >= 0:
scale = 0.1 ** (len(self.init_method) - 1) * int(self.init_method[-1])
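                # e.g. a (hypothetical) init_method of "t06" yields scale = 0.1 ** 2 * 6 = 0.06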
if isinstance(m, nn.Linear) and n.find("cpb_mlp") < 0:
trunc_normal_(m.weight, std=scale)
elif isinstance(m, nn.Conv2d):
m.weight.data *= 0.1
print(
"Initialization nn.Linear - trunc_normal; nn.Conv2d - weight rescale."
)
else:
raise NotImplementedError(
f"Parameter initialization method {self.init_method} not implemented in TransformerStage."
)
def forward(self, x, x_size, table_index_mask):
res = x
for blk in self.blocks:
res = blk(res, x_size, table_index_mask)
res = bchw_to_blc(self.conv(blc_to_bchw(res, x_size)))
return res + x
def flops(self):
pass
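# Minimal layout sketch (illustrative only; the helper name below is not part of the original
# module): TransformerStage.forward above converts between the (B, C, H, W) feature-map layout
# and the (B, L, C) token layout using the two helpers imported from .ops.
def _blc_bchw_roundtrip_example():
    x = torch.randn(2, 96, 16, 16)  # B, C, H, W feature map
    tokens = bchw_to_blc(x)  # -> (2, 16 * 16, 96), the token layout the attention blocks use
    x_back = blc_to_bchw(tokens, (16, 16))  # restore the spatial layout
    assert x_back.shape == (2, 96, 16, 16)
    assert torch.equal(x, x_back)  # the round trip is lossless
    return x_back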
class GRL(nn.Module):
r"""Image restoration transformer with global, non-local, and local connections
Args:
img_size (int | list[int]): Input image size. Default 64
in_channels (int): Number of input image channels. Default: 3
out_channels (int): Number of output image channels. Default: None
embed_dim (int): Patch embedding dimension. Default: 96
        upscale (int): Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compression artifact reduction
img_range (float): Image range. 1. or 255.
        upsampler (str): The reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
depths (list[int]): Depth of each Swin Transformer layer.
num_heads_window (list[int]): Number of window attention heads in different layers.
num_heads_stripe (list[int]): Number of stripe attention heads in different layers.
window_size (int): Window size. Default: 8.
stripe_size (list[int]): Stripe size. Default: [8, 8]
stripe_groups (list[int]): Number of stripe groups. Default: [None, None].
stripe_shift (bool): whether to shift the stripes. This is used as an ablation study.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qkv_proj_type (str): QKV projection type. Default: linear. Choices: linear, separable_conv.
anchor_proj_type (str): Anchor projection type. Default: avgpool. Choices: avgpool, maxpool, conv2d, separable_conv, patchmerging.
anchor_one_stage (bool): Whether to use one operator or multiple progressive operators to reduce feature map resolution. Default: True.
anchor_window_down_factor (int): The downscale factor used to get the anchors.
out_proj_type (str): Type of the output projection in the self-attention modules. Default: linear. Choices: linear, conv2d.
local_connection (bool): Whether to enable the local modelling module (two convs followed by Channel attention). For GRL base model, this is used.
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
pretrained_window_size (list[int]): pretrained window size. This is actually not used. Default: [0, 0].
pretrained_stripe_size (list[int]): pretrained stripe size. This is actually not used. Default: [0, 0].
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
conv_type (str): The convolutional block before residual connection. Default: 1conv. Choices: 1conv, 3conv, 1conv1x1, linear
init_method: initialization method of the weight parameters used to train large scale models.
Choices: n, normal -- Swin V1 init method.
l, layernorm -- Swin V2 init method. Zero the weight and bias in the post layer normalization layer.
r, res_rescale -- EDSR rescale method. Rescale the residual blocks with a scaling factor 0.1
w, weight_rescale -- MSRResNet rescale method. Rescale the weight parameter in residual blocks with a scaling factor 0.1
t, trunc_normal_ -- nn.Linear, trunc_normal; nn.Conv2d, weight_rescale
fairscale_checkpoint (bool): Whether to use fairscale checkpoint.
offload_to_cpu (bool): used by fairscale_checkpoint
euclidean_dist (bool): use Euclidean distance or inner product as the similarity metric. An ablation study.
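        Example (illustrative values only, not a configuration shipped with this file):
            GRL(img_size=64, embed_dim=64, depths=[4, 4, 4, 4], num_heads_window=[2, 2, 2, 2],
                num_heads_stripe=[2, 2, 2, 2], window_size=8, upscale=4, upsampler="pixelshuffledirect")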
"""
def __init__(
self,
img_size=64,
in_channels: int = 3,
out_channels: int | None = None,
embed_dim=96,
upscale=2,
img_range=1.0,
upsampler="",
depths: list[int] = [6, 6, 6, 6, 6, 6],
num_heads_window: list[int] = [3, 3, 3, 3, 3, 3],
num_heads_stripe: list[int] = [3, 3, 3, 3, 3, 3],
window_size=8,
stripe_size: list[int] = [8, 8], # used for stripe window attention
stripe_groups: list[int | None] = [None, None],
stripe_shift=False,
mlp_ratio=4.0,
qkv_bias=True,
qkv_proj_type="linear",
anchor_proj_type="avgpool",
anchor_one_stage=True,
anchor_window_down_factor=1,
out_proj_type: Literal["linear", "conv2d"] = "linear",
local_connection=False,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
pretrained_window_size: list[int] = [0, 0],
pretrained_stripe_size: list[int] = [0, 0],
conv_type="1conv",
init_method="n", # initialization method of the weight parameters used to train large scale models.
fairscale_checkpoint=False, # fairscale activation checkpointing
offload_to_cpu=False,
euclidean_dist=False,
):
super().__init__()
# Process the input arguments
out_channels = out_channels or in_channels
self.in_channels = in_channels
self.out_channels = out_channels
num_out_feats = 64
self.embed_dim = embed_dim
self.upscale = upscale
self.upsampler = upsampler
self.img_range = img_range
if in_channels == 3:
rgb_mean = (0.4488, 0.4371, 0.4040)
self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
else:
self.mean = torch.zeros(1, 1, 1, 1)
max_stripe_size = max([0 if s is None else s for s in stripe_size]) # type: ignore
max_stripe_groups = max([0 if s is None else s for s in stripe_groups])
max_stripe_groups *= anchor_window_down_factor
self.pad_size = max(window_size, max_stripe_size, max_stripe_groups)
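        # pad_size is the coarsest spatial period among the window size, the stripe size and the
        # (factor-scaled) stripe groups, so a feature map padded to a multiple of it can be tiled
        # evenly by every attention pattern used below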
# if max_stripe_size >= window_size:
# self.pad_size *= anchor_window_down_factor
# if stripe_groups[0] is None and stripe_groups[1] is None:
# self.pad_size = max(stripe_size)
# else:
# self.pad_size = window_size
self.input_resolution = to_2tuple(img_size)
self.window_size = to_2tuple(window_size)
self.shift_size = [w // 2 for w in self.window_size]
self.stripe_size = stripe_size
self.stripe_groups = stripe_groups
self.pretrained_window_size = pretrained_window_size
self.pretrained_stripe_size = pretrained_stripe_size
self.anchor_window_down_factor = anchor_window_down_factor
# Head of the network. First convolution.
self.conv_first = nn.Conv2d(in_channels, embed_dim, 3, 1, 1)
# Body of the network
self.norm_start = norm_layer(embed_dim)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule
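        # e.g. drop_path_rate=0.1 with the default depths=[6, 6, 6, 6, 6, 6] gives 36 per-block
        # rates that increase linearly from 0.0 to 0.1 with depth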
args = GRLConfig(
out_proj_type=out_proj_type,
local_connection=local_connection,
euclidean_dist=euclidean_dist,
)
for k, v in self.set_table_index_mask(self.input_resolution).items():
self.register_buffer(k, v, persistent=False)
self.layers = nn.ModuleList()
for i in range(len(depths)):
layer = TransformerStage(
dim=embed_dim,
input_resolution=self.input_resolution,
depth=depths[i],
num_heads_window=num_heads_window[i],
num_heads_stripe=num_heads_stripe[i],
window_size=self.window_size,
stripe_size=stripe_size,
stripe_groups=stripe_groups,
stripe_shift=stripe_shift,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qkv_proj_type=qkv_proj_type,
anchor_proj_type=anchor_proj_type,
anchor_one_stage=anchor_one_stage,
anchor_window_down_factor=anchor_window_down_factor,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[
sum(depths[:i]) : sum(
depths[: i + 1]
) # type: ignore
], # no impact on SR results
norm_layer=norm_layer,
pretrained_window_size=pretrained_window_size,
pretrained_stripe_size=pretrained_stripe_size,
conv_type=conv_type,
init_method=init_method,
fairscale_checkpoint=fairscale_checkpoint,
offload_to_cpu=offload_to_cpu,
args=args,
)
self.layers.append(layer)
self.norm_end = norm_layer(embed_dim)
# Tail of the network
self.conv_after_body = build_last_conv(conv_type, embed_dim)
#####################################################################################################
################################ 3, high quality image reconstruction ################################
if self.upsampler == "pixelshuffle":
# for classical SR
self.conv_before_upsample = nn.Sequential(
nn.Conv2d(embed_dim, num_out_feats, 3, 1, 1), nn.LeakyReLU(inplace=True)
)
self.upsample = Upsample(upscale, num_out_feats)
self.conv_last = nn.Conv2d(num_out_feats, out_channels, 3, 1, 1)
elif self.upsampler == "pixelshuffledirect":
# for lightweight SR (to save parameters)
self.upsample = UpsampleOneStep(
upscale,
embed_dim,
out_channels,
)
elif self.upsampler == "nearest+conv":
# for real-world SR (less artifacts)
assert self.upscale == 4, "only support x4 now."
self.conv_before_upsample = nn.Sequential(
nn.Conv2d(embed_dim, num_out_feats, 3, 1, 1), nn.LeakyReLU(inplace=True)
)
self.conv_up1 = nn.Conv2d(num_out_feats, num_out_feats, 3, 1, 1)
self.conv_up2 = nn.Conv2d(num_out_feats, num_out_feats, 3, 1, 1)
self.conv_hr = nn.Conv2d(num_out_feats, num_out_feats, 3, 1, 1)
self.conv_last = nn.Conv2d(num_out_feats, out_channels, 3, 1, 1)
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
else:
# for image denoising and JPEG compression artifact reduction
self.conv_last = nn.Conv2d(embed_dim, out_channels, 3, 1, 1)
self.apply(self._init_weights)
if init_method in ["l", "w"] or init_method.find("t") >= 0:
for layer in self.layers:
layer._init_weights()
def set_table_index_mask(self, x_size: tuple[int, int]):
"""
        Two use cases:
1) At initialization: set the shared buffers.
2) During forward pass: get the new buffers if the resolution of the input changes
"""
# ss - stripe_size, sss - stripe_shift_size
# ss ~= self.stripe_size
# sss ~= self.stripe_size / 2
ss, sss = get_stripe_info(self.stripe_size, self.stripe_groups, True, x_size)
df = self.anchor_window_down_factor
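        # relative coordinate tables for the position bias: table_w for plain window attention,
        # table_sh / table_sv for horizontal / vertical stripe attention (the vertical variant
        # simply swaps the stripe height and width)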
table_w = get_relative_coords_table_all(
self.window_size, self.pretrained_window_size
)
table_sh = get_relative_coords_table_all(ss, self.pretrained_stripe_size, df)
table_sv = get_relative_coords_table_all(
ss[::-1], self.pretrained_stripe_size, df
)
| index_w = get_relative_position_index_simple(self.window_size) | 10 | 2023-11-17 01:11:47+00:00 | 12k |
motexture/VSeq2VSeq | models/unet.py | [
{
"identifier": "TransformerTemporalModel",
"path": "models/transformers.py",
"snippet": "class TransformerTemporalModel(ModelMixin, ConfigMixin):\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channels: Optional[int] = None,\n out_channels: Optional[int] = None,\n num_layers: int = 1,\n dropout: float = 0.0,\n norm_num_groups: int = 32,\n cross_attention_dim: Optional[int] = None,\n attention_bias: bool = False,\n sample_size: Optional[int] = None,\n activation_fn: str = \"geglu\",\n norm_elementwise_affine: bool = True,\n double_self_attention: bool = True,\n ):\n super().__init__()\n\n self.num_attention_heads = num_attention_heads\n self.attention_head_dim = attention_head_dim\n inner_dim = num_attention_heads * attention_head_dim\n\n self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n # 3. Define transformers blocks\n self.transformer_blocks = nn.ModuleList(\n [\n BasicTransformerBlock(\n inner_dim,\n num_attention_heads,\n attention_head_dim,\n dropout=dropout,\n cross_attention_dim=cross_attention_dim,\n activation_fn=activation_fn,\n attention_bias=attention_bias,\n double_self_attention=double_self_attention,\n norm_elementwise_affine=norm_elementwise_affine,\n )\n for d in range(num_layers)\n ]\n )\n\n self.proj_out = nn.Linear(inner_dim, in_channels)\n\n def forward(\n self,\n hidden_states,\n num_frames=1,\n encoder_hidden_states=None,\n timestep=None,\n class_labels=None,\n cross_attention_kwargs=None,\n return_dict: bool = True,\n ):\n # 1. Input\n batch_frames, channel, height, width = hidden_states.shape\n batch_size = batch_frames // num_frames\n\n residual = hidden_states\n\n hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)\n hidden_states = hidden_states.permute(0, 2, 1, 3, 4)\n\n hidden_states = self.norm(hidden_states)\n hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)\n\n hidden_states = self.proj_in(hidden_states)\n\n # 2. Blocks\n for block in self.transformer_blocks:\n hidden_states = block(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n timestep=timestep,\n cross_attention_kwargs=cross_attention_kwargs,\n class_labels=class_labels,\n )\n\n # 3. Output\n hidden_states = self.proj_out(hidden_states)\n hidden_states = (\n hidden_states[None, None, :]\n .reshape(batch_size, height, width, channel, num_frames)\n .permute(0, 3, 4, 1, 2)\n .contiguous()\n )\n hidden_states = hidden_states.reshape(batch_frames, channel, height, width)\n\n output = hidden_states + residual\n\n if not return_dict:\n return (output,)\n\n return TransformerTemporalModelOutput(sample=output)"
},
{
"identifier": "Conditioner",
"path": "models/resnet.py",
"snippet": "class Conditioner(nn.Module):\n def __init__(self, dim, dim_out, kernel_size, **kwargs):\n super().__init__()\n\n self.spatial_conv = nn.Conv2d(dim, dim_out, kernel_size, **kwargs)\n self.conditioning_conv = nn.Conv2d(dim, dim_out, kernel_size, **kwargs)\n \n def forward(self, hidden_states, conditioning_hidden_states): \n hidden_states = self.spatial_conv(hidden_states)\n conditioning_hidden_states = self.conditioning_conv(conditioning_hidden_states)\n\n return hidden_states, conditioning_hidden_states"
},
{
"identifier": "CrossAttnDownBlock3D",
"path": "models/unet_blocks.py",
"snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n only_cross_attention=False,\n upcast_attention=False,\n ):\n super().__init__()\n\n resnets = []\n attentions = []\n temp_attentions = []\n temp_conditioning_attentions = []\n temp_convs = []\n\n self.gradient_checkpointing = False\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=dropout\n )\n )\n attentions.append(\n Transformer2DModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n temp_conditioning_attentions.append(\n TransformerTemporalConditioningModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n norm_num_groups=resnet_groups,\n only_cross_attention=True\n )\n )\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n self.temp_conditioning_attentions = nn.ModuleList(temp_conditioning_attentions)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample2D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n def forward(\n self,\n hidden_states,\n conditioning_hidden_states,\n h_emb=None,\n c_emb=None,\n encoder_hidden_states=None,\n attention_mask=None,\n num_frames=1,\n cross_attention_kwargs=None,\n ):\n output_states = ()\n conditioning_output_states = ()\n\n for resnet, temp_conv, attn, temp_attn, temp_cond_attn in zip_longest(\n self.resnets, self.temp_convs, self.attentions, self.temp_attentions, self.temp_conditioning_attentions\n ):\n \n if self.gradient_checkpointing:\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n \n hidden_states, conditioning_hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames)\n hidden_states = 
torch.utils.checkpoint.checkpoint(create_custom_forward(attn, return_dict=False), hidden_states, encoder_hidden_states,)[0]\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_attn, return_dict=False), hidden_states, num_frames)[0]\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_cond_attn, return_dict=False), hidden_states, conditioning_hidden_states, num_frames)[0]\n else:\n hidden_states, conditioning_hidden_states = resnet(hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs,).sample\n hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample\n hidden_states = temp_cond_attn(hidden_states, conditioning_hidden_states, num_frames=num_frames).sample\n\n output_states += (hidden_states,)\n conditioning_output_states += (conditioning_hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states, conditioning_hidden_states = downsampler(hidden_states, conditioning_hidden_states)\n\n output_states += (hidden_states,)\n conditioning_output_states += (conditioning_hidden_states,)\n\n return hidden_states, output_states, conditioning_hidden_states, conditioning_output_states"
},
{
"identifier": "CrossAttnUpBlock3D",
"path": "models/unet_blocks.py",
"snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n only_cross_attention=False,\n upcast_attention=False,\n ):\n super().__init__()\n\n resnets = []\n temp_convs = []\n attentions = []\n temp_attentions = []\n temp_conditioning_attentions = []\n\n self.gradient_checkpointing = False\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=dropout\n )\n )\n attentions.append(\n Transformer2DModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n temp_conditioning_attentions.append(\n TransformerTemporalConditioningModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n norm_num_groups=resnet_groups,\n only_cross_attention=True\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n self.temp_conditioning_attentions = nn.ModuleList(temp_conditioning_attentions)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n def forward(\n self,\n hidden_states,\n res_hidden_states_tuple,\n conditioning_hidden_states,\n res_conditioning_hidden_states_tuple,\n h_emb=None,\n c_emb=None,\n encoder_hidden_states=None,\n upsample_size=None,\n attention_mask=None,\n num_frames=1,\n cross_attention_kwargs=None,\n ):\n for resnet, temp_conv, attn, temp_attn, temp_cond_attn in zip_longest(\n self.resnets, self.temp_convs, self.attentions, self.temp_attentions, self.temp_conditioning_attentions\n ):\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n res_conditioning_hidden_states = res_conditioning_hidden_states_tuple[-1]\n res_conditioning_hidden_states_tuple = res_conditioning_hidden_states_tuple[:-1]\n conditioning_hidden_states = torch.cat([conditioning_hidden_states, res_conditioning_hidden_states], dim=1)\n\n if self.gradient_checkpointing:\n def create_custom_forward(module, 
return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n \n hidden_states, conditioning_hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(attn, return_dict=False), hidden_states, encoder_hidden_states,)[0]\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_attn, return_dict=False), hidden_states, num_frames)[0]\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_cond_attn, return_dict=False), hidden_states, conditioning_hidden_states, num_frames)[0]\n else:\n hidden_states, conditioning_hidden_states = resnet(hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs,).sample\n hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample\n hidden_states = temp_cond_attn(hidden_states, conditioning_hidden_states, num_frames=num_frames).sample\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states, conditioning_hidden_states = upsampler(hidden_states, conditioning_hidden_states, upsample_size)\n\n return hidden_states, conditioning_hidden_states"
},
{
"identifier": "DownBlock3D",
"path": "models/unet_blocks.py",
"snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n ):\n super().__init__()\n\n resnets = []\n temp_convs = []\n\n self.gradient_checkpointing = False\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=dropout\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample2D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n def forward(self, hidden_states, conditioning_hidden_states, h_emb=None, c_emb=None, num_frames=1):\n output_states = ()\n conditioning_output_states = ()\n\n for resnet, temp_conv in zip(self.resnets, self.temp_convs):\n if self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states, conditioning_hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames)\n else:\n hidden_states, conditioning_hidden_states = resnet(hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames) \n\n output_states += (hidden_states,)\n conditioning_output_states += (conditioning_hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states, conditioning_hidden_states = downsampler(hidden_states, conditioning_hidden_states)\n\n output_states += (hidden_states,)\n conditioning_output_states += (conditioning_hidden_states,)\n\n return hidden_states, output_states, conditioning_hidden_states, conditioning_output_states"
},
{
"identifier": "UNetMidBlock3DCrossAttn",
"path": "models/unet_blocks.py",
"snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n upcast_attention=False,\n ):\n super().__init__()\n\n self.gradient_checkpointing = False\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n resnets = [\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm\n )\n ]\n temp_convs = [\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=dropout\n )\n ]\n attentions = []\n temp_attentions = []\n temp_conditioning_attentions = []\n\n for _ in range(num_layers):\n attentions.append(\n Transformer2DModel(\n in_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n in_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n temp_conditioning_attentions.append(\n TransformerTemporalConditioningModel(\n in_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n norm_num_groups=resnet_groups,\n only_cross_attention=True\n )\n )\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=dropout\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n self.temp_conditioning_attentions = nn.ModuleList(temp_conditioning_attentions)\n\n def forward(\n self,\n hidden_states,\n conditioning_hidden_states,\n h_emb=None,\n c_emb=None,\n encoder_hidden_states=None,\n attention_mask=None,\n num_frames=1,\n cross_attention_kwargs=None\n ):\n if self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n \n hidden_states, conditioning_hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(self.resnets[0]), hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(self.temp_convs[0]), hidden_states, num_frames)\n else:\n hidden_states, conditioning_hidden_states = self.resnets[0](hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = self.temp_convs[0](hidden_states, num_frames)\n \n for attn, temp_attn, temp_cond_attn, resnet, temp_conv in zip_longest(\n self.attentions, self.temp_attentions, self.temp_conditioning_attentions, self.resnets[1:], self.temp_convs[1:]\n ):\n if 
self.gradient_checkpointing:\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n \n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(attn, return_dict=False), hidden_states, encoder_hidden_states,)[0]\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_attn, return_dict=False), hidden_states, num_frames)[0]\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_cond_attn, return_dict=False), hidden_states, conditioning_hidden_states, num_frames)[0]\n hidden_states, conditioning_hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames)\n else:\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states,cross_attention_kwargs=cross_attention_kwargs,).sample\n hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample\n hidden_states = temp_cond_attn(hidden_states, conditioning_hidden_states, num_frames=num_frames).sample\n hidden_states, conditioning_hidden_states = resnet(hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames) \n\n return hidden_states, conditioning_hidden_states"
},
{
"identifier": "UpBlock3D",
"path": "models/unet_blocks.py",
"snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n ):\n super().__init__()\n\n resnets = []\n temp_convs = []\n self.gradient_checkpointing = False\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=dropout\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n def forward(\n self, \n hidden_states, \n res_hidden_states_tuple, \n conditioning_hidden_states, \n res_conditioning_hidden_states_tuple, \n h_emb=None,\n c_emb=None,\n upsample_size=None, \n num_frames=1\n ):\n \n for resnet, temp_conv in zip(self.resnets, self.temp_convs):\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n res_conditioning_hidden_states = res_conditioning_hidden_states_tuple[-1]\n res_conditioning_hidden_states_tuple = res_conditioning_hidden_states_tuple[:-1]\n conditioning_hidden_states = torch.cat([conditioning_hidden_states, res_conditioning_hidden_states], dim=1)\n\n if self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states, conditioning_hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames)\n else:\n hidden_states, conditioning_hidden_states = resnet(hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states, conditioning_hidden_states = upsampler(hidden_states, conditioning_hidden_states, upsample_size)\n\n return hidden_states, conditioning_hidden_states"
},
{
"identifier": "get_down_block",
"path": "models/unet_blocks.py",
"snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n only_cross_attention=False,\n upcast_attention=False\n):\n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention\n )\n raise ValueError(f\"{down_block_type} does not exist.\")"
},
{
"identifier": "get_up_block",
"path": "models/unet_blocks.py",
"snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n only_cross_attention=False,\n upcast_attention=False\n):\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_groups=resnet_groups\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention\n )\n raise ValueError(f\"{up_block_type} does not exist.\")"
}
] | from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
from safetensors.torch import load_file
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils import WEIGHTS_NAME
from .transformers import TransformerTemporalModel
from .resnet import Conditioner
from .unet_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block
)
import torch
import torch.nn as nn
import torch.utils.checkpoint
import os | 10,463 |
# count how many layers upsample the images
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_attention_head_dim = list(reversed(attention_head_dim))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=reversed_attention_head_dim[i]
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
if norm_num_groups is not None:
self.conv_norm_out = nn.GroupNorm(
num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
)
self.conv_act = nn.SiLU()
else:
self.conv_norm_out = None
self.conv_act = None
conv_out_padding = (conv_out_kernel - 1) // 2
self.conv_out = Conditioner(block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding)
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any child that exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def _set_gradient_checkpointing(self, value=False):
self.gradient_checkpointing = value
self.mid_block.gradient_checkpointing = value
for module in self.down_blocks + self.up_blocks:
| # Copyright 2023 Alibaba DAMO-VILAB and The HuggingFace Team. All rights reserved.
# Copyright 2023 The ModelScope Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
"""
Args:
sample (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model.
"""
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
r"""
UNet3DConditionModel is a conditional 3D UNet model that takes in a noisy sample, conditional state, and a timestep
and returns sample shaped output.
This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
implements for all the models (such as downloading or saving, etc.)
Parameters:
sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
Height and width of input/output sample.
in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.
out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.
down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D")`):
The tuple of downsample blocks to use.
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D")`):
The tuple of upsample blocks to use.
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
The tuple of output channels for each block.
layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
If `None`, it will skip the normalization and activation layers in post-processing
norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.
attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
"""
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
up_block_types: Tuple[str] = ("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1024,
attention_head_dim: Union[int, Tuple[int]] = 64,
):
super().__init__()
self.sample_size = sample_size
self.gradient_checkpointing = False
# Check inputs
if len(down_block_types) != len(up_block_types):
raise ValueError(
f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
)
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
# input
conv_in_kernel = 3
conv_out_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = Conditioner(in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding)
# time
time_embed_dim = block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], True, 0)
timestep_input_dim = block_out_channels[0]
self.hidden_time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
)
self.conditioning_time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
)
self.transformer_in = TransformerTemporalModel(
num_attention_heads=8,
attention_head_dim=attention_head_dim,
in_channels=block_out_channels[0],
num_layers=1,
)
# class embedding
self.down_blocks = nn.ModuleList([])
self.up_blocks = nn.ModuleList([])
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding
)
self.down_blocks.append(down_block)
# mid
self.mid_block = UNetMidBlock3DCrossAttn(
in_channels=block_out_channels[-1],
temb_channels=time_embed_dim,
resnet_eps=norm_eps,
output_scale_factor=mid_block_scale_factor,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[-1],
resnet_groups=norm_num_groups
)
# count how many layers upsample the images
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_attention_head_dim = list(reversed(attention_head_dim))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=reversed_attention_head_dim[i]
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
if norm_num_groups is not None:
self.conv_norm_out = nn.GroupNorm(
num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
)
self.conv_act = nn.SiLU()
else:
self.conv_norm_out = None
self.conv_act = None
conv_out_padding = (conv_out_kernel - 1) // 2
self.conv_out = Conditioner(block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding)
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any child that exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def _set_gradient_checkpointing(self, value=False):
self.gradient_checkpointing = value
self.mid_block.gradient_checkpointing = value
for module in self.down_blocks + self.up_blocks: | if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)): | 2 | 2023-11-14 09:09:09+00:00 | 12k |
TCLResearchEurope/torch-dag | torch_dag_algorithms/pruning/masking_inserter.py | [
{
"identifier": "DagModule",
"path": "torch_dag/core/dag_module.py",
"snippet": "class DagModule(torch.nn.Module):\n MAX_LEN_REPR = None\n\n def __init__(\n self,\n name: str,\n vertices: Optional[List[Vertex]] = None,\n output_vertex: Optional[InnerVertex] = None,\n ):\n super().__init__()\n self.name = name\n self.vertices = vertices if vertices is not None else []\n self.output_vertex = output_vertex\n self.forward_dict = None\n self.inputs_dict = None\n self.cache_forward_dict = False\n self._inner_modules = None\n self.forward_scaffold = {}\n self.output_index = None\n self.compiled = False\n self.update_inner_modules()\n\n def compile(self, inputs: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None):\n \"\"\"\n In general `forward` method of DagModule is not `torch.compile` friendly. To overcome that\n we need to use a modified implementation of the forward pass, with no cacheing of intermediate tensors.\n Additionally, some modules may require a compile-type step for `torch.compile` usage.\n :param inputs: optional input (a dummy tensor for a single forward pass)\n \"\"\"\n if inputs is not None:\n is_training = self.training\n if is_training:\n self.eval()\n _ = self(inputs)\n if is_training:\n self.train()\n\n self.forward_scaffold, self.output_index = self.get_forward_scaffold()\n for v in self.inner_vertices:\n if isinstance(v.module, DagModule):\n v.module.compile()\n self.compiled = True\n\n @property\n def inner_modules(self) -> torch.nn.ModuleList:\n self._inner_modules = torch.nn.ModuleList([vertex.module for vertex in self.inner_vertices])\n return self._inner_modules\n\n @property\n def input_vertices(self) -> List[InputVertex]:\n return [vertex for vertex in self.vertices if isinstance(vertex, InputVertex)]\n\n @property\n def inner_vertices(self) -> List[InnerVertex]:\n return [vertex for vertex in self.vertices if isinstance(vertex, InnerVertex)]\n\n def update_inner_modules(self):\n self._inner_modules = torch.nn.ModuleList([vertex.module for vertex in self.inner_vertices])\n for iv in self.inner_vertices:\n if isinstance(iv.module, DagModule):\n iv.module.update_inner_modules()\n\n def get_vertex_by_name(self, name: str) -> Union[InnerVertex, InputVertex]:\n result = [vertex for vertex in self.vertices if vertex.name == name]\n if len(result) == 1:\n return result[0]\n elif len(result) > 1:\n raise AssertionError(f'Multiple vertices found with name: {name} -> {result}')\n else:\n return\n\n def get_forward_scaffold(self):\n # a mapping between vertices index and its predecessors indices\n forward_scaffold = {}\n for k, vertex in enumerate(self.vertices):\n if isinstance(vertex, InputVertex):\n pass\n elif isinstance(vertex, InnerVertex):\n predecessors = vertex.predecessors\n predecessors_indices = [\n self.vertices.index(pd) for pd in predecessors\n ]\n forward_scaffold[k] = predecessors_indices\n\n output_index = self.vertices.index(self.output_vertex)\n\n return forward_scaffold, output_index\n\n def compiled_forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]]) -> Dict[\n InnerVertex, Union[torch.Tensor, List[torch.Tensor]]]:\n\n assert self.compiled\n\n if not isinstance(inputs, List):\n inputs = [inputs]\n if len(inputs) != len(self.input_vertices):\n raise AssertionError\n\n forward_list = [None for _ in range(len(self.vertices))]\n\n for k, input in enumerate(inputs):\n forward_list[k] = input\n\n num_inputs = len(inputs)\n\n for k in range(len(self.vertices)):\n if k < num_inputs:\n pass\n else:\n\n pd_indices = self.forward_scaffold[k]\n module_inputs = [forward_list[pd_index] for pd_index in pd_indices]\n if 
len(module_inputs) == 1:\n module_inputs = module_inputs[0]\n try:\n result = self.vertices[k].module(module_inputs)\n except (TypeError, AttributeError):\n result = self.vertices[k].module(*module_inputs)\n result = _postprocess_module_output(result)\n\n forward_list[k] = result\n\n return forward_list[self.output_index]\n\n def forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]]) -> Dict[\n InnerVertex, Union[torch.Tensor, List[torch.Tensor]]]:\n # this is for `torch.compile` usage\n if self.compiled:\n return self.compiled_forward(inputs)\n\n if not isinstance(inputs, List):\n inputs = [inputs]\n if len(inputs) != len(self.input_vertices):\n raise AssertionError\n\n forward_dict = {}\n for k, v in enumerate(self.input_vertices):\n forward_dict[v] = inputs[k]\n\n # forward_dict = {vertex: tensor for vertex, tensor in zip(self.input_vertices, inputs)}\n inputs_dict = {}\n\n for vertex in self.vertices:\n if isinstance(vertex, InputVertex):\n pass\n elif isinstance(vertex, InnerVertex):\n predecessors = vertex.predecessors\n module_inputs = [forward_dict[pd] for pd in predecessors]\n inputs_dict[vertex] = module_inputs\n\n if len(module_inputs) == 1:\n module_inputs = module_inputs[0]\n\n try:\n result = vertex.module(module_inputs)\n except (TypeError, AttributeError):\n result = vertex.module(*module_inputs)\n # if isinstance(result, Tuple):\n result = _postprocess_module_output(result)\n\n forward_dict[vertex] = result\n if self.cache_forward_dict:\n self.forward_dict = forward_dict\n self.inputs_dict = inputs_dict\n return forward_dict[self.output_vertex]\n\n def traverse(\n self,\n processor: VertexProcessor = None,\n ):\n if processor is None:\n inner_vertices = []\n for inner_vertex in self.inner_vertices:\n if isinstance(inner_vertex.module, DagModule):\n inner_vertices.extend(inner_vertex.module.traverse())\n inner_vertices.append(inner_vertex)\n return inner_vertices\n else:\n for inner_vertex in self.traverse():\n processor(inner_vertex)\n # TODO: Remove after validation\n # self._update_inner_modules()\n\n def _check_if_name_unique(self, name: str):\n if name in [v.name for v in self.vertices]:\n raise ValueError(\n f'{self.name} already has an Vertex with name {name}. 
Please use different name.'\n )\n\n def add_input_vertex(self, name: str) -> InputVertex:\n self._check_if_name_unique(name)\n input_vertex = InputVertex(name)\n self.vertices.append(input_vertex)\n return input_vertex\n\n def add_vertex(\n self,\n name: str,\n module: torch.nn.Module,\n predecessors: List[Vertex],\n ) -> InnerVertex:\n self._check_if_name_unique(name)\n assert isinstance(module, torch.nn.Module)\n\n inner_vertex = InnerVertex(\n name=name,\n module=module,\n predecessors=predecessors,\n )\n for predecessor in predecessors:\n if predecessor not in self.vertices:\n raise ValueError(f'The predecessor: {predecessor} of InnerVertex: {InnerVertex} is not in '\n f'the DagModule: {self.name}')\n self.vertices.append(inner_vertex)\n self.inner_modules.append(module)\n inner_vertex.dag_module = self\n return inner_vertex\n\n def __repr__(self):\n representation = f'{self.__class__.__name__}[{self.name}]'\n if len(self.vertices) == 0:\n return representation\n for inner_vertex in self.inner_vertices:\n inner_vertex.MAX_LEN_REPR = self.MAX_LEN_REPR\n\n for vertex in self.vertices:\n if isinstance(vertex, InputVertex):\n representation += f'\\n << {vertex.name} '\n else:\n index = self.inner_vertices.index(vertex)\n prefix = '>>' if vertex == self.output_vertex else '*'\n if isinstance(vertex.module, DagModule):\n prefix += '#'\n representation += f\"\\n{prefix} {index}: {vertex} \" \\\n f\"--> predecessors: {vertex.predecessors}, \" \\\n f\"successors: {vertex.successors}\"\n representation += f' {self.add_custom_module_info(vertex)}'\n for vertex in self.inner_vertices:\n vertex.MAX_LEN_REPR = None\n return representation\n\n def add_custom_module_info(self, vertex: InnerVertex):\n m = vertex.module\n if isinstance(m, torch.nn.Conv2d):\n return f'Conv2d(in={m.in_channels}, out={m.out_channels}, ks={m.kernel_size}, padding={m.padding})'\n if isinstance(m, torch.nn.Linear):\n return f'Linear(in={m.in_features}, out={m.out_features})'\n return ''\n\n def mark_current_top_vertex_as_output(self):\n if not self.inner_vertices:\n raise ValueError(f'One cannot mark top node in an empty {self}')\n if self.output_vertex is not None:\n logger.warning(f'{self} already has an output vertex. 
Replacing...')\n self.output_vertex = self.inner_vertices[-1]\n\n @property\n def module_classes(self) -> Set:\n return set([m.__class__ for m in self.inner_modules])\n\n def unroll_inner_modules(self) -> List[torch.nn.Module]:\n result = []\n for m in self.inner_modules:\n if not isinstance(m, DagModule):\n result.append(m)\n else:\n result.extend(m.unroll_inner_modules())\n return result\n\n def save(self, path: str):\n # TODO: Remove after validation\n # self._update_inner_modules()\n self.enforce_names_uniqueness()\n os.makedirs(path, exist_ok=True)\n atomic_modules = self.unroll_inner_modules()\n self.clear_custom_buffers()\n torch.save(torch.nn.ModuleList(atomic_modules), os.path.join(path, 'modules.pt'))\n with open(os.path.join(path, 'config.dict.json'), 'w') as f:\n json.dump(self.config_dict(), f)\n\n def clear_custom_buffers(self):\n for module in self.unroll_inner_modules():\n if hasattr(module, 'clear_custom_buffers'):\n module._buffers.clear()\n\n @classmethod\n def load(\n cls,\n path: str,\n map_location='cpu',\n custom_module_classes: Tuple[Type[torch.nn.Module]] = (),\n ) -> \"DagModule\":\n \"\"\"\n\n :param path: directory from which model should be loaded\n :param map_location: defaults to `cpu`\n :param custom_module_classes: custom torch module classes needed for loading a `DagModule` that was built\n using these modules\n \"\"\"\n with open(os.path.join(path, 'config.dict.json'), 'r') as f:\n config_dict = json.load(f)\n m = torch.load(os.path.join(path, 'modules.pt'), map_location=map_location)\n return cls.load_from_config_dict_and_atomic_modules(\n config_dict=config_dict,\n atomic_modules=m\n )\n\n @classmethod\n def load_from_config_dict_and_atomic_modules(\n cls,\n config_dict: Dict,\n atomic_modules: List[torch.nn.Module]\n ) -> \"DagModule\":\n output_index = config_dict.pop('output_index')\n name = config_dict.pop('name')\n if 'class' in config_dict:\n class_name = config_dict.pop('class')\n else:\n class_name = cls.__name__\n dag = None\n if class_name == cls.__name__:\n dag = cls(name)\n for subclass in cls.__subclasses__():\n if subclass.__name__ == class_name:\n dag = subclass(name)\n\n if dag is None:\n raise NotImplementedError(f'There is no subclass with name: {class_name}.')\n\n for k, (key, config) in enumerate(config_dict.items()):\n if config['type'] == 'input':\n dag.add_input_vertex(name=config['name'])\n else:\n predecessors = [dag.vertices[index] for index in config['predecessor_indices']]\n if config['is_atomic']:\n module = atomic_modules[config['module_index']]\n else:\n module = cls.load_from_config_dict_and_atomic_modules(\n config_dict=config['module_dict'],\n atomic_modules=atomic_modules,\n )\n vertex = dag.add_vertex(\n name=config['name'],\n module=module,\n predecessors=predecessors,\n )\n orbit = config.get('orbit')\n if orbit:\n vertex.orbit = orbit\n if k == output_index:\n dag.output_vertex = vertex\n\n return dag\n\n def config_dict(self, atomic_modules: List[torch.nn.Module] = None) -> Dict:\n if atomic_modules is None:\n atomic_modules = self.unroll_inner_modules()\n config_dict = {}\n for k, vertex in enumerate(self.vertices):\n _config = vertex.config_dict(atomic_modules)\n config_dict[k] = _config\n\n config_dict['name'] = self.name\n config_dict['class'] = self.__class__.__name__\n config_dict['output_index'] = self.vertices.index(self.output_vertex)\n return config_dict\n\n def _get_inner_vertex_predecessor_indices(self, inner_vertex: InnerVertex) -> List[int]:\n result = [\n self.vertices.index(predecessor)\n for 
predecessor in inner_vertex.predecessors\n ]\n return result\n\n @property\n def flat(self) -> bool:\n for v in self.inner_vertices:\n if isinstance(v.module, DagModule):\n return False\n return True\n\n def flatten(self, input_shape_for_verification: Optional[Tuple[int, ...]] = None) -> \"DagModule\":\n \"\"\"\n This method will switch the `dag` to `eval` mode if `input_shape_for_verification` is provided.\n :param input_shape_for_verification:\n :return:\n \"\"\"\n dag_copy = deepcopy(self)\n if self.flat:\n return dag_copy\n\n if input_shape_for_verification:\n dag_copy.eval()\n x = torch.normal(mean=torch.zeros(size=input_shape_for_verification))\n reference_output = dag_copy(x)\n\n # builds a new cell (not in place flatten)\n dag = self.__class__(name=dag_copy.name, vertices=dag_copy.input_vertices)\n for v in dag_copy.inner_vertices:\n if not isinstance(v.module, DagModule):\n dag.vertices.append(v)\n v.dag_module = dag\n if v == dag_copy.output_vertex:\n dag.output_vertex = v\n else:\n inner_dag_predecessors = v.predecessors\n inner_dag_successors = v.successors\n inner_dag = v.module.flatten()\n for iv in inner_dag.inner_vertices:\n for pd in iv.predecessors: # remap predecessors where needed\n if isinstance(pd, InputVertex):\n pd_index_in_inner_dag = inner_dag.input_vertices.index(pd)\n index = iv.predecessors.index(pd)\n iv.predecessors[index] = inner_dag_predecessors[pd_index_in_inner_dag]\n if inner_dag.output_vertex == iv: # remap output of inner dag\n for suc in inner_dag_successors:\n index = suc.predecessors.index(v)\n suc.predecessors[index] = iv\n iv.dag_module = dag\n dag.vertices.append(iv)\n if v == dag_copy.output_vertex:\n dag.output_vertex = iv\n assert all([e in dag.vertices for e in iv.predecessors])\n\n if input_shape_for_verification:\n dag.eval()\n new_output = dag(x)\n assert torch.abs(reference_output - new_output).sum() == 0.0\n\n # TODO: Remove after validation\n # self._update_inner_modules()\n dag.enforce_names_uniqueness()\n\n return dag\n\n def enforce_names_uniqueness(self):\n names = [v.name for v in self.vertices]\n while len(names) != len(set(names)):\n names_counter = Counter()\n for v in self.vertices:\n name = v.name\n names_counter[name] += 1\n if names_counter[name] > 1:\n new_name = f'{name}_{names_counter[name] - 1}'\n logger.debug(f'Renaming: {name} -> {new_name}')\n v.name = new_name\n names = [v.name for v in self.vertices]\n\n def clear_tensor_dicts(self):\n self.forward_dict = None\n self.inputs_dict = None\n\n @property\n def device(self):\n # https://discuss.pytorch.org/t/how-to-check-if-model-is-on-cuda/180/10\n # useful, but may be dangerous\n self.update_inner_modules()\n device_ = next(iter(self.parameters())).device\n if not all([p.device == device_ for p in self.parameters()]):\n raise AssertionError(f'Not all parameters of {self.name} are on the same device')\n return device_"
},
{
"identifier": "MaskModule",
"path": "torch_dag_algorithms/pruning/modules.py",
"snippet": "class MaskModule(torch.nn.Module, ZeroFlopsMixin, ZeroParamsMixin):\n def __init__(self, orbit: OrbitModule):\n super().__init__()\n self.orbit = orbit\n\n def forward(self, x: torch.Tensor):\n mask = self.orbit.sample()\n # (B, C, H, W) case\n if len(x.shape) == 4:\n non_channel_dim = (0, 2, 3)\n mask = mask.view(1, -1, 1, 1)\n # (B, T, dim) case\n elif len(x.shape) == 3:\n non_channel_dim = (0, 1)\n mask = mask.view(1, 1, -1)\n elif len(x.shape) == 2:\n non_channel_dim = (0,)\n mask = mask.view(1, -1)\n else:\n raise NotImplementedError\n\n try:\n x_masked = mask * x\n except:\n # TODO: find more elegent solution to this channel dim switching issue\n mask = mask.view(1, 1, 1, -1)\n x_masked = mask * x\n if self.training:\n bkd_loss = per_channel_noise_to_signal_ratio(x_masked, x, non_channel_dim=non_channel_dim)\n self.orbit.bkd_masking_losses[self] = bkd_loss\n return x_masked"
},
{
"identifier": "OrbitModule",
"path": "torch_dag_algorithms/pruning/modules.py",
"snippet": "class OrbitModule(torch.nn.Module):\n\n def __init__(\n self,\n name: str,\n num_channels: int,\n distillation_mode: str = constants.PRUNING_DEFAULT_MODE_NAME,\n block_size: Optional[int] = None,\n indices_of_source_vertices=None,\n ):\n super().__init__()\n self.name = name\n self.num_channels = num_channels\n self.distillation_mode = distillation_mode\n self.block_size = block_size\n self._testing_logits = None\n self.conv1 = torch.nn.Conv2d(\n in_channels=num_channels, out_channels=num_channels, kernel_size=3, groups=num_channels)\n self.conv2 = torch.nn.Conv2d(\n in_channels=num_channels,\n out_channels=num_channels,\n kernel_size=1,\n )\n self._optionally_set_block_size_for_whole_block_pruning(distillation_mode=distillation_mode)\n self._validate_distilation_mode_and_block_size(distillation_mode=distillation_mode, block_size=block_size)\n self.bkd_masking_losses = {}\n self.indices_of_source_vertices = indices_of_source_vertices\n self.debug_logits = None\n\n def _validate_distilation_mode_and_block_size(self, distillation_mode: str, block_size: int):\n if distillation_mode not in PRUNING_MODES:\n raise NotImplementedError(f'Distillation mode: {distillation_mode} not supported')\n if distillation_mode == constants.PRUNING_BLOCK_SNPE_MODE_NAME and block_size is None:\n raise AssertionError(f'In {constants.PRUNING_BLOCK_SNPE_MODE_NAME} pruning mode block size must not '\n f'be `None`.')\n\n def _optionally_set_block_size_for_whole_block_pruning(self, distillation_mode: str):\n if distillation_mode == constants.PRUNING_WHOLE_BLOCK_MODE_NAME:\n self.block_size = self.num_channels\n\n @staticmethod\n def clip_logits(\n logits: torch.Tensor,\n clip_val=constants.MAX_LOGITS_ABS_VALUE,\n ) -> torch.Tensor:\n return torch.clip(logits, min=-clip_val, max=clip_val)\n\n @property\n def logits(self) -> torch.Tensor:\n # TODO This is a hack for testing, remove/refactor it\n if self.debug_logits is not None:\n return self.debug_logits\n kernel_size = self.conv1.kernel_size\n device = self.conv1.weight.device\n x = torch.ones(size=(1, self.num_channels, *kernel_size), device=device)\n x = self.conv1(x)\n x = self.conv2(x)\n x = (constants.INITIAL_LOGITS_VALUE_FOR_PRUNING + constants.SIMPLE_ORBIT_LOGITS_MULTIPLIER * x)\n return self.clip_logits(torch.mean(x, dim=(0, 2, 3), keepdim=False))\n\n def compute_average_number_of_output_channels(self):\n if self.distillation_mode == constants.PRUNING_DEFAULT_MODE_NAME:\n return torch.sigmoid(self.logits).sum()\n\n elif self.distillation_mode in (\n constants.PRUNING_BLOCK_SNPE_MODE_NAME, constants.PRUNING_WHOLE_BLOCK_MODE_NAME):\n split_list = get_split_list_of_logits(logits=self.logits, block_size=self.block_size)\n max_per_block_logits = get_sorted_per_block_max_logits(\n logits=self.logits,\n block_size=self.block_size,\n )\n num_channels = torch.stack(\n [float(block_size) * torch.sigmoid(max_logit) for \\\n block_size, max_logit in zip(split_list, max_per_block_logits)], dim=0).sum()\n return num_channels\n else:\n msg = f'Mode {self.distillation_mode} not implemented for average channels computation.'\n raise NotImplementedError(msg)\n\n def compute_output_channel_masks(\n self,\n predecessors_channel_masks: List[List[torch.Tensor]] = None,\n ) -> List[torch.Tensor]:\n predecessors_channel_masks = [mask_list for mask_list in predecessors_channel_masks if mask_list is not None]\n logits = self.logits\n num_logits = int(logits.shape[0])\n if self.distillation_mode == constants.PRUNING_DEFAULT_MODE_NAME:\n scores_ = torch.where(\n logits > 
0.0,\n 1,\n 0,\n )\n elif self.distillation_mode == constants.PRUNING_WHOLE_BLOCK_MODE_NAME:\n max_logits_per_block = get_sorted_per_block_max_logits(\n logits=logits,\n block_size=self.block_size,\n )\n max_logits_per_block_tensor = torch.stack(max_logits_per_block)\n indices_of_blocks_to_leave = np.where(max_logits_per_block_tensor > 0.)[0]\n if len(indices_of_blocks_to_leave) == 1:\n scores_ = np.ones(shape=(self.block_size,), dtype=np.int32)\n else:\n scores_ = np.zeros(shape=(self.block_size,), dtype=np.int32)\n\n elif self.distillation_mode == constants.PRUNING_BLOCK_SNPE_MODE_NAME:\n max_logits_per_block = get_sorted_per_block_max_logits(\n logits=logits,\n block_size=self.block_size,\n )\n max_logits_per_block_tensor = torch.stack(max_logits_per_block)\n indices_of_blocks_to_leave = np.where(max_logits_per_block_tensor > 0.)[0]\n if len(indices_of_blocks_to_leave) == 0:\n # removing whole orbit\n scores_ = np.zeros(shape=(self.num_channels,), dtype=np.int32)\n\n else:\n # compute block indices that are left\n sorted_logits = torch.sort(logits, descending=True)[0]\n split_list = get_split_list_of_logits(logits=logits, block_size=self.block_size)\n split_sorted_logits = list(torch.split(sorted_logits, split_list))\n residual = num_logits % self.block_size\n if residual != 0:\n logits_fake_tail = split_sorted_logits[-1].mean() * torch.ones(\n size=(self.block_size - residual,))\n split_sorted_logits[-1] = torch.cat([split_sorted_logits[-1], logits_fake_tail], dim=0)\n split_sorted_logits = [e.detach().numpy() for e in split_sorted_logits]\n if len(split_sorted_logits) == 1:\n res = split_sorted_logits\n else:\n res = np.take(\n split_sorted_logits,\n axis=0,\n indices=indices_of_blocks_to_leave,\n )\n threshold_value = torch.tensor(res).min()\n scores_ = np.where(\n logits >= threshold_value,\n 1,\n 0,\n )\n else:\n raise NotImplementedError\n\n if len(predecessors_channel_masks) == 0:\n return [torch.tensor(scores_)]\n else:\n return [torch.tensor(np.where(\n predecessors_channel_masks[0][0].sum() == 0,\n np.array([0] * self.num_channels, dtype=np.int32),\n scores_,\n ))]\n\n def sample(self):\n return sample_from_logits(logits=self.logits)"
},
{
"identifier": "commons",
"path": "torch_dag_algorithms/pruning/commons.py",
"snippet": "PASS_THROUGH_CHANNELS_CLASSES = (\n smodules.ChannelAffineModule,\n smodules.NormalizeModule,\n smodules.LayerNormWithOptionalBias,\n smodules.TfBatchNorm1d,\n nn.BatchNorm2d,\n nn.MaxPool2d,\n nn.AvgPool2d,\n nn.AdaptiveAvgPool2d,\n nn.Dropout,\n nn.Upsample,\n nn.LayerNorm,\n nn.BatchNorm1d,\n MaskModule,\n smodules.PowerModule,\n smodules.AddcmulModule,\n smodules.HalfPixelCentersFalseBilinearUpsample,\n smodules.BilinearUpsampling,\n smodules.PadModule,\n smodules.NormalizeModule,\n smodules.InterpolateModule,\n smodules.ScalarMul,\n smodules.MeanModule,\n\n)\nELEMENWISE_CHANNEL_CLASSES = (\n smodules.AddModule,\n smodules.MulModule,\n smodules.SubModule,\n)\nTRUNCATE_ON = smodules.ConcatModule\ndef is_depthwise_conv(module: nn.Module) -> bool:\ndef is_conv_source(module: nn.Module):\ndef is_linear_source(module: nn.Module):\ndef is_source(module: nn.Module):\ndef get_source_out_channels(module: nn.Module) -> int:\ndef get_source_in_channels(module: nn.Module) -> int:\ndef is_sink(module: nn.Module):\ndef get_orbits_dict(dag) -> Dict:\nclass Skipped:"
},
{
"identifier": "get_source_out_channels",
"path": "torch_dag_algorithms/pruning/commons.py",
"snippet": "def get_source_out_channels(module: nn.Module) -> int:\n assert is_source(module)\n if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):\n return module.out_channels\n elif isinstance(module, nn.Linear):\n return module.out_features\n else:\n raise NotImplementedError"
},
{
"identifier": "MaskingInsertionStrategy",
"path": "torch_dag_algorithms/pruning/masking_insertion_strategy.py",
"snippet": "class MaskingInsertionStrategy(ABC):\n \"\"\"\n Interface to find a spot within orbit where masking should be inserted. \n \"\"\"\n\n @abstractmethod\n def find_reference_nodes(self, orbit: Orbit) -> List[Tuple[InnerVertex, InnerVertex]]:\n \"\"\"\n Function that return list of tuples (start_icn, end_icn) where masking should be inserted. End_icn can be either a proper sink like Conv2D/Dense or \"dummy\" sink like Concat. Masking has to be inserted in each unique end within orbit. \n \"\"\"\n raise NotImplementedError"
},
{
"identifier": "Orbit",
"path": "torch_dag_algorithms/pruning/orbit.py",
"snippet": "class Orbit:\n def __init__(self, color: int):\n \"\"\"Basic orbit object that can represent either extended or final orbit. If orbit has `allow_for_further_processing` set to True then it can be processed by Orbitalizer by it's general mechanism. If set to False orbit won't be processed in any way and will be passed to orbitalization algorithm in unchanged state.\n\n `_found_by` - indicates what stage lead to orbit being found. It's used in testing handling custom known patterns that are handled by hand. It also holds information that can be usefull durning debugging. \n\n Args:\n color (int): orbit color. has to be unique\n allow_for_further_processing (bool, optional): If False orbit won't be process in any way. Defaults to True.\n \"\"\"\n self.color = color\n\n self.vertices_in_scope: Set[InnerVertex] = set()\n self.sources: List[InnerVertex] = []\n self.sinks: List[InnerVertex] = []\n self.end_path: List[Tuple[InnerVertex, InnerVertex]] = []\n\n self.kmapps = None\n self._discovery_stage = None\n\n @property\n def discovery_stage(self) -> OrbitsDiscoveryStage:\n return self._discovery_stage\n\n @discovery_stage.setter\n def discovery_stage(self, val: OrbitsDiscoveryStage):\n if not self._discovery_stage:\n self._discovery_stage = val\n else:\n raise AttributeError('_found_by property is already set and cannot be changed.')\n\n @property\n def non_border(self) -> Set[InnerVertex]:\n return self.vertices_in_scope - (set(self.sources) | set(self.sinks))\n\n def __repr__(self):\n return f'\\033[1m\\033[95mOrbit\\033[0m[\\033[1m\\033[93mcolor\\033[0m={self.color}, \\033[1m\\033[93mdiscovery_stage\\033[0m={self.discovery_stage}, \\033[1m\\033[93msources\\033[0m={self.sources}, \\033[1m\\033[93msinks\\033[0m={self.sinks}, \\033[1m\\033[93mnon_border\\033[0m={self.non_border}, \\033[1m\\033[93mend_path\\033[0m={self.end_path}]'\n\n def __iter__(self):\n yield from self.vertices_in_scope\n\n def __len__(self):\n return len(self.vertices_in_scope)\n\n def __eq__(self, other: 'Orbit'):\n sources_equal = (set(self.sources) == set(other.sources))\n sinks_equal = (set(self.sinks) == set(other.sinks))\n non_border_equal = self.non_border == other.non_border\n\n return sources_equal and sinks_equal and non_border_equal\n\n def add_to_scope(self, vertex: InnerVertex):\n self.vertices_in_scope.add(vertex)\n\n def mark_as_source(self, vertex: InnerVertex):\n if vertex not in self.sources:\n self.sources += [vertex]\n\n def mark_as_sink(self, vertex: InnerVertex):\n if vertex not in self.sinks:\n self.sinks += [vertex]\n\n def mark_end_path_node_and_sink(self, end_vertex: InnerVertex, sink_vertex: InnerVertex):\n self.end_path += [(end_vertex, sink_vertex)]\n\n def is_valid(self, orbit_filters: List['OrbitFilter']) -> bool:\n for orbit_filter in orbit_filters:\n include = orbit_filter.filter(self)\n if not include:\n return False\n\n return True"
},
{
"identifier": "dag_module_utils",
"path": "torch_dag/core/dag_module_utils.py",
"snippet": "def remove_vertex(dag: DagModule, vertex: InnerVertex):\ndef check_if_flat_dag_has_redundant_vertices(dag: DagModule) -> bool:\ndef recursively_remove_redundant_vertices(dag: DagModule):\ndef remove_redundant_vertices_from_flat_dag(dag: DagModule):\ndef insert_before(\n dag: DagModule,\n reference_vertex: InnerVertex,\n name: str,\n new_module: torch.nn.Module,\n):\ndef insert_after(\n dag: DagModule,\n reference_vertex: InnerVertex,\n name: str,\n new_module: torch.nn.Module,\n):\ndef insert_between(\n dag: DagModule,\n name: str,\n after_vertex: InnerVertex,\n new_module: torch.nn.Module,\n before_vertex: InnerVertex,\n):\ndef wrap_sequence_in_dag_module(name: str, sequence: List[InnerVertex]) -> DagModule:\ndef wrap_subgraph_of_dag_module(\n dag: DagModule,\n end_vertex: InnerVertex,\n begin_vertex: InnerVertex,\n subgraph_name: str = None,\n input_shape_for_validation: Tuple[int, ...] = None,\n allow_subgraphs_inside: bool = False,\n):\ndef compare_module_outputs(\n first_module: torch.nn.Module,\n second_module: torch.nn.Module,\n input_shape: Tuple[int, ...],\n atol=1e-6,\n):\ndef in_place_remove_traverser(v: InnerVertex):\ndef remove_identity_traverser(v: InnerVertex):"
}
] | import logging
from typing import Optional, Union
from torch_dag.core.dag_module import DagModule
from torch_dag_algorithms.pruning.modules import MaskModule, OrbitModule
from torch_dag_algorithms.pruning import commons
from torch_dag_algorithms.pruning.commons import get_source_out_channels
from torch_dag_algorithms.pruning.masking_insertion_strategy import MaskingInsertionStrategy
from torch_dag_algorithms.pruning.orbit import Orbit
from torch_dag.core import dag_module_utils | 8,136 |
logger = logging.getLogger(__name__)
class MaskInserter:
def __init__(
self,
masking_strategy: MaskingInsertionStrategy,
block_size: Optional[int] = None
):
self.masking_strategy = masking_strategy
self.block_size = block_size
def insert_masking(
self,
|
logger = logging.getLogger(__name__)
class MaskInserter:
def __init__(
self,
masking_strategy: MaskingInsertionStrategy,
block_size: Optional[int] = None
):
self.masking_strategy = masking_strategy
self.block_size = block_size
def insert_masking(
self, | dag: DagModule, | 0 | 2023-11-17 15:36:44+00:00 | 12k |
newcastleuniversity/DISPEL | dispel/providers/generic/preprocessing.py | [
{
"identifier": "Level",
"path": "dispel/data/levels.py",
"snippet": "class Level(Epoch):\n \"\"\"An entity to separate sub-task inside each test (Levels).\n\n FIXME: DOC\n\n Attributes\n ----------\n context\n Contextual information about the level\n measure_set\n A :class:'~dispel.data.measures.MeasureSet' of a given Level\n\n Parameters\n ----------\n id_\n The identifier of a given Level.\n start\n The timestamp of the beginning of the level\n end\n The timestamp of the end of the level\n context\n Contextual information about the level\n raw_data_sets\n An iterable of :class:'~dispel.data.raw.RawDataSet' of a given Level\n measure_set\n A :class:'~dispel.data.measures.MeasureSet' of a given Level\n epochs\n An iterable of :class:`~dispel.data.measures.EpochMeasureSet` to be added to the\n level.\n \"\"\"\n\n def __init__(\n self,\n id_: Union[str, List[str], LevelId],\n start: Any,\n end: Any,\n context: Optional[Context] = None,\n raw_data_sets: Optional[Iterable[RawDataSet]] = None,\n measure_set: Optional[MeasureSet] = None,\n epochs: Optional[Iterable[LevelEpoch]] = None,\n ):\n if not isinstance(id_, LevelId):\n id_ = LevelId(id_)\n\n definition = EpochDefinition(id_=id_)\n super().__init__(start=start, end=end, definition=definition)\n\n self.context = context or Context()\n self.measure_set = measure_set or MeasureSet()\n\n # create dictionary of raw data sets\n self._raw_data_sets: Dict[str, RawDataSet] = {}\n\n # set raw data sets if arg is provided\n if raw_data_sets:\n for raw_data_set in raw_data_sets:\n self.set(raw_data_set)\n\n # create data frame for each epoch\n self._epochs = pd.DataFrame(columns=[\"definition_id\", \"start\", \"end\", \"epoch\"])\n if epochs:\n for epoch in epochs:\n self.set(epoch)\n\n @property\n def id(self) -> LevelId:\n \"\"\"Get the ID of the level from its definition.\n\n Returns\n -------\n LevelId\n The ID of the definition provided via `definition`.\n \"\"\"\n assert self.definition is not None, \"Require definition to access id\"\n return cast(LevelId, self.definition.id)\n\n @id.setter\n def id(self, value: Union[str, DefinitionId]):\n \"\"\"Set the ID of the level's definition.\n\n Parameters\n ----------\n value\n The ID to be set.\n \"\"\"\n assert self.definition is not None, \"Require definition to set id\"\n self.definition.id = value # type: ignore\n\n def __hash__(self):\n return hash(self.id)\n\n def __repr__(self):\n return f\"<Level: {self.id} ({self.flag_count_repr})>\"\n\n @property\n def raw_data_sets(self) -> List[RawDataSet]:\n \"\"\"Get all raw data sets.\"\"\"\n return list(self._raw_data_sets.values())\n\n def has_raw_data_set(self, id_: str) -> bool:\n \"\"\"Return ``True`` if the level contains the desired raw data set.\"\"\"\n return id_ in self._raw_data_sets\n\n def get_raw_data_set(self, id_: str) -> RawDataSet:\n \"\"\"Get the raw data set for a given data set id.\n\n Parameters\n ----------\n id_\n The id of the raw data set to be returned\n\n Returns\n -------\n RawDataSet\n The raw data set with the matching id\n\n Raises\n ------\n ValueError\n If the given id does not correspond to any existing raw data set within the\n level.\n \"\"\"\n if id_ not in self._raw_data_sets:\n raise ValueError(\n f'Unknown data set with id: \"{id_}\" for level_id == \"{self.id}\" '\n f\"please provide an id within {list(self._raw_data_sets.keys())}\"\n )\n\n return self._raw_data_sets[id_]\n\n @property\n def epochs(self) -> List[LevelEpoch]:\n \"\"\"Get all epoch measure sets.\"\"\"\n return self._epochs[\"epoch\"].tolist()\n\n @singledispatchmethod\n def set(self, value, 
**kwargs):\n \"\"\"Set a value inside a level.\"\"\"\n raise TypeError(f\"Unsupported set type: {type(value)}\")\n\n @set.register(MeasureSet)\n def _set_measure_set(self, value: MeasureSet):\n self.measure_set += value\n\n @set.register(MeasureValue)\n def _set_measure_value(self, value: MeasureValue):\n self.measure_set.set(value)\n\n @set.register(RawDataSet)\n def _set_raw_data_set(\n self, value: RawDataSet, concatenate: bool = False, overwrite: bool = False\n ):\n if overwrite and concatenate:\n raise ValueError(\n \"You cannot both concatenate and overwrite an existing raw data set. \"\n \"Only one of these arguments must be set to ``True``.\"\n )\n\n if (id_ := value.id) in self._raw_data_sets: # pylint: disable=all\n if concatenate:\n value = value.concat(self.get_raw_data_set(id_))\n elif not overwrite:\n raise RawDataSetAlreadyExists(\n id_, self.id, \"Use overwrite=True to overwrite\"\n )\n\n self._raw_data_sets[id_] = value\n\n @set.register(LevelEpoch)\n def _set_epoch(self, value: LevelEpoch):\n new_index = len(self._epochs)\n self._epochs.loc[new_index] = pd.Series(\n dict(\n definition_id=value.id if value.definition else None,\n start=value.start,\n end=value.end,\n epoch=value,\n )\n )\n\n @set.register(Flag)\n def _set_flag(self, value: Flag):\n self.add_flag(value)"
},
{
"identifier": "DEFAULT_COLUMNS",
"path": "dispel/data/raw.py",
"snippet": "DEFAULT_COLUMNS = list(\"xyz\")"
},
{
"identifier": "GRAVITY_COLUMNS",
"path": "dispel/data/raw.py",
"snippet": "GRAVITY_COLUMNS = [f\"gravity{x}\" for x in \"XYZ\"]"
},
{
"identifier": "ProcessingStep",
"path": "dispel/processing/core.py",
"snippet": "class ProcessingStep:\n r\"\"\"A processing step in a processing sequence.\n\n :class:`ProcessingStep` is the basic entity through which\n :class:`~dispel.data.core.Reading`\\ s are processed. The processing step's\n :meth:`process_reading` function is called with the reading and additional arguments\n passed to :func:`process`. Results from the process step are expected to be an\n instance of :class:`ProcessingResult`. For a comprehensive description see\n :ref:`measure-extraction`.\n\n The method :meth:`flag_reading` can be overwritten to ensure that the reading\n about to be processed is valid, and return\n :class:`~dispel.data.flags.Flag`\\ s if that is not the case.\n\n Examples\n --------\n .. testsetup:: processing-step\n\n >>> import pandas as pd\n >>> import numpy as np\n\n >>> from dispel.data.core import Reading\n >>> from dispel.data.levels import Level\n >>> from dispel.data.raw import (RawDataSet, RawDataSetDefinition,\n ... RawDataValueDefinition)\n\n >>> reading = Reading(\n ... evaluation=None,\n ... levels=[\n ... Level(id_='my-level', start=0, end=1, raw_data_sets=[\n ... RawDataSet(\n ... RawDataSetDefinition('my-data-set', None, [\n ... RawDataValueDefinition('dummy', 'dummy')\n ... ]),\n ... pd.DataFrame({'dummy': list(range(6))})\n ... )\n ... ])\n ... ])\n\n .. doctest:: processing-step\n\n >>> from dispel.data.measures import MeasureValue\n >>> from dispel.data.values import ValueDefinition\n >>> from dispel.processing import process\n >>> from dispel.processing.core import ProcessingResult, ProcessingStep\n >>> class MyStep(ProcessingStep):\n ... def process_reading(self, reading, **kwargs):\n ... level = reading.get_level('my-level')\n ... raw_data_set = level.get_raw_data_set('my-data-set')\n ... data = raw_data_set.data\n ... yield ProcessingResult(\n ... step=self,\n ... sources=raw_data_set,\n ... result=MeasureValue(\n ... ValueDefinition('my-measure-id','max value'),\n ... data.max().max()\n ... )\n ... 
)\n >>> _ = process(reading, MyStep())\n >>> reading.measure_set.get_raw_value('my-measure-id')\n 5\n \"\"\"\n\n def __init__(self):\n self.predecessor = None\n self.successor = None\n\n def process(self, reading: Reading, **kwargs) -> ProcessResultType:\n \"\"\"Check reading for validity and process it.\n\n Parameters\n ----------\n reading\n The reading to be processed\n kwargs\n Additional arguments passed by :func:`process`.\n\n Yields\n ------\n ProcessResultType\n The results from processing readings.\n \"\"\"\n for flag in self.flag_reading(reading, **kwargs):\n yield ProcessingControlResult.from_flag(\n flag=flag,\n step=self,\n targets=self.get_reading_flag_targets(reading, **kwargs),\n )\n try:\n self.assert_valid_reading(reading, **kwargs)\n except AssertionError as error:\n yield ProcessingControlResult.from_assertion_error(step=self, error=error)\n else:\n yield from self.process_reading(reading, **kwargs)\n\n def assert_valid_reading(self, reading: Reading, **kwargs):\n \"\"\"Assert that reading is valid.\"\"\"\n\n def flag_reading(self, reading: Reading, **kwargs) -> Generator[Flag, None, None]:\n \"\"\"Flag the provided reading.\n\n Parameters\n ----------\n reading\n The reading to be flagged.\n kwargs\n Additional arguments passed by :func:`~dispel.processing.process`.\n\n Yields\n ------\n Flag\n The resulted flags.\n \"\"\"\n # pylint: disable=unused-argument\n yield from []\n\n def get_reading_flag_targets(\n self, reading: Reading, **kwargs\n ) -> Iterable[EntityType]:\n \"\"\"Get the reading flag targets.\n\n Parameters\n ----------\n reading\n The reading that is concerned with flagging.\n kwargs\n Additional keyword arguments eventually used for flag targets\n extraction.\n\n Returns\n -------\n Iterable[EntityType]\n An iterable of entities that are flagged.\n \"\"\"\n # pylint: disable=unused-argument\n return [reading]\n\n @abstractmethod\n def process_reading(self, reading: Reading, **kwargs) -> ProcessResultType:\n \"\"\"Process the provided reading.\n\n Parameters\n ----------\n reading\n The reading to be processed\n kwargs\n Additional arguments passed by :func:`~dispel.processing.process`.\n\n Yields\n ------\n ProcessResultType\n The results from processing readings.\n \"\"\"\n yield NotImplemented\n\n def set_previous(self, step: \"ProcessingStep\"):\n \"\"\"Set the previous step in a processing chain of this step.\"\"\"\n if self.predecessor is not None:\n warnings.warn(\n \"Changing predecessors can lead to side-effects. Previous predecessor \"\n f\"was {self.predecessor}\",\n UserWarning,\n )\n self.predecessor = step\n\n def set_next(self, step: \"ProcessingStep\"):\n \"\"\"Set the next step in a processing chain of this step.\"\"\"\n if self.successor is not None:\n warnings.warn(\n \"Changing successors can lead to side-effects. 
Previous successor was \"\n f\"{self.successor}\",\n UserWarning,\n )\n self.successor = step\n\n def chain(self, successor: \"ProcessingStep\") -> \"ProcessingStep\":\n \"\"\"Chain this step with the successor step.\"\"\"\n assert isinstance(successor, ProcessingStep), \"Can only chain processing steps\"\n\n self.set_next(successor)\n successor.set_previous(self)\n return _ChainedProcesses([self, successor])\n\n def __and__(self, other):\n \"\"\"See :meth:`ProcessingStep.chain`.\"\"\"\n return self.chain(other)\n\n def get_parameters(self) -> List[Tuple[str, Parameter]]:\n \"\"\"Get all parameters defined by the processing step.\n\n Returns\n -------\n List[Tuple[str, Parameter]]\n A list of tuples of parameter name and :class:`Parameter`\n objects defined by the processing step.\n \"\"\"\n return inspect.getmembers(self, lambda x: isinstance(x, Parameter))"
},
{
"identifier": "DefaultLevelFilter",
"path": "dispel/processing/level.py",
"snippet": "class LevelProcessingResultBase:\nclass LevelProcessingResult(ProcessingResult, LevelProcessingResultBase):\nclass LevelProcessingControlResult(ProcessingControlResult, LevelProcessingResultBase):\nclass LevelFilter(ABC):\nclass LevelIdFilter(LevelFilter):\nclass DefaultLevelFilter(LevelFilter):\nclass LevelProcessingStepProtocol(metaclass=ABCMeta):\nclass LevelFilterProcessingStepMixin:\nclass LevelProcessingStep(\n LevelProcessingStepProtocol, LevelFilterProcessingStepMixin, ProcessingStep\n):\nclass FlagLevelStep(FlagStepMixin, LevelProcessingStep):\nclass ProcessingStepGroup(LevelFilterProcessingStepMixin, CoreProcessingStepGroup):\n def __post_init__(self):\n def from_assertion_error(\n cls,\n step: \"ProcessingStep\",\n error: AssertionError,\n level: Optional[Level] = None,\n ):\n def from_flag(\n cls,\n flag: Flag,\n step: \"ProcessingStep\",\n targets: Iterable[EntityType],\n level: Optional[Level] = None,\n ):\ndef _intersection(a, b):\ndef _union(a, b):\n def __call__(self, levels: Iterable[Level]) -> Set[Level]:\n def __repr__(self) -> str:\n def repr(self) -> str:\n def filter(self, levels: Iterable[Level]) -> Set[Level]:\n def _combined(\n self, other: \"LevelFilter\", func: Callable[[Set, Set], Set]\n ) -> \"LevelFilter\":\n def _match(levels: Iterable[Level]) -> Set[Level]:\n def _repr() -> str:\n def __and__(self, other: \"LevelFilter\") -> \"LevelFilter\":\n def __or__(self, other: \"LevelFilter\") -> \"LevelFilter\":\n def __invert__(self) -> \"LevelFilter\":\n def _inverted_filter(levels: Iterable[Level]) -> Set[Level]:\n def _repr() -> str:\n def __init__(self, level_ids: MultipleLevelIdsType):\n def repr(self) -> str:\n def filter(self, levels: Iterable[Level]) -> Set[Level]:\n def repr(self) -> str:\n def filter(self, levels: Iterable[Level]) -> Set[Level]:\n def assert_valid_level(self, level: Level, reading: Reading, **kwargs):\n def flag_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> Generator[Flag, None, None]:\n def get_level_flag_targets(\n self, level: Level, reading: Reading, **kwargs\n ) -> Iterable[EntityType]:\n def process_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> ProcessResultType:\n def __init__(self, *args, **kwargs):\n def get_level_filter(self) -> LevelFilter:\n def set_level_filter(self, level_filter: LevelFilterType):\n def inject_level_filter_from_step(self, step: \"LevelFilterProcessingStepMixin\"):\n def _get_level_filter(inner_self) -> LevelFilter:\n def process_reading(self, reading: Reading, **kwargs) -> ProcessResultType:\n def flag_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> Generator[Flag, None, None]:\n def assert_valid_level(self, level: Level, reading: Reading, **kwargs):\n def process_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> ProcessResultType:\n def __init__(\n self,\n level_filter: Optional[LevelFilterType] = None,\n task_name: Optional[Union[AV, str]] = None,\n flag_name: Optional[Union[AV, str]] = None,\n flag_type: Optional[Union[FlagType, str]] = None,\n flag_severity: Optional[Union[FlagSeverity, str]] = None,\n reason: Optional[Union[AV, str]] = None,\n stop_processing: bool = False,\n flagging_function: Optional[Callable[..., bool]] = None,\n ):\n def process_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> ProcessResultType:\n def get_level_flag_targets(\n self, level: Level, reading: Reading, **kwargs\n ) -> Iterable[EntityType]:\n def get_flag_targets(\n self, reading: Reading, level: Optional[Level] = None, **kwargs\n ) 
-> Iterable[EntityType]:\n def flag_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> Generator[Flag, None, None]:\n def set_steps(self, steps: List[ProcessingStep]):\n def inject_level_filter_from_step(self, step: LevelFilterProcessingStepMixin):"
},
{
"identifier": "LimbModality",
"path": "dispel/processing/modalities.py",
"snippet": "class LimbModality(AVEnum):\n \"\"\"Type of limb exercises enumerator.\"\"\"\n\n UPPER_LIMB = (\"upper limb\", \"upper\")\n LOWER_LIMB = (\"lower limb\", \"lower\")"
},
{
"identifier": "SensorModality",
"path": "dispel/processing/modalities.py",
"snippet": "class SensorModality(AVEnum):\n # FIXME remove class\n \"\"\"Sensor types enumerator.\"\"\"\n\n def unit(self, order: int = 1) -> str:\n \"\"\"Get the unit of the sensor signal.\n\n Parameters\n ----------\n order\n The unit order.\n\n Returns\n -------\n str\n The unit of the sensor.\n \"\"\"\n basis = {\"acc\": \"G\", \"gyr\": \"rad/s\", \"itrem\": \"pixel\"}[self.abbr]\n if order == 1:\n return basis\n return \"/\".join([x + f\"^{order}\" for x in basis.split(\"/\")])\n\n ACCELEROMETER = (\"accelerometer\", \"acc\")\n GYROSCOPE = (\"gyroscope\", \"gyr\")\n INTENTIONAL = (\"intentional tremors\", \"itrem\")"
},
{
"identifier": "Apply",
"path": "dispel/processing/transform.py",
"snippet": "class Apply(TransformStep):\n r\"\"\"Apply a method onto columns of a raw data set.\n\n Parameters\n ----------\n data_set_id\n The data set id of the data set on which the method is to be applied\n method\n The method in question. This can be any method that accepts a pandas series and\n returns an array of same length. See also :meth:`pandas.DataFrame.apply`.\n method_kwargs\n Optional arguments required for the methods.\n columns\n The columns to be considered during the method application.\n drop_nan\n ```True`` if NaN values are to be droped after transformation.\n level_filter\n An optional :class:`~dispel.processing.level.LevelFilter` to determine the levels\n to be transformed. If no filter is provided, all levels will be transformed. The\n ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\\ s\n and lists of either and passes them to a\n :class:`~dispel.processing.level.LevelIdFilter` for convenience.\n new_data_set_id\n The ``id`` used for the :class:`~dispel.data.raw.RawDataSetDefinition`.\n\n Examples\n --------\n Assuming you want to low-pass filter your gyroscope data of a ``reading`` you can\n create the following step to do so (note that the filtering expects a\n time-index-based and constant frequency-based data frame, so you might have to\n leverage :class:`~dispel.providers.generic.sensor.SetTimestampIndex` and\n :class:`~dispel.providers.generic.sensor.Resample` first):\n\n >>> from dispel.processing.transform import Apply\n >>> from dispel.signal.filter import butterworth_low_pass_filter\n >>> step = Apply(\n ... 'gyroscope_ts_resampled',\n ... butterworth_low_pass_filter,\n ... dict(cutoff=1.5, order=2),\n ... list('xyz'),\n ... )\n\n This step will apply a 2. order butterworth low pass filter to the columns ``x``,\n ``y``, and ``z`` with a cut-off frequency of 1.5Hz.\n \"\"\"\n\n def __init__(\n self,\n data_set_id: str,\n method: Callable[..., Any],\n method_kwargs: Optional[Dict[str, Any]] = None,\n columns: Optional[List[str]] = None,\n new_data_set_id: Optional[str] = None,\n drop_nan: Optional[bool] = False,\n level_filter: Optional[LevelFilterType] = None,\n ):\n method_kwargs = method_kwargs or {}\n columns = columns or DEFAULT_COLUMNS\n\n def _transform_function(data: pd.DataFrame) -> pd.DataFrame:\n res = data[columns].apply(method, **method_kwargs)\n if drop_nan:\n return res.dropna()\n return res\n\n def _definition_factory(column: str) -> RawDataValueDefinition:\n return RawDataValueDefinition(\n column, f\"{method.__name__} applied on {column}\"\n )\n\n super().__init__(\n data_set_id,\n _transform_function,\n new_data_set_id or f\"{data_set_id}_{method.__name__}\",\n [_definition_factory(column) for column in columns],\n level_filter=level_filter,\n )"
},
{
"identifier": "ComputeGravityRotationMatrices",
"path": "dispel/providers/generic/sensor.py",
"snippet": "class ComputeGravityRotationMatrices(TransformStep):\n r\"\"\"Compute a series of rotation matrices to align sensors to gravity.\n\n This transformation step creates a series of rotation matrices based on the\n gravity information contained in the accelerometer sensor. This allows to\n rotate other sensors on a desired orientation related to gravity. This is\n in particular of interest if we want to measure physical interactions with\n devices around the plane perpendicular to gravity.\n\n Parameters\n ----------\n target_gravity\n The target gravity vector, e.g. ``(-1, 0, 0)`` to create rotation\n matrices that rotate the x-axis of a device onto gravity.\n level_filter\n An optional :class:`~dispel.processing.level.LevelFilter` to determine the\n levels to be transformed. If no filter is provided, all levels will be\n transformed. The ``level_filter`` also accepts :class:`str`,\n :class:`~dispel.data.core.LevelId`\\ s and lists of either and passes them\n to a :class:`~dispel.processing.level.LevelFilter` for convenience.\n \"\"\"\n\n def __init__(\n self, data_set_id: str, target_gravity: Tuple[float, float, float], **kwargs\n ):\n def _transform_function(data: pd.DataFrame) -> pd.Series:\n return compute_rotation_matrices_quaternion(\n data[GRAVITY_COLUMNS], target_gravity\n )\n\n super().__init__(\n data_set_id,\n _transform_function,\n \"gravity_rotation_matrices\",\n [RawDataValueDefinition(\"rotation_matrix\", \"Rotation Matrix\")],\n **kwargs,\n )"
},
{
"identifier": "Resample",
"path": "dispel/providers/generic/sensor.py",
"snippet": "class Resample(NotEmptyDataSetAssertionMixin, TransformStep):\n r\"\"\"Resample a time-based raw data set to a specific sampling frequency.\n\n The resampling creates a new raw data set which is accessible via the\n data set comprised of the original one concatenated with ``_resampled``.\n\n Parameters\n ----------\n data_set_id\n The data set to be resampled. This has to be a data set that uses a\n time-based index. You might first have to apply the\n :class:`SetTimestampIndex` processing step before you can apply\n this step.\n aggregations\n A list of resampling methods to be applied in order. Each can be any\n method that is also accepted by :meth:`pandas.DataFrame.agg`.\n columns\n The columns to be considered during the resampling.\n freq\n The frequency to resample to. See also\n :meth:`pandas.DataFrame.resample` for details. If freq is not provided\n the frequency is estimated automatically taking the median frequency.\n max_frequency_distance\n An optional integer specifying the maximum accepted\n distance between the expected frequency and the estimated frequency\n above which we raise an error.\n level_filter\n An optional :class:`dispel.processing.level.LevelFilter` to determine the\n levels to be transformed. If no filter is provided, all levels will be\n transformed. The ``level_filter`` also accepts :class:`str`,\n :class:`~dispel.data.core.LevelId`\\ s and lists of either and passes them\n to a :class:`~dispel.processing.level.LevelFilter` for convenience.\n \"\"\"\n\n def __init__(\n self,\n data_set_id: str,\n aggregations: Iterable[str],\n columns: Iterable[str],\n freq: Optional[Union[float, str]] = None,\n max_frequency_distance: Optional[int] = None,\n level_filter: Optional[LevelFilterType] = None,\n ):\n def _resample(\n data: pd.DataFrame, sampling_frequency: Optional[Union[float, str]] = None\n ) -> pd.DataFrame:\n # Check if a sampling frequency is provided\n # If not, we discretized the sampling frequency\n if sampling_frequency is None:\n discretize_args = [data, VALID_FREQ_LIST]\n if max_frequency_distance:\n discretize_args.append(max_frequency_distance)\n sampling_frequency = discretize_sampling_frequency(*discretize_args)\n # Convert the float sampling frequency to a Timedelta format\n if not isinstance(sampling_frequency, str):\n sampling_frequency = pd.Timedelta(1 / sampling_frequency, unit=\"s\")\n resample_obj = data[columns].resample(sampling_frequency)\n for method in aggregations:\n resample_obj = resample_obj.agg(method)\n return resample_obj\n\n def _definition_factory(column: str) -> RawDataValueDefinition:\n return RawDataValueDefinition(\n column, f\"{column} resampled with {aggregations}\"\n )\n\n super().__init__(\n data_set_id,\n partial(_resample, sampling_frequency=freq),\n f\"{data_set_id}_resampled\",\n [_definition_factory(column) for column in columns],\n level_filter=level_filter,\n )"
},
{
"identifier": "RotateSensorWithGravityRotationMatrices",
"path": "dispel/providers/generic/sensor.py",
"snippet": "class RotateSensorWithGravityRotationMatrices(TransformStep):\n r\"\"\"Apply a series of rotation matrices to a sensor.\n\n This is a complementary step to :class:`ComputeGravityRotationMatrices` and\n applies the rotation matrices to the specified sensor.\n\n Parameters\n ----------\n data_set_id\n The id of the sensor data set to be rotated.\n columns\n The columns of the sensor data set to be considered in the rotation.\n level_filter\n An optional :class:`~dispel.processing.level.LevelFilter` to determine the\n levels to be transformed. If no filter is provided, all levels will be\n transformed. The ``level_filter`` also accepts :class:`str`,\n :class:`~dispel.data.core.LevelId`\\ s and lists of either and passes them\n to a :class:`~dispel.processing.level.LevelFilter` for convenience.\n\n Examples\n --------\n Assuming you want to rotate the gyroscope vector onto gravity you can\n achieve this by chaining the following steps:\n\n .. doctest:: processing\n\n >>> from dispel.data.raw import DEFAULT_COLUMNS\n >>> from dispel.processing import process\n >>> from dispel.providers.generic.sensor import (\n ... ComputeGravityRotationMatrices,\n ... RotateSensorWithGravityRotationMatrices\n ... )\n >>> cols = DEFAULT_COLUMNS\n >>> steps = [\n ... ComputeGravityRotationMatrices('accelerometer', (-1, 0, 0)),\n ... RotateSensorWithGravityRotationMatrices('gyroscope', cols)\n ... ]\n >>> _ = process(reading, steps) # doctest: +SKIP\n\n The results of the roation are available in the raw data set with the id\n ``<data_set_id>_rotated``:\n\n .. doctest:: processing\n :options: +NORMALIZE_WHITESPACE\n\n >>> level = reading.get_level(level_id) # doctest: +SKIP\n >>> level.get_raw_data_set('gyroscope').data.head() # doctest: +SKIP\n x y z ts\n 0 0.035728 -0.021515 0.014879 2020-05-04 17:31:38.574\n 1 -0.012046 0.005010 -0.009029 2020-05-04 17:31:38.625\n 2 0.006779 0.000761 -0.003253 2020-05-04 17:31:38.680\n 3 0.032636 -0.020272 -0.021915 2020-05-04 17:31:38.729\n 4 0.007495 -0.014061 0.012886 2020-05-04 17:31:38.779\n >>> level.get_raw_data_set(\n ... 'gyroscope_rotated'\n ... ).data.head() # doctest: +SKIP\n x y z\n 0 -0.002309 -0.042509 -0.012182\n 1 -0.003754 0.014983 0.003624\n 2 -0.002237 -0.002116 -0.006901\n 3 -0.030461 -0.021654 -0.023656\n 4 0.001203 -0.019580 0.005924\n \"\"\"\n\n def __init__(\n self,\n data_set_id: str,\n columns: Iterable[str],\n level_filter: Optional[LevelFilterType] = None,\n ):\n def _transform_function(\n sensor_df: pd.DataFrame, matrices: pd.DataFrame\n ) -> pd.DataFrame:\n return apply_rotation_matrices(\n matrices[\"rotation_matrix\"], sensor_df[columns]\n )\n\n def _definition_factory(column: str) -> RawDataValueDefinition:\n return RawDataValueDefinition(column, f\"{column} rotated\")\n\n super().__init__(\n [data_set_id, \"gravity_rotation_matrices\"],\n _transform_function,\n f\"{data_set_id}_rotated\",\n [_definition_factory(column) for column in columns],\n level_filter=level_filter,\n )"
},
{
"identifier": "SetTimestampIndex",
"path": "dispel/providers/generic/sensor.py",
"snippet": "class SetTimestampIndex(TransformStep):\n r\"\"\"Create a new time series based on a date time or time delta column.\n\n Parameters\n ----------\n data_set_id\n The data set id of the time series to be transformed.\n columns\n The columns to consider in the new raw data set.\n time_stamp_column\n The time series column name to use as index.\n level_filter\n An optional :class:`dispel.processing.level.LevelFilter` to determine the\n levels to be transformed. If no filter is provided, all levels will be\n transformed. The ``level_filter`` also accepts :class:`str`,\n :class:`~dispel.data.core.LevelId`\\ s and lists of either and passes them\n to a :class:`~dispel.processing.level.LevelFilter` for convenience.\n duplicates\n The strategy used to handle duplicates.\n Has to be one of ``ignore``, ``raise``, ``first``, ``last``.\n \"\"\"\n\n def __init__(\n self,\n data_set_id: str,\n columns: List[str],\n time_stamp_column: str = \"ts\",\n level_filter: Optional[LevelFilterType] = None,\n duplicates: Optional[str] = None,\n ):\n def _transform_function(\n data: pd.DataFrame, rm_duplicate: Optional[str]\n ) -> pd.DataFrame:\n if rm_duplicate is None:\n return data.set_index(time_stamp_column)[columns].copy()\n res = data.set_index(time_stamp_column)[columns].copy()\n return res[~res.index.duplicated(keep=duplicates)]\n\n super().__init__(\n data_set_id,\n lambda x: _transform_function(x, duplicates),\n f\"{data_set_id}_ts\",\n [RawDataValueDefinition(column, column) for column in columns],\n level_filter=level_filter,\n )"
},
{
"identifier": "TransformGyroscope",
"path": "dispel/providers/generic/sensor.py",
"snippet": "class TransformGyroscope(TransformStep):\n r\"\"\"Format gyroscope data to ADS format if not already the case.\n\n On ADS format, the gyroscope is synchronized with the accelerometer. Here\n we make sure gyroscope is synchronized with the acc data set.\n\n Parameters\n ----------\n level_filter\n An optional :class:`dispel.processing.level.LevelFilter` to determine the\n levels to be transformed. If no filter is provided, all levels will be\n transformed. The ``level_filter`` also accepts :class:`str`,\n :class:`~dispel.data.core.LevelId`\\ s and lists of either and passes them\n to a :class:`~dispel.processing.level.LevelFilter` for convenience.\n \"\"\"\n\n data_set_ids = [\"acc\", \"gyroscope\"]\n new_data_set_id = \"gyroscope\"\n\n definitions = [\n RawDataValueDefinition(\n axis, f\"Rotation speed along the {axis} axis.\", data_type=\"float\"\n )\n for axis in \"xyz\"\n ] + [RawDataValueDefinition(\"ts\", \"time index\")]\n\n @staticmethod\n @transformation\n def _synchronize_gyroscope(\n accelerometer: pd.DataFrame, gyroscope: pd.DataFrame, reading: Reading\n ) -> pd.DataFrame:\n if isinstance(reading, BDHReading):\n # Merging on the timestamps vs. on the indexes\n acc_renamed = accelerometer.rename(\n mapper={\n \"x\": \"userAccelerationX\",\n \"y\": \"userAccelerationY\",\n \"z\": \"userAccelerationZ\",\n },\n axis=1,\n )\n return pd.merge_asof(acc_renamed, gyroscope, on=\"ts\", direction=\"nearest\")[\n [\"ts\", \"x\", \"y\", \"z\"]\n ]\n return gyroscope"
},
{
"identifier": "TransformUserAcceleration",
"path": "dispel/providers/generic/sensor.py",
"snippet": "class TransformUserAcceleration(TransformStep):\n r\"\"\"Format accelerometer data to ADS format if not already the case.\n\n Prior to formatting, linear acceleration and gravity are decoupled\n from acceleration.\n\n Parameters\n ----------\n level_filter\n An optional :class:`dispel.processing.level.LevelFilter` to determine the\n levels to be transformed. If no filter is provided, all levels will be\n transformed. The ``level_filter`` also accepts :class:`str`,\n :class:`~dispel.data.core.LevelId`\\ s and lists of either and passes them\n to a :class:`~dispel.processing.level.LevelFilter` for convenience.\n \"\"\"\n\n data_set_ids = \"accelerometer\"\n new_data_set_id = \"acc\"\n\n definitions = (\n [\n RawDataValueDefinition(\n f\"userAcceleration{axis}\",\n f\"Linear Acceleration along the {axis} axis.\",\n data_type=\"float\",\n )\n for axis in \"XYZ\"\n ]\n + [\n RawDataValueDefinition(\n f\"gravity{axis}\",\n f\"gravity component along the {axis} axis.\",\n data_type=\"float\",\n )\n for axis in \"XYZ\"\n ]\n + [RawDataValueDefinition(\"ts\", \"time index\")]\n )\n\n @staticmethod\n def add_gravity(\n accelerometer: pd.DataFrame,\n level: Level,\n gravity: Optional[pd.DataFrame] = None,\n ) -> pd.DataFrame:\n \"\"\"Format gravity data to ADS format.\"\"\"\n if gravity is None:\n cols = [\"x\", \"y\", \"z\"]\n raw_acc = level.get_raw_data_set(\"raw_accelerometer\").data\n accelerometer = raw_acc\n if level.has_raw_data_set(\"attitude\"):\n ori = level.get_raw_data_set(\"attitude\").data\n ori_cols = [\"w\", \"x\", \"y\", \"z\"]\n lin_accelerometer, gravity = remove_gravity_component_ori(\n accelerometer[cols].values, ori[ori_cols].values\n )\n lin_accelerometer = pd.DataFrame(lin_accelerometer, columns=cols)\n gravity = pd.DataFrame(gravity, columns=cols)\n else:\n lin_accelerometer, gravity = remove_gravity_component(\n accelerometer[cols]\n )\n\n res = pd.DataFrame(\n {\n \"userAccelerationX\": lin_accelerometer[\"x\"],\n \"userAccelerationY\": lin_accelerometer[\"y\"],\n \"userAccelerationZ\": lin_accelerometer[\"z\"],\n }\n )\n res[\"gravityX\"] = gravity[\"x\"]\n res[\"gravityY\"] = gravity[\"y\"]\n res[\"gravityZ\"] = gravity[\"z\"]\n res[\"ts\"] = accelerometer[\"ts\"]\n else:\n # Merging on the timestamps vs. on the indexes\n acc_renamed = accelerometer.rename(\n mapper={\n \"x\": \"userAccelerationX\",\n \"y\": \"userAccelerationY\",\n \"z\": \"userAccelerationZ\",\n },\n axis=1,\n )\n gravity_renamed = gravity.rename(\n mapper={\"x\": \"gravityX\", \"y\": \"gravityY\", \"z\": \"gravityZ\"}, axis=1\n )\n merged = acc_renamed.merge(gravity_renamed, how=\"outer\")\n merged = merged.set_index(\"ts\")\n merged_sorted = merged.sort_index()\n merged_sorted_interpolated = merged_sorted.interpolate(\n method=\"nearest\", limit_direction=\"both\"\n )\n res = merged_sorted_interpolated.loc[acc_renamed.ts].reset_index()\n return res.dropna()\n\n @staticmethod\n @transformation\n def _reformat(accelerometer: pd.DataFrame, level: Level) -> pd.DataFrame:\n target_cols = {\n f\"{sensor}{axis}\"\n for sensor in (\"userAcceleration\", \"gravity\")\n for axis in \"XYZ\"\n }\n if not target_cols.issubset(accelerometer.columns):\n try:\n return TransformUserAcceleration.add_gravity(\n accelerometer, level, level.get_raw_data_set(\"gravity\").data\n )\n except ValueError:\n # Happens in BDH pinch\n return TransformUserAcceleration.add_gravity(accelerometer, level)\n return accelerometer"
},
{
"identifier": "butterworth_high_pass_filter",
"path": "dispel/signal/filter.py",
"snippet": "def butterworth_high_pass_filter(\n data: pd.Series,\n cutoff: float,\n order: int = 2,\n freq: Optional[float] = None,\n zero_phase: Optional[bool] = True,\n) -> pd.Series:\n \"\"\"Filter a series with a butterworth high-pass filter.\n\n Parameters\n ----------\n data\n The time series to be filtered\n cutoff\n The upper bound of frequencies to filter\n freq\n The sampling frequency of the time series in Hz. If the passed ``data`` has\n an evenly spaced time series index it will be determined automatically.\n order\n The order of the filter\n zero_phase\n Boolean indicating whether zero phase filter (filtfilt) to be used\n\n Returns\n -------\n pandas.Series\n The filtered ``data``.\n \"\"\"\n return _butterworth_filter(data, \"high\", cutoff, order, freq, zero_phase)"
},
{
"identifier": "savgol_filter",
"path": "dispel/signal/filter.py",
"snippet": "def savgol_filter(data: pd.Series, window: int = 41, order: int = 3) -> pd.Series:\n \"\"\"Apply the Savitzky-Golay filter on a class:`~pandas.Series`.\n\n Parameters\n ----------\n data\n Input data\n window\n the length of the filter window\n order\n The order of the polynomial used to fit the samples\n\n Returns\n -------\n pandas.Series\n Filtered data\n \"\"\"\n # apply filter.\n res = pd.Series(\n signal.savgol_filter(data, window, order), index=data.index, name=data.name\n )\n return res"
},
{
"identifier": "check_amplitude",
"path": "dispel/signal/sensor.py",
"snippet": "def check_amplitude(\n data: pd.DataFrame, min_amplitude: float, max_amplitude: float\n) -> bool:\n \"\"\"Check if the signal amplitudes belong to a reasonable range.\n\n The function will return true only if all the values of each column are between the\n min and max amplitude bounds.\n\n Parameters\n ----------\n data\n A data frame containing one column or more. The data contains in columns must\n all have the same nature as the bounds are applied on the entire data frame.\n min_amplitude\n The expected min amplitude.\n max_amplitude\n The expected max amplitude.\n\n Returns\n -------\n bool\n ``True`` if all the values are in the range. ``False`` otherwise.\n \"\"\"\n amplitude = data.max() - data.min()\n return amplitude.between(left=min_amplitude, right=max_amplitude).all()"
},
{
"identifier": "detrend_signal",
"path": "dispel/signal/sensor.py",
"snippet": "def detrend_signal(signal: pd.Series) -> pd.Series:\n \"\"\"Detrend signal and remove offset component.\n\n The final signal will end up centered on zero and stationary. This function is based\n on :func:`scipy.stats.linregress`.\n\n Parameters\n ----------\n signal: pandas.Series\n The raw signal.\n\n Returns\n -------\n pandas.Series\n The detrended signal.\n \"\"\"\n original_x = signal.index.to_numpy(float)\n signal_without_na = signal.dropna()\n y = signal_without_na.to_numpy(float)\n x = signal_without_na.index.to_numpy(float)\n (\n slope,\n intercept,\n *_,\n ) = stats.linregress(x, y)\n y_estimate = slope * original_x + intercept\n return signal - y_estimate"
}
] | from typing import Iterable, List, Optional, Set, Tuple
from dispel.data.levels import Level
from dispel.data.raw import DEFAULT_COLUMNS, GRAVITY_COLUMNS
from dispel.processing import ProcessingStep
from dispel.processing.level import (
DefaultLevelFilter,
LevelFilter,
LevelFilterType,
LevelIdFilter,
ProcessingStepGroup,
)
from dispel.processing.modalities import LimbModality, SensorModality
from dispel.processing.transform import Apply
from dispel.providers.generic.sensor import (
ComputeGravityRotationMatrices,
Resample,
RotateSensorWithGravityRotationMatrices,
SetTimestampIndex,
TransformGyroscope,
TransformUserAcceleration,
)
from dispel.signal.filter import butterworth_high_pass_filter, savgol_filter
from dispel.signal.sensor import check_amplitude, detrend_signal | 10,636 | """Core functionalities to preprocess signal data."""
class FilterSensorNoise(Apply):
r"""Apply a filter that will remove any sensor noise into a given dataset.
This filter is a Savitzky-Golay one.
Parameters
----------
data_set_id
The data set id on which the transformation is to be performed ('accelerometer',
'gyroscope').
columns
The columns onto which the filtering step has to be applied.
kwargs
Additional arguments that are passed to the
:meth:`~dispel.processing.core.ProcessingStep.process` function of each step. This
allows providing additional values, such as placeholder values in value
definitions to the actual processing function.
Notes
-----
The Savitzky-Golay filter is tuned as in [Martinez et al. 2012]_ to remove sensor
noise and to smooth the signal. The window size is thus set to 41 points and the
filter is of order 3.
"""
def __init__(self, data_set_id: str, columns: Optional[List[str]] = None, **kwargs):
| """Core functionalities to preprocess signal data."""
class FilterSensorNoise(Apply):
r"""Apply a filter that will remove any sensor noise into a given dataset.
This filter is a Savitzky-Golay one.
Parameters
----------
data_set_id
The data set id on which the transformation is to be performed ('accelerometer',
'gyroscope').
columns
The columns onto which the filtering step has to be applied.
kwargs
Additional arguments that are passed to the
:meth:`~dispel.processing.core.ProcessingStep.process` function of each step. This
allows providing additional values, such as placeholder values in value
definitions to the actual processing function.
Notes
-----
The Savitzky-Golay filter is tuned as in [Martinez et al. 2012]_ to remove sensor
noise and to smooth the signal. The window size is thus set to 41 points and the
filter is of order 3.
"""
def __init__(self, data_set_id: str, columns: Optional[List[str]] = None, **kwargs): | columns = columns or DEFAULT_COLUMNS | 1 | 2023-11-14 10:06:46+00:00 | 12k |
NevermindNilas/TheAnimeScripter | src/segment/train.py | [
{
"identifier": "InSPyReNet",
"path": "src/segment/model/inspyrenet.py",
"snippet": "class InSPyReNet(nn.Module):\n def __init__(\n self,\n backbone,\n in_channels,\n depth=64,\n base_size=(384, 384),\n threshold: Optional[int] = 512,\n **kwargs,\n ):\n super(InSPyReNet, self).__init__()\n self.backbone = backbone\n self.in_channels = in_channels\n self.depth = depth\n self.base_size = base_size\n self.threshold = threshold\n\n self.context1 = PAA_e(\n self.in_channels[0], self.depth, base_size=self.base_size, stage=0\n )\n self.context2 = PAA_e(\n self.in_channels[1], self.depth, base_size=self.base_size, stage=1\n )\n self.context3 = PAA_e(\n self.in_channels[2], self.depth, base_size=self.base_size, stage=2\n )\n self.context4 = PAA_e(\n self.in_channels[3], self.depth, base_size=self.base_size, stage=3\n )\n self.context5 = PAA_e(\n self.in_channels[4], self.depth, base_size=self.base_size, stage=4\n )\n\n self.decoder = PAA_d(\n self.depth * 3, depth=self.depth, base_size=base_size, stage=2\n )\n\n self.attention0 = SICA(\n self.depth,\n depth=self.depth,\n base_size=self.base_size,\n stage=0,\n lmap_in=True,\n )\n self.attention1 = SICA(\n self.depth * 2,\n depth=self.depth,\n base_size=self.base_size,\n stage=1,\n lmap_in=True,\n )\n self.attention2 = SICA(\n self.depth * 2, depth=self.depth, base_size=self.base_size, stage=2\n )\n\n self.sod_loss_fn = lambda x, y: weighted_bce_loss_with_logits(\n x, y, reduction=\"mean\"\n ) + iou_loss_with_logits(x, y, reduction=\"mean\")\n self.pc_loss_fn = nn.L1Loss()\n\n self.ret = lambda x, target: F.interpolate(\n x, size=target.shape[-2:], mode=\"bilinear\", align_corners=False\n )\n self.res = lambda x, size: F.interpolate(\n x, size=size, mode=\"bilinear\", align_corners=False\n )\n self.des = lambda x, size: F.interpolate(x, size=size, mode=\"nearest\")\n\n self.image_pyramid = ImagePyramid(7, 1)\n\n self.transition0 = Transition(17)\n self.transition1 = Transition(9)\n self.transition2 = Transition(5)\n\n self.forward = self.forward_inference\n\n def _apply(self, fn):\n super(InSPyReNet, self)._apply(fn)\n self.image_pyramid._apply(fn)\n self.transition0._apply(fn)\n self.transition1._apply(fn)\n self.transition2._apply(fn)\n return self\n\n def train(self, mode=True):\n super(InSPyReNet, self).train(mode)\n self.forward = self.forward_train if mode else self.forward_inference\n return self\n\n def forward_inspyre(self, x):\n B, _, H, W = x.shape\n\n x1, x2, x3, x4, x5 = self.backbone(x)\n\n x1 = self.context1(x1) # 4\n x2 = self.context2(x2) # 4\n x3 = self.context3(x3) # 8\n x4 = self.context4(x4) # 16\n x5 = self.context5(x5) # 32\n\n f3, d3 = self.decoder([x3, x4, x5]) # 16\n f3 = self.res(f3, (H // 4, W // 4))\n f2, p2 = self.attention2(torch.cat([x2, f3], dim=1), d3.detach())\n\n d2 = self.image_pyramid.reconstruct(d3.detach(), p2) # 4\n\n x1 = self.res(x1, (H // 2, W // 2))\n f2 = self.res(f2, (H // 2, W // 2))\n\n f1, p1 = self.attention1(\n torch.cat([x1, f2], dim=1), d2.detach(), p2.detach()\n ) # 2\n d1 = self.image_pyramid.reconstruct(d2.detach(), p1) # 2\n\n f1 = self.res(f1, (H, W))\n _, p0 = self.attention0(f1, d1.detach(), p1.detach()) # 2\n d0 = self.image_pyramid.reconstruct(d1.detach(), p0) # 2\n\n out = dict()\n out[\"saliency\"] = [d3, d2, d1, d0]\n out[\"laplacian\"] = [p2, p1, p0]\n\n return out\n\n def forward_train(self, x, y):\n B, _, H, W = x.shape\n out = self.forward_inspyre(x)\n\n d3, d2, d1, d0 = out[\"saliency\"]\n p2, p1, p0 = out[\"laplacian\"]\n\n y1 = self.image_pyramid.reduce(y)\n y2 = self.image_pyramid.reduce(y1)\n y3 = self.image_pyramid.reduce(y2)\n\n loss = (\n 
self.pc_loss_fn(\n self.des(d3, (H, W)),\n self.des(self.image_pyramid.reduce(d2), (H, W)).detach(),\n )\n * 0.0001\n )\n\n loss += (\n self.pc_loss_fn(\n self.des(d2, (H, W)),\n self.des(self.image_pyramid.reduce(d1), (H, W)).detach(),\n )\n * 0.0001\n )\n\n loss += (\n self.pc_loss_fn(\n self.des(d1, (H, W)),\n self.des(self.image_pyramid.reduce(d0), (H, W)).detach(),\n )\n * 0.0001\n )\n\n loss += self.sod_loss_fn(self.des(d3, (H, W)), self.des(y3, (H, W)))\n loss += self.sod_loss_fn(self.des(d2, (H, W)), self.des(y2, (H, W)))\n loss += self.sod_loss_fn(self.des(d1, (H, W)), self.des(y1, (H, W)))\n loss0 = self.sod_loss_fn(self.des(d0, (H, W)), self.des(y, (H, W)))\n loss += loss0\n\n pred = torch.sigmoid(d0)\n\n pred = (pred - pred.min()) / (pred.max() - pred.min() + 1e-8)\n sample = {\n \"pred\": pred,\n \"loss\": loss,\n \"loss0\": loss0,\n \"saliency\": [d3, d2, d1, d0],\n \"laplacian\": [p2, p1, p0],\n }\n return sample\n\n def forward_inference(self, x):\n B, _, H, W = x.shape\n\n if self.threshold is None:\n out = self.forward_inspyre(x)\n d3, d2, d1, d0 = out[\"saliency\"]\n p2, p1, p0 = out[\"laplacian\"]\n\n elif H <= self.threshold or W <= self.threshold:\n out = self.forward_inspyre(self.res(x, self.base_size))\n\n d3, d2, d1, d0 = out[\"saliency\"]\n p2, p1, p0 = out[\"laplacian\"]\n\n else:\n # LR Saliency Pyramid\n lr_out = self.forward_inspyre(self.res(x, self.base_size))\n lr_d3, lr_d2, lr_d1, lr_d0 = lr_out[\"saliency\"]\n lr_p2, lr_p1, lr_p0 = lr_out[\"laplacian\"]\n\n # HR Saliency Pyramid\n if H % 32 != 0 or W % 32 != 0:\n x = self.res(x, ((H // 32) * 32, (W // 32) * 32))\n hr_out = self.forward_inspyre(x)\n hr_d3, hr_d2, hr_d1, hr_d0 = hr_out[\"saliency\"]\n hr_p2, hr_p1, hr_p0 = hr_out[\"laplacian\"]\n\n # Pyramid Blending\n d3 = self.ret(lr_d0, hr_d3)\n\n t2 = self.ret(self.transition2(d3), hr_p2)\n p2 = t2 * hr_p2\n d2 = self.image_pyramid.reconstruct(d3, p2)\n\n t1 = self.ret(self.transition1(d2), hr_p1)\n p1 = t1 * hr_p1\n d1 = self.image_pyramid.reconstruct(d2, p1)\n\n t0 = self.ret(self.transition0(d1), hr_p0)\n p0 = t0 * hr_p0\n d0 = self.image_pyramid.reconstruct(d1, p0)\n\n if d0.shape[2] != H or d0.shape[3] != 2:\n d0 = self.res(d0, (H, W))\n pred = torch.sigmoid(d0)\n pred = (pred - pred.min()) / (pred.max() - pred.min() + 1e-8)\n sample = {\n \"pred\": pred,\n \"loss\": 0,\n \"saliency\": [d3, d2, d1, d0],\n \"laplacian\": [p2, p1, p0],\n }\n return sample\n\n @staticmethod\n def compute_loss(sample):\n return sample[\"loss0\"], sample[\"loss\"]"
},
{
"identifier": "InSPyReNet_Res2Net50",
"path": "src/segment/model/inspyrenet.py",
"snippet": "def InSPyReNet_Res2Net50(\n depth=64,\n pretrained=True,\n base_size: Optional[Union[int, Tuple[int, int]]] = None,\n **kwargs,\n):\n if base_size is None:\n base_size = (384, 384)\n if isinstance(base_size, int):\n base_size = (base_size, base_size)\n return InSPyReNet(\n res2net50_v1b(pretrained=pretrained),\n [64, 256, 512, 1024, 2048],\n depth,\n base_size,\n threshold=None,\n **kwargs,\n )"
},
{
"identifier": "InSPyReNet_SwinB",
"path": "src/segment/model/inspyrenet.py",
"snippet": "def InSPyReNet_SwinB(\n depth=64,\n pretrained=False,\n base_size: Optional[Union[int, Tuple[int, int]]] = None,\n **kwargs,\n):\n if base_size is None:\n base_size = (384, 384)\n if isinstance(base_size, int):\n base_size = (base_size, base_size)\n return InSPyReNet(\n SwinB(pretrained=pretrained),\n [128, 128, 256, 512, 1024],\n depth,\n base_size,\n **kwargs,\n )"
},
{
"identifier": "ISNetDIS",
"path": "src/segment/model/isnet.py",
"snippet": "class ISNetDIS(nn.Module):\n def __init__(self, in_ch=3, out_ch=1):\n super(ISNetDIS, self).__init__()\n\n self.conv_in = nn.Conv2d(in_ch, 64, 3, stride=2, padding=1)\n self.pool_in = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage1 = RSU7(64, 32, 64)\n self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage2 = RSU6(64, 32, 128)\n self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage3 = RSU5(128, 64, 256)\n self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage4 = RSU4(256, 128, 512)\n self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage5 = RSU4F(512, 256, 512)\n self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage6 = RSU4F(512, 256, 512)\n\n # decoder\n self.stage5d = RSU4F(1024, 256, 512)\n self.stage4d = RSU4(1024, 128, 256)\n self.stage3d = RSU5(512, 64, 128)\n self.stage2d = RSU6(256, 32, 64)\n self.stage1d = RSU7(128, 16, 64)\n\n self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)\n self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)\n self.side3 = nn.Conv2d(128, out_ch, 3, padding=1)\n self.side4 = nn.Conv2d(256, out_ch, 3, padding=1)\n self.side5 = nn.Conv2d(512, out_ch, 3, padding=1)\n self.side6 = nn.Conv2d(512, out_ch, 3, padding=1)\n\n # self.outconv = nn.Conv2d(6*out_ch,out_ch,1)\n\n @staticmethod\n def compute_loss_kl(preds, targets, dfs, fs, mode=\"MSE\"):\n return muti_loss_fusion_kl(preds, targets, dfs, fs, mode=mode)\n\n @staticmethod\n def compute_loss(args):\n if len(args) == 3:\n ds, dfs, labels = args\n return muti_loss_fusion(ds, labels)\n else:\n ds, dfs, labels, fs = args\n return muti_loss_fusion_kl(ds, labels, dfs, fs, mode=\"MSE\")\n\n def forward(self, x):\n hx = x\n\n hxin = self.conv_in(hx)\n hx = self.pool_in(hxin)\n\n # stage 1\n hx1 = self.stage1(hxin)\n hx = self.pool12(hx1)\n\n # stage 2\n hx2 = self.stage2(hx)\n hx = self.pool23(hx2)\n\n # stage 3\n hx3 = self.stage3(hx)\n hx = self.pool34(hx3)\n\n # stage 4\n hx4 = self.stage4(hx)\n hx = self.pool45(hx4)\n\n # stage 5\n hx5 = self.stage5(hx)\n hx = self.pool56(hx5)\n\n # stage 6\n hx6 = self.stage6(hx)\n hx6up = _upsample_like(hx6, hx5)\n\n # -------------------- decoder --------------------\n hx5d = self.stage5d(torch.cat((hx6up, hx5), 1))\n hx5dup = _upsample_like(hx5d, hx4)\n\n hx4d = self.stage4d(torch.cat((hx5dup, hx4), 1))\n hx4dup = _upsample_like(hx4d, hx3)\n\n hx3d = self.stage3d(torch.cat((hx4dup, hx3), 1))\n hx3dup = _upsample_like(hx3d, hx2)\n\n hx2d = self.stage2d(torch.cat((hx3dup, hx2), 1))\n hx2dup = _upsample_like(hx2d, hx1)\n\n hx1d = self.stage1d(torch.cat((hx2dup, hx1), 1))\n\n # side output\n d1 = self.side1(hx1d)\n d1 = _upsample_like(d1, x)\n\n d2 = self.side2(hx2d)\n d2 = _upsample_like(d2, x)\n\n d3 = self.side3(hx3d)\n d3 = _upsample_like(d3, x)\n\n d4 = self.side4(hx4d)\n d4 = _upsample_like(d4, x)\n\n d5 = self.side5(hx5d)\n d5 = _upsample_like(d5, x)\n\n d6 = self.side6(hx6)\n d6 = _upsample_like(d6, x)\n\n # d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))\n\n # return [torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6)], [hx1d, hx2d, hx3d, hx4d, hx5d, hx6]\n return [d1, d2, d3, d4, d5, d6], [hx1d, hx2d, hx3d, hx4d, hx5d, hx6]"
},
{
"identifier": "ISNetGTEncoder",
"path": "src/segment/model/isnet.py",
"snippet": "class ISNetGTEncoder(nn.Module):\n def __init__(self, in_ch=1, out_ch=1):\n super(ISNetGTEncoder, self).__init__()\n\n self.conv_in = myrebnconv(\n in_ch, 16, 3, stride=2, padding=1\n ) # nn.Conv2d(in_ch,64,3,stride=2,padding=1)\n\n self.stage1 = RSU7(16, 16, 64)\n self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage2 = RSU6(64, 16, 64)\n self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage3 = RSU5(64, 32, 128)\n self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage4 = RSU4(128, 32, 256)\n self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage5 = RSU4F(256, 64, 512)\n self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage6 = RSU4F(512, 64, 512)\n\n self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)\n self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)\n self.side3 = nn.Conv2d(128, out_ch, 3, padding=1)\n self.side4 = nn.Conv2d(256, out_ch, 3, padding=1)\n self.side5 = nn.Conv2d(512, out_ch, 3, padding=1)\n self.side6 = nn.Conv2d(512, out_ch, 3, padding=1)\n\n @staticmethod\n def compute_loss(args):\n preds, targets = args\n return muti_loss_fusion(preds, targets)\n\n def forward(self, x):\n hx = x\n\n hxin = self.conv_in(hx)\n # hx = self.pool_in(hxin)\n\n # stage 1\n hx1 = self.stage1(hxin)\n hx = self.pool12(hx1)\n\n # stage 2\n hx2 = self.stage2(hx)\n hx = self.pool23(hx2)\n\n # stage 3\n hx3 = self.stage3(hx)\n hx = self.pool34(hx3)\n\n # stage 4\n hx4 = self.stage4(hx)\n hx = self.pool45(hx4)\n\n # stage 5\n hx5 = self.stage5(hx)\n hx = self.pool56(hx5)\n\n # stage 6\n hx6 = self.stage6(hx)\n\n # side output\n d1 = self.side1(hx1)\n d1 = _upsample_like(d1, x)\n\n d2 = self.side2(hx2)\n d2 = _upsample_like(d2, x)\n\n d3 = self.side3(hx3)\n d3 = _upsample_like(d3, x)\n\n d4 = self.side4(hx4)\n d4 = _upsample_like(d4, x)\n\n d5 = self.side5(hx5)\n d5 = _upsample_like(d5, x)\n\n d6 = self.side6(hx6)\n d6 = _upsample_like(d6, x)\n\n # d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))\n\n # return [torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6)], [hx1, hx2, hx3, hx4, hx5, hx6]\n return [d1, d2, d3, d4, d5, d6], [hx1, hx2, hx3, hx4, hx5, hx6]"
},
{
"identifier": "MODNet",
"path": "src/segment/model/modnet.py",
"snippet": "class MODNet(nn.Module):\n \"\"\"Architecture of MODNet\"\"\"\n\n def __init__(\n self,\n in_channels=3,\n hr_channels=32,\n backbone_arch=\"mobilenetv2\",\n backbone_pretrained=False,\n ):\n super(MODNet, self).__init__()\n\n self.in_channels = in_channels\n self.hr_channels = hr_channels\n self.backbone_arch = backbone_arch\n self.backbone_pretrained = backbone_pretrained\n\n self.backbone = SUPPORTED_BACKBONES[self.backbone_arch](self.in_channels)\n\n self.lr_branch = LRBranch(self.backbone)\n self.hr_branch = HRBranch(self.hr_channels, self.backbone.enc_channels)\n self.f_branch = FusionBranch(self.hr_channels, self.backbone.enc_channels)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n self._init_conv(m)\n elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.InstanceNorm2d):\n self._init_norm(m)\n\n if self.backbone_pretrained:\n self.backbone.load_pretrained_ckpt()\n\n def forward(self, img, inference):\n pred_semantic, lr8x, [enc2x, enc4x] = self.lr_branch(img, inference)\n pred_detail, hr2x = self.hr_branch(img, enc2x, enc4x, lr8x, inference)\n pred_matte = self.f_branch(img, lr8x, hr2x)\n\n return pred_semantic, pred_detail, pred_matte\n\n @staticmethod\n def compute_loss(args):\n pred_semantic, pred_detail, pred_matte, image, trimap, gt_matte = args\n semantic_loss, detail_loss, matte_loss = loss_func(\n pred_semantic, pred_detail, pred_matte, image, trimap, gt_matte\n )\n loss = semantic_loss + detail_loss + matte_loss\n return matte_loss, loss\n\n def freeze_norm(self):\n norm_types = [nn.BatchNorm2d, nn.InstanceNorm2d]\n for m in self.modules():\n for n in norm_types:\n if isinstance(m, n):\n m.eval()\n continue\n\n def _init_conv(self, conv):\n nn.init.kaiming_uniform_(conv.weight, a=0, mode=\"fan_in\", nonlinearity=\"relu\")\n if conv.bias is not None:\n nn.init.constant_(conv.bias, 0)\n\n def _init_norm(self, norm):\n if norm.weight is not None:\n nn.init.constant_(norm.weight, 1)\n nn.init.constant_(norm.bias, 0)\n\n def _apply(self, fn):\n super(MODNet, self)._apply(fn)\n blurer._apply(fn) # let blurer's device same as modnet\n return self"
},
{
"identifier": "U2NET",
"path": "src/segment/model/u2net.py",
"snippet": "class U2NET(nn.Module):\n def __init__(self, cfgs, out_ch):\n super(U2NET, self).__init__()\n self.out_ch = out_ch\n self._make_layers(cfgs)\n\n def forward(self, x):\n sizes = _size_map(x, self.height)\n maps = [] # storage for maps\n\n # side saliency map\n def unet(x, height=1):\n if height < 6:\n x1 = getattr(self, f\"stage{height}\")(x)\n x2 = unet(getattr(self, \"downsample\")(x1), height + 1)\n x = getattr(self, f\"stage{height}d\")(torch.cat((x2, x1), 1))\n side(x, height)\n return _upsample_like(x, sizes[height - 1]) if height > 1 else x\n else:\n x = getattr(self, f\"stage{height}\")(x)\n side(x, height)\n return _upsample_like(x, sizes[height - 1])\n\n def side(x, h):\n # side output saliency map (before sigmoid)\n x = getattr(self, f\"side{h}\")(x)\n x = _upsample_like(x, sizes[1])\n maps.append(x)\n\n def fuse():\n # fuse saliency probability maps\n maps.reverse()\n x = torch.cat(maps, 1)\n x = getattr(self, \"outconv\")(x)\n maps.insert(0, x)\n # return [torch.sigmoid(x) for x in maps]\n return [x for x in maps]\n\n unet(x)\n maps = fuse()\n return maps\n\n @staticmethod\n def compute_loss(args):\n preds, labels_v = args\n d0, d1, d2, d3, d4, d5, d6 = preds\n loss0 = bce_loss(d0, labels_v)\n loss1 = bce_loss(d1, labels_v)\n loss2 = bce_loss(d2, labels_v)\n loss3 = bce_loss(d3, labels_v)\n loss4 = bce_loss(d4, labels_v)\n loss5 = bce_loss(d5, labels_v)\n loss6 = bce_loss(d6, labels_v)\n\n loss = loss0 + loss1 + loss2 + loss3 + loss4 + loss5 + loss6\n\n return loss0, loss\n\n def _make_layers(self, cfgs):\n self.height = int((len(cfgs) + 1) / 2)\n self.add_module(\"downsample\", nn.MaxPool2d(2, stride=2, ceil_mode=True))\n for k, v in cfgs.items():\n # build rsu block\n self.add_module(k, RSU(v[0], *v[1]))\n if v[2] > 0:\n # build side layer\n self.add_module(\n f\"side{v[0][-1]}\", nn.Conv2d(v[2], self.out_ch, 3, padding=1)\n )\n # build fuse layer\n self.add_module(\n \"outconv\", nn.Conv2d(int(self.height * self.out_ch), self.out_ch, 1)\n )"
},
{
"identifier": "U2NET_full2",
"path": "src/segment/model/u2net.py",
"snippet": "def U2NET_full2():\n full = {\n # cfgs for building RSUs and sides\n # {stage : [name, (height(L), in_ch, mid_ch, out_ch, dilated), side]}\n \"stage1\": [\"En_1\", (8, 3, 32, 64), -1],\n \"stage2\": [\"En_2\", (7, 64, 32, 128), -1],\n \"stage3\": [\"En_3\", (6, 128, 64, 256), -1],\n \"stage4\": [\"En_4\", (5, 256, 128, 512), -1],\n \"stage5\": [\"En_5\", (5, 512, 256, 512, True), -1],\n \"stage6\": [\"En_6\", (5, 512, 256, 512, True), 512],\n \"stage5d\": [\"De_5\", (5, 1024, 256, 512, True), 512],\n \"stage4d\": [\"De_4\", (5, 1024, 128, 256), 256],\n \"stage3d\": [\"De_3\", (6, 512, 64, 128), 128],\n \"stage2d\": [\"De_2\", (7, 256, 32, 64), 64],\n \"stage1d\": [\"De_1\", (8, 128, 16, 64), 64],\n }\n return U2NET(cfgs=full, out_ch=1)"
},
{
"identifier": "U2NET_lite2",
"path": "src/segment/model/u2net.py",
"snippet": "def U2NET_lite2():\n lite = {\n # cfgs for building RSUs and sides\n # {stage : [name, (height(L), in_ch, mid_ch, out_ch, dilated), side]}\n \"stage1\": [\"En_1\", (8, 3, 16, 64), -1],\n \"stage2\": [\"En_2\", (7, 64, 16, 64), -1],\n \"stage3\": [\"En_3\", (6, 64, 16, 64), -1],\n \"stage4\": [\"En_4\", (5, 64, 16, 64), -1],\n \"stage5\": [\"En_5\", (5, 64, 16, 64, True), -1],\n \"stage6\": [\"En_6\", (5, 64, 16, 64, True), 64],\n \"stage5d\": [\"De_5\", (5, 128, 16, 64, True), 64],\n \"stage4d\": [\"De_4\", (5, 128, 16, 64), 64],\n \"stage3d\": [\"De_3\", (6, 128, 16, 64), 64],\n \"stage2d\": [\"De_2\", (7, 128, 16, 64), 64],\n \"stage1d\": [\"De_1\", (8, 128, 16, 64), 64],\n }\n return U2NET(cfgs=lite, out_ch=1)"
}
] | import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import torch.optim as optim
from pytorch_lightning import Trainer
from .model import ISNetDIS, ISNetGTEncoder, U2NET, U2NET_full2, U2NET_lite2, MODNet \
, InSPyReNet, InSPyReNet_Res2Net50, InSPyReNet_SwinB | 8,241 |
# warnings.filterwarnings("ignore")
net_names = ["isnet_is", "isnet", "isnet_gt", "u2net", "u2netl", "modnet", "inspyrnet_res", "inspyrnet_swin"]
def get_net(net_name, img_size):
if net_name == "isnet":
return ISNetDIS()
elif net_name == "isnet_is":
return ISNetDIS()
elif net_name == "isnet_gt":
return ISNetGTEncoder()
elif net_name == "u2net":
return U2NET_full2()
elif net_name == "u2netl":
return U2NET_lite2()
elif net_name == "modnet":
return MODNet()
elif net_name == "inspyrnet_res":
return InSPyReNet_Res2Net50(base_size=img_size)
elif net_name == "inspyrnet_swin":
return InSPyReNet_SwinB(base_size=img_size)
raise NotImplementedError(f"Invalid net_name: {net_name}")
class AnimeSegmentation(pl.LightningModule):
def __init__(self, net_name, img_size=None, lr=1e-3):
super().__init__()
assert net_name in net_names
self.img_size = img_size
self.lr = lr
self.net = get_net(net_name, img_size)
if net_name == "isnet_is":
self.gt_encoder = get_net("isnet_gt", img_size)
self.gt_encoder.requires_grad_(False)
else:
self.gt_encoder = None
@classmethod
def try_load(cls, net_name, ckpt_path, map_location=None, img_size=None):
state_dict = torch.load(ckpt_path, map_location=map_location)
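# Checkpoints saved by Lightning carry training metadata such as "epoch", so restore them via load_from_checkpoint;
# otherwise treat the file as a plain state_dict ("net."-prefixed keys mean the whole module was saved, not just the inner net).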
if "epoch" in state_dict:
return cls.load_from_checkpoint(ckpt_path, net_name=net_name, img_size=img_size, map_location=map_location)
else:
model = cls(net_name, img_size)
if any([k.startswith("net.") for k, v in state_dict.items()]):
model.load_state_dict(state_dict)
else:
model.net.load_state_dict(state_dict)
return model
def configure_optimizers(self):
optimizer = optim.Adam(self.net.parameters(), lr=self.lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
return optimizer
def forward(self, x):
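# Both ISNet variants return ([d1, ..., d6], intermediate features); keep the full-resolution side output d1 and apply sigmoid.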
if isinstance(self.net, ISNetDIS):
return self.net(x)[0][0].sigmoid()
if isinstance(self.net, ISNetGTEncoder):
return self.net(x)[0][0].sigmoid()
|
# warnings.filterwarnings("ignore")
net_names = ["isnet_is", "isnet", "isnet_gt", "u2net", "u2netl", "modnet", "inspyrnet_res", "inspyrnet_swin"]
def get_net(net_name, img_size):
if net_name == "isnet":
return ISNetDIS()
elif net_name == "isnet_is":
return ISNetDIS()
elif net_name == "isnet_gt":
return ISNetGTEncoder()
elif net_name == "u2net":
return U2NET_full2()
elif net_name == "u2netl":
return U2NET_lite2()
elif net_name == "modnet":
return MODNet()
elif net_name == "inspyrnet_res":
return InSPyReNet_Res2Net50(base_size=img_size)
elif net_name == "inspyrnet_swin":
return InSPyReNet_SwinB(base_size=img_size)
raise NotImplementedError(f"Invalid net_name: {net_name}")
class AnimeSegmentation(pl.LightningModule):
def __init__(self, net_name, img_size=None, lr=1e-3):
super().__init__()
assert net_name in net_names
self.img_size = img_size
self.lr = lr
self.net = get_net(net_name, img_size)
if net_name == "isnet_is":
self.gt_encoder = get_net("isnet_gt", img_size)
self.gt_encoder.requires_grad_(False)
else:
self.gt_encoder = None
@classmethod
def try_load(cls, net_name, ckpt_path, map_location=None, img_size=None):
state_dict = torch.load(ckpt_path, map_location=map_location)
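# Checkpoints saved by Lightning carry training metadata such as "epoch", so restore them via load_from_checkpoint;
# otherwise treat the file as a plain state_dict ("net."-prefixed keys mean the whole module was saved, not just the inner net).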
if "epoch" in state_dict:
return cls.load_from_checkpoint(ckpt_path, net_name=net_name, img_size=img_size, map_location=map_location)
else:
model = cls(net_name, img_size)
if any([k.startswith("net.") for k, v in state_dict.items()]):
model.load_state_dict(state_dict)
else:
model.net.load_state_dict(state_dict)
return model
def configure_optimizers(self):
optimizer = optim.Adam(self.net.parameters(), lr=self.lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
return optimizer
def forward(self, x):
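# Both ISNet variants return ([d1, ..., d6], intermediate features); keep the full-resolution side output d1 and apply sigmoid.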
if isinstance(self.net, ISNetDIS):
return self.net(x)[0][0].sigmoid()
if isinstance(self.net, ISNetGTEncoder):
return self.net(x)[0][0].sigmoid() | elif isinstance(self.net, U2NET): | 6 | 2023-11-14 22:10:11+00:00 | 12k |
chuzhumin98/LLM_Eval | PRE/process.py | [
{
"identifier": "DataLoader",
"path": "PRE/data.py",
"snippet": "class DataLoader:\n '''\n The loader to load for evaluated task, with given prompt template to generate a series of prompts feeding for each LLM\n '''\n def __init__(self, args):\n self.path_data = args['path_data'] # the load path for the data\n self.format = args['format'] # the data format, csv (need a title line) or json (each line is a single data item)\n self.path_prompt = args['path_prompt'] if 'path_prompt' in args else None # the path of prompt template. In the prompt template, using {{key}} for the replacement of the key. For example, in the prompt \"You need answer a question: {{question}}\", the \"question\" field need to be included in the data\n if not os.path.exists(self.path_data):\n raise FileExistsError(\"Load task data failed: file not exist!\")\n assert self.format in ['csv', 'json']\n \n \n def generate_reader(self):\n if self.format == 'csv':\n with open(self.path_data, encoding='utf-8') as f:\n gen = csv.DictReader(f, skipinitialspace=True)\n elif self.format == 'json':\n gen = open(self.path_data, encoding='utf-8')\n else:\n raise Exception(\"Invalid data format\")\n return gen\n \n def get_prompt(self):\n if self.path_prompt is None:\n raise Exception(\"Exception: missing argument path_prompt\")\n if not os.path.exists(self.path_prompt):\n raise FileExistsError(\"Load task prompt template failed: file not exist!\")\n self.template_prompt = open(self.path_prompt, encoding='utf-8').read().strip()\n \n gen = self.generate_reader()\n \n for row in gen:\n if self.format == 'json':\n item = json.loads(row.strip())\n else:\n item = row\n \n prompt = self.template_prompt\n for key in item:\n prompt = prompt.replace(\"{{\" + key + \"}}\", item[key])\n yield prompt # a generator to return each prompt\n \n def get_task_items(self):\n data_list = []\n gen = self.generate_reader()\n for row in gen:\n if self.format == 'json':\n item = json.loads(row.strip())\n elif self.format == 'csv':\n item = dict(row)\n data_list.append(item)\n return data_list"
},
{
"identifier": "Auto_API",
"path": "PRE/api.py",
"snippet": "class Auto_API:\n @staticmethod\n def instantiate_api(api_type, args) -> LLM_API:\n for at, _API in API_type2class_list:\n if api_type == at:\n return _API(args)\n raise Exception(f\"Invalid api_type: {api_type}\")"
},
{
"identifier": "EXAM",
"path": "PRE/exam.py",
"snippet": "class EXAM:\n '''\n Conduct qualified exam, filtering qualified LLMs to become peer reviewers\n '''\n def __init__(self, args) -> None:\n self.source = args['source'] # same or others; same: the evaluated task and responses, others: independent prompts, no need for refer item\n self.mode = args['mode'] # pointwise, pairwise\n self.parser_type = args['parser_type'] # int, float, str\n '''\n If the source is same,\n In pointwise mode, the data consists key \"#index\" (the line index of the task) and key \"#source\" (the LLM to generate the response). The expected evaulate response is an integer or float number;\n In pairwise mode, the data consists key \"#index\" (the line index of the task), key \"#source1\" (the LLM 1 to generate the response) and key \"#source2\" (the LLM 2 to generate the response). The expected evaluate response is three possible token, meaning -1 (1 is better), 0 (tied), 1 (2 is better) respectively\n also, if we conduct reference exam, for each exam data item, it requires key \"#answer\" denotes the gold standard (integer for the pairwise mode)\n '''\n assert self.source in ['same', 'others']\n assert self.mode in ['pointwise', 'pairwise']\n assert self.parser_type in ['int', 'float', 'str']\n if self.parser_type == 'str':\n self.nominal_list = [nn.strip() for nn in args['nominal_list'].split(',')]\n self.nominal_ticks = [int(nn.strip()) for nn in args['nominal_list'].split(',')]\n else:\n self.nominal_list, self.nominal_ticks = None, None\n \n if self.source == 'same': # load generated task data and responses\n path_config_task_data = args['config_task_data']\n self.task_name = args['task_name']\n self.save_dir = args['save_dir'] # the exam result save dir, the exam evaluation save filename = [save_dir] / exam_responses / [task_name]_[model_name].json, each line is one result with json {response: str, result: float/int}\n if not os.path.exists(path_config_task_data):\n raise FileExistsError(\"Load task_data config failed: file not exist!\")\n\n config_task = yaml.load(open(path_config_task_data, 'r'), Loader=yaml.FullLoader) # single task config\n data_loader = DataLoader(config_task) # a task data loader\n self.task_data = data_loader.get_task_items()\n self.path_exam_same_data = args['path_exam_same_data']\n self.format_exam_same_data = args['format_exam_same_data']\n else: # load other exam data\n self.path_exam_others_data = args['path_exam_others_data']\n self.format_exam_others_data = args['format_exam_others_data']\n if not os.path.exists(self.path_exam_others_data):\n raise FileExistsError(\"Load exam others mode data failed: file not exist!\")\n self.reference_exam = args['conduct_reference_exam'] # True or False, whether to compare the responses v.s. gold standard\n self.inner_consistency_exam = args['conduct_inner_consistency_exam'] # True or False, whether to conduct inner-consistency exam\n if self.mode == 'pairwise':\n if self.reference_exam:\n self.p_gold = float(args['p_gold']) if 'p_gold' in args else 0.6 # accuarcy v.s. 
gold standard\n if self.inner_consistency_exam:\n self.p_cons = float(args['p_cons']) if 'p_cons' in args else 0.6 # consistency between two kinds of prompts\n elif self.mode == 'pointwise':\n self.metric_pointwise = args['metric_pointwise'] if 'metric_pointwise' in args else 'EM' # EM (exact match, proportion >= threshold) or MSE (mean square error, mse <= threshold)\n assert self.metric_pointwise in ['EM', \"MSE\"]\n if self.reference_exam:\n if self.metric_pointwise == 'EM':\n self.p_gold = float(args['p_gold']) if 'p_gold' in args else 0.6 # accuarcy v.s. gold standard\n elif self.metric_pointwise == 'MSE':\n self.MSE_acc = float(args['MSE_gold']) if 'MSE_gold' in args else 1. # MSE v.s. gold standard\n \n if self.inner_consistency_exam:\n if self.metric_pointwise == 'EM':\n self.p_cons = float(args['p_cons']) if 'p_cons' in args else 0.6 # consistency between two kinds of prompts\n elif self.metric_pointwise == 'MSE':\n self.MSE_cons = float(args['MSE_cons']) if 'MSE_cons' in args else 1. # MSE between two kinds of prompts\n\n path_prompt = args['path_exam_prompt']\n if not os.path.exists(path_prompt):\n raise FileExistsError(\"Load exam prompt template failed: file not exist!\")\n self.template_prompt = open(path_prompt, encoding='utf-8').read().strip()\n if self.inner_consistency_exam:\n path_prompt2 = args['path_exam_prompt2'] # used in inner consistency exam\n if not os.path.exists(path_prompt2):\n raise FileExistsError(\"Load exam prompt template 2 (used in inner-consistency exam) failed: file not exist!\")\n self.template_prompt2 = open(path_prompt2, encoding='utf-8').read().strip()\n \n if not self.inner_consistency_exam and not self.reference_exam:\n warnings.warn(\"Have not set any qualified exam!\", RuntimeWarning)\n \n \n def load_exam_prompts(self, prompt_template):\n if self.source == 'others':\n loader = DataLoader({\"path_data\": self.path_exam_others_data,\n \"format\": self.format_exam_others_data,})\n data_others = loader.get_task_items()\n prompts = []\n for item in data_others:\n prompt = prompt_template\n for key in item:\n prompt = prompt.replace(\"{{\" + key + \"}}\", item[key])\n prompts.append(prompt)\n if self.reference_exam:\n answers = [item['#answer'] for item in data_others]\n else:\n answers = None\n return prompts, answers\n elif self.source == 'same':\n loader = DataLoader({\"path_data\": self.path_exam_same_data,\n \"format\": self.format_exam_same_data,})\n samples_same = loader.get_task_items()\n evaluatees_list = set()\n if self.mode == 'pointwise':\n for sample in samples_same:\n evaluatees_list.add(sample['#source'])\n elif self.mode == 'pairwise':\n for sample in samples_same:\n evaluatees_list.add(sample['#source1'])\n evaluatees_list.add(sample['#source2'])\n responses_evaluatee_dict = dict()\n for ev in evaluatees_list:\n responses = [] # responses list for evaluatee ev\n path = f\"{self.save_dir}/task_responses/{self.task_name}_{ev}.json\"\n if not os.path.exists(path):\n raise FileExistsError(f\"Load {path} failed: file not exist!\")\n with open(path, 'r') as f:\n while True:\n line = f.readline().strip()\n if line:\n response = json.loads(line)['response']\n responses.append(response)\n else:\n break\n responses_evaluatee_dict[ev] = responses\n \n prompts = []\n for sample in samples_same:\n sidx = sample['#index']\n task = dict(self.task_data[sidx])\n if self.mode == 'pointwise':\n src = sample['#source']\n task['#source'] = responses_evaluatee_dict[src][sidx]\n elif self.mode == 'pairwise':\n src1 = sample['#source1']\n src2 = 
sample['#source2']\n task['#source1'] = responses_evaluatee_dict[src1][sidx]\n task['#source2'] = responses_evaluatee_dict[src2][sidx]\n prompt = prompt_template\n for key in task:\n prompt = prompt.replace(\"{{\" + key + \"}}\", task[key])\n prompts.append(prompt)\n \n if self.reference_exam:\n answers = [item['#answer'] for item in samples_same]\n else:\n answers = None\n return prompts, answers\n \n def calculate_metric(self, resultsA, resultsB) -> float: \n '''\n Calculate the evaluation metric between resultsA and resultsB\n pointwise or pairwise; EM/accuary or MSE (minus)\n '''\n assert len(resultsA) == len(resultsB)\n assert len(resultsA) > 0\n N = len(resultsA)\n p = 0.\n if self.mode == 'pairwise':\n for j in range(N):\n r, a = resultsA[j], resultsB[j]\n if r * a > 0:\n p += 1.\n elif r * a == 0:\n p += .5\n \n elif self.mode == 'pointwise':\n if self.metric_pointwise == 'EM':\n for j in range(N):\n r, a = resultsA[j], resultsB[j]\n if r == a:\n p += 1.\n elif self.metric_pointwise == 'MSE':\n for j in range(N):\n r, a = resultsA[j], resultsB[j]\n p -= (r - a) ** 2\n\n p /= float(N)\n return p\n \n \n def conduct_exam(self, config_api_evaluator):\n '''\n Conduct qualified exam, return a list of qualified apis with the same format of list config_api_evaluator, and their scores [score_list (refer acc, inner acc) for each qualified LLM], MSE will put the minus one\n '''\n apis = [Auto_API.instantiate_api(config_api['api_type'], config_api) for config_api in config_api_evaluator]\n if not self.inner_consistency_exam and not self.reference_exam:\n return config_api_evaluator, [[] for _ in config_api_evaluator]\n \n prompts, answers = self.load_exam_prompts(self.template_prompt)\n if self.inner_consistency_exam:\n prompts2, answers2 = self.load_exam_prompts(self.template_prompt2)\n \n os.makedirs(f\"{self.save_dir}/exam_responses\", exist_ok=True)\n qualified_apis, scores_qualified = [], [] # configs of these qualified apis, its corresponding api\n for i, api in enumerate(apis):\n path_out = f\"{self.save_dir}/exam_responses/{self.task_name}_{api.model_name}.json\"\n\n if os.path.exists(path_out):\n data = open(path_out).readlines()\n else:\n data = []\n if len(data) < len(prompts):\n fout = open(path_out, 'w')\n for line in data:\n fout.write(line)\n for prompt in prompts[len(data):]:\n response_orig = api.chat(prompt)\n result_parse = parse_response(response_orig, self.parser_type, self.nominal_list, self.nominal_ticks)\n line = json.dumps({\"response\": response_orig,\n 'result': result_parse})\n data.append(line)\n fout.write(line + '\\n')\n fout.close()\n results = [json.loads(line.strip())['result'] for line in data]\n \n eval_this = [config_api_evaluator[i]]\n \n if self.reference_exam:\n p_refer = self.calculate_metric(results, answers)\n p_thre = None\n if self.mode == 'pairwise':\n p_thre = self.p_gold\n elif self.mode == 'pointwise':\n if self.metric_pointwise == 'EM':\n p_thre = self.p_gold\n elif self.metric_pointwise == 'MSE':\n p_thre = -self.MSE_acc\n \n if p_refer < p_thre:\n print(f'model {api.model_name} failed to pass the reference exam')\n continue\n eval_this.append(p_refer)\n \n if self.inner_consistency_exam:\n path_out = f\"{self.save_dir}/exam_responses/{self.task_name}_{api.model_name}__prompt2.json\"\n\n if os.path.exists(path_out):\n data = open(path_out).readlines()\n else:\n data = []\n if len(data) < len(prompts2):\n fout = open(path_out, 'w')\n for line in data:\n fout.write(line)\n for prompt in prompts2[len(data):]:\n response_orig = api.chat(prompt)\n 
result_parse = parse_response(response_orig, self.parser_type, self.nominal_list, self.nominal_ticks)\n line = json.dumps({\"response\": response_orig,\n 'result': result_parse})\n data.append(line)\n fout.write(line + '\\n')\n fout.close()\n results2 = [json.loads(line.strip())['result'] for line in data]\n\n p_inner = self.calculate_metric(results, results2)\n p_thre = None\n if self.mode == 'pairwise':\n p_thre = self.p_cons\n elif self.mode == 'pointwise':\n if self.metric_pointwise == 'EM':\n p_thre = self.p_cons\n elif self.metric_pointwise == 'MSE':\n p_thre = -self.MSE_cons\n \n if p_inner < p_thre:\n print(f'model {api.model_name} failed to pass the inner-consistency exam')\n continue\n eval_this.append(p_inner)\n \n qualified_apis.append(config_api_evaluator[i])\n scores_qualified.append(eval_this)\n return qualified_apis, scores_qualified"
},
{
"identifier": "PRE",
"path": "PRE/eval.py",
"snippet": "class PRE:\n def __init__(self, args) -> None:\n path_config_eval = args['config_eval']\n if not os.path.exists(path_config_eval):\n raise FileExistsError(\"Load config eval failed: file not exist!\")\n args = copy.deepcopy(args)\n config_eval = yaml.load(open(path_config_eval, 'r'), Loader=yaml.FullLoader)\n args.update(config_eval)\n self.strategy = args['strategy'] # full, ELO, Glicko\n self.mode = args['mode'] # pointwise, pairwise\n if self.strategy in ['ELO', 'Glicko']:\n self.mode = 'pairwise' # sampling strategy, default with pairwise mode\n args['mode'] = 'pairwise'\n assert self.strategy in ['full', 'ELO', 'Glicko']\n assert self.mode in ['pointwise', 'pairwise']\n self.weighted_method = args['weighted_method'] # uniform, log (only accuary/consistency), exp, poly (only accuary/consistency)\n '''\n uniform: the equal weight\n log: log(p) - log(1-p)\n exp: exp(alpha * p)\n poly: p ^ alpha\n '''\n self.alpha = args['alpha'] if 'alpha' in args else 1.\n self.w_gold = args['w_gold'] if 'w_gold' in args else 0.5 # w_gold * s_gold + (1-w_gold) * s_consistency, only used when both of them are used in exam module\n self.evaluators_config = yaml.load_all(open(args['config_api_evaluator'], 'r'), Loader=yaml.FullLoader) # the config of evaluators\n self.evaluators_config = [cf for cf in self.evaluators_config]\n self.evaluator_model_names = [ev['model_name'] for ev in self.evaluators_config]\n self.save_dir = args['save_dir']\n self.task_name = args['task_name']\n # print(f\"evaluatee config: {args['config_api_evaluatee']}\")\n if 'config_api_evaluatee' in args:\n config_apis = yaml.load_all(open(args['config_api_evaluatee'], 'r'), Loader=yaml.FullLoader) # series of APIs\n self.evaluatee_LLM_names = [config_api['model_name'] for config_api in config_apis]\n else:\n self.evaluatee_LLM_names = args['evaluatee_names'].split(',')\n \n self.loader_data = EvalDataLoader(args)\n self.review = PEER_REVIEW(args)\n self.weights = self.weighted_function(args['scores_evaluators']) # the pre-compute weights of each evaluator based on their scores\n return\n \n def load_batch_data(self):\n prompts = self.loader_data.get_full_prompts()\n self.review.peer_review_batch(self.evaluators_config, prompts) # generate the peer review results of each evaluator\n ### load evaluation results\n results = dict()\n for ev_model_name in self.evaluator_model_names:\n path_ev = f\"{self.save_dir}/evaluation_responses/{self.task_name}_{ev_model_name}.json\"\n results_thisllm = []\n with open(path_ev, 'r') as f:\n while True:\n line = f.readline().strip()\n if line:\n results_thisllm.append(json.loads(line))\n else:\n break\n results[ev_model_name] = results_thisllm\n return results\n \n def evaluate(self):\n '''\n the unified api for evaluate, control the whole evaluation procedure\n '''\n if self.strategy == 'full':\n self.evaluate_full()\n else:\n self.evaluate_sample()\n\n def evaluate_full(self):\n '''\n evaluate with the full strategy\n '''\n results = self.load_batch_data()\n ### evaluate with majority voting\n os.makedirs(f\"{self.save_dir}/evaluation_results\", exist_ok=True)\n print(self.evaluatee_LLM_names)\n if self.mode == 'pointwise':\n results_perllm = dict() # evaluate dict of each evaluatee\n for ev in self.evaluator_model_names:\n results_ev = results[ev]\n for item in results_ev:\n model, task_id, label = item['model'], item['task_id'], item['result']\n if model not in results_perllm:\n results_perllm[model] = dict()\n if task_id not in results_perllm[model]:\n results_perllm[model][task_id] = 
[]\n results_perllm[model][task_id].append(label)\n outputs = dict()\n for model in results_perllm:\n outputs[model] = []\n for task_id in results_perllm[model]:\n outputs[model].append(self.aggregate_reviewers_results(results_perllm[model][task_id], self.weights))\n path_res = f\"{self.save_dir}/evaluation_results/{self.task_name}_result_detail.json\"\n json.dump(outputs, open(path_res, 'w'))\n with open(f\"{self.save_dir}/evaluation_results/{self.task_name}_result_overview.txt\", 'w') as f:\n for model in outputs:\n mean_val = np.mean(outputs[model])\n print(f'model {model}: {mean_val}')\n f.write(f'model {model}: {mean_val}\\n')\n elif self.mode == 'pairwise':\n results_perllm = dict() # evaluate dict of each evaluatee\n for i, ev in enumerate(self.evaluator_model_names):\n results_ev = results[ev]\n for item in results_ev:\n modelA, modelB, task_id, label = item['modelA'], item['modelB'], item['task_id'], item['result']\n if modelA <= modelB:\n key = f'{modelA}%{modelB}'\n else:\n key = f'{modelB}%{modelA}'\n label = -label # reversed the preference label if modelB v.s. modelA\n \n if key not in results_perllm:\n results_perllm[key] = dict()\n if task_id not in results_perllm[key]:\n results_perllm[key][task_id] = []\n if len(results_perllm[key][task_id]) < i + 1:\n results_perllm[key][task_id].append([])\n results_perllm[key][task_id][i].append(label)\n outputs = dict()\n for key in results_perllm:\n outputs[key] = []\n for task_id in results_perllm[key]:\n outputs[key].append(self.aggregate_reviewers_results(results_perllm[key][task_id], self.weights))\n path_res = f\"{self.save_dir}/evaluation_results/{self.task_name}_result_detail.json\"\n json.dump(outputs, open(path_res, 'w'))\n with open(f\"{self.save_dir}/evaluation_results/{self.task_name}_result_overview.csv\", 'w') as f:\n evaluatees_dict = {ev: i for i, ev in enumerate(self.evaluatee_LLM_names)}\n accs = np.zeros([len(self.evaluatee_LLM_names), len(self.evaluatee_LLM_names)], dtype=np.float)\n for key in outputs:\n mA, mB = key.split('%')\n idxA, idxB = evaluatees_dict[mA], evaluatees_dict[mB]\n res = np.array(outputs[key])\n mean_val = np.mean(res == 1) + np.mean(res == 0) * 0.5\n accs[idxA, idxB] = mean_val\n accs[idxB, idxA] = 1. - mean_val\n f.write(','.join(['']+self.evaluatee_LLM_names) + '\\n')\n for i in range(len(self.evaluatee_LLM_names)):\n f.write(','.join([self.evaluatee_LLM_names[i]] + [str(num) for num in accs[i]]) + '\\n')\n lines = open(f\"{self.save_dir}/evaluation_results/{self.task_name}_result_overview.csv\", 'r').readlines()\n print(''.join(lines))\n \n def evaluate_sample(self):\n '''\n evaluate with sampling strategies (e.g. ELO, Glicko)\n '''\n results = self.load_batch_data()\n ### only for pairwise mode\n os.makedirs(f\"{self.save_dir}/evaluation_results\", exist_ok=True)\n results_perllm = dict() # evaluate dict of each evaluatee\n for i, ev in enumerate(self.evaluator_model_names):\n results_ev = results[ev]\n for item in results_ev:\n print(item)\n modelA, modelB, task_id, label = item['modelA'], item['modelB'], item['task_id'], item['result']\n if modelA <= modelB:\n key = f'{modelA}%{modelB}'\n else:\n key = f'{modelB}%{modelA}'\n label = -label # reversed the preference label if modelB v.s. 
modelA\n \n if key not in results_perllm:\n results_perllm[key] = dict()\n if task_id not in results_perllm[key]:\n results_perllm[key][task_id] = []\n if len(results_perllm[key][task_id]) < i + 1:\n results_perllm[key][task_id].append([])\n results_perllm[key][task_id][i].append(label)\n games_list = []\n evaluatees_dict = {ev: i for i, ev in enumerate(self.evaluatee_LLM_names)}\n\n for key in results_perllm:\n mA, mB = key.split('%')\n idxA, idxB = evaluatees_dict[mA], evaluatees_dict[mB]\n for task_id in results_perllm[key]:\n games_list.append([idxA, idxB, self.aggregate_reviewers_results(results_perllm[key][task_id], self.weights)])\n indexes = np.array(range(len(games_list)))\n np.random.shuffle(indexes) # randomize the game order\n path_res = f\"{self.save_dir}/evaluation_results/{self.task_name}_result_detail.txt\"\n fout = open(path_res, 'w')\n if self.strategy == 'ELO': # we set K = 16\n def elo_expect_win_rate(x): # x is the ELO difference\n return 1. / (1. + 10. ** (x / 400.))\n rates = [1000., 1000.]\n K = 16.\n for r, idx in enumerate(indexes):\n roleA, roleB, label = games_list[idx]\n eA = elo_expect_win_rate(rates[roleB] - rates[roleA])\n eB = 1. - eA\n sB = (1. + label) / 2. # -1 -> 0, 0 -> 0.5, 1 -> 1\n sA = 1. - sB\n rates[roleA] += K * (sA - eA)\n rates[roleB] += K * (sB - eB)\n fout.write(f\"After round {r}, ELO rate: {rates}\\n\")\n elif self.strategy == 'Glicko':\n # TODO\n pass\n fout.close()\n \n with open(f\"{self.save_dir}/evaluation_results/{self.task_name}_result_overview.csv\", 'w') as f:\n f.write(f\"Final {self.strategy} rate leaderboard:\\n\")\n ranks = np.argsort(-np.array(rates))\n for r in ranks:\n f.write(f\"{self.evaluatee_LLM_names[r]}: {rates[r]}\\n\")\n lines = open(f\"{self.save_dir}/evaluation_results/{self.task_name}_result_overview.csv\", 'r').readlines()\n print(''.join(lines))\n \n def weighted_function(self, scores):\n '''\n return the weight (normalized) of each LLM, with the given weighted method and parameter (alpha and w_gold)\n '''\n assert len(scores) > 0\n N = len(scores)\n if len(scores[0]) == 0 or self.weighted_method == 'uniform': # when no exam or uniform strategy, equal weight\n p = 1. / float(N)\n return np.array([p for _ in range(N)])\n elif self.weighted_method == 'log':\n ws = np.log([s[0] for s in scores]) - np.log([1. - s[0] for s in scores])\n if len(scores[0]) > 1:\n ws2 = np.log([s[1] for s in scores]) - np.log([1. 
- s[1] for s in scores])\n ws = self.w_gold * ws + (1-self.w_gold) * ws2\n ws /= np.sum(ws)\n return ws\n elif self.weighted_method == 'exp':\n ws = np.exp(self.alpha * np.array([s[0] for s in scores]))\n if len(scores[0]) > 1:\n ws2 = np.exp(self.alpha * np.array([s[1] for s in scores]))\n ws = self.w_gold * ws + (1-self.w_gold) * ws2\n ws /= np.sum(ws)\n return ws\n elif self.weighted_method == 'poly':\n ws = np.array([s[0] for s in scores]) ** self.alpha\n if len(scores[0]) > 1:\n ws2 = np.array([s[1] for s in scores]) ** self.alpha\n ws = self.w_gold * ws + (1-self.w_gold) * ws2\n ws /= np.sum(ws)\n return ws\n else:\n raise Exception(\"Unexpected parameter weighted_method!\")\n\n \n def aggregate_reviewers_results(self, results, weights):\n '''\n aggregate results with the given weights\n if mode == 'pointwise', results and weights are all (N) array, N is the size of evaluators; weighted sum\n if mode == 'pairwise', results are (N, 2) array, and weights are (N) array; majority voting, pairwise is already aligned, i.e., if B ~ A is better, then convert into A ~ B is worse\n '''\n assert len(results) == len(weights)\n if self.mode == 'pointwise':\n return sum([results[i] * weights[i] for i in range(len(weights))])\n elif self.mode == 'pairwise':\n cnt_pos, cnt_neg = 0., 0.\n for items in results:\n for item in items:\n if item > 0:\n cnt_pos += 1.\n elif item < 0:\n cnt_neg += 1.\n if cnt_pos > cnt_neg:\n return 1\n elif cnt_pos < cnt_neg:\n return -1\n else:\n return 0"
}
] | import os
import yaml
import json, csv
import copy
import sys
from PRE.data import DataLoader
from PRE.api import Auto_API
from PRE.exam import EXAM
from PRE.eval import PRE | 7,709 | '''
The procedure of the whole peer review framework
'''
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_dir)
class Process:
'''
The control of the whole peer review process
'''
@staticmethod
def run(args): # the API used for automatic evaluation
Process.collect_task_response(args)
qualified_apis, scores_qualified = Process.conduct_qualified_exam(args)
args['config_evaluators'] = qualified_apis
args['scores_evaluators'] = scores_qualified
# print(scores_qualified)
Process.peer_review_and_evaluate(args)
return None
@staticmethod
def collect_task_response(args):
path_config_api_evaluatee = args['config_api_evaluatee']
path_config_task_data = args['config_task_data']
task_name = args['task_name']
save_dir = args['save_dir'] # the task result save dir, the task save filename = [save_dir] / task_responses / [task_name]_[model_name].json, each line is one result with json {response: str}
os.makedirs(os.path.join(save_dir, "task_responses"), exist_ok=True)
if not os.path.exists(path_config_api_evaluatee):
raise FileExistsError("Load api_evaluatee config failed: file not exist!")
if not os.path.exists(path_config_task_data):
raise FileExistsError("Load task_data config failed: file not exist!")
config_apis = yaml.load_all(open(path_config_api_evaluatee, 'r'), Loader=yaml.FullLoader) # series of APIs
config_task = yaml.load(open(path_config_task_data, 'r'), Loader=yaml.FullLoader) # single task config
process_num = args['process_num'] # multi-process or not
| '''
The procedure of the whole peer review framework
'''
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_dir)
class Process:
'''
The control of the whole peer review process
'''
@staticmethod
def run(args): # the API used for automatic evaluation
Process.collect_task_response(args)
qualified_apis, scores_qualified = Process.conduct_qualified_exam(args)
args['config_evaluators'] = qualified_apis
args['scores_evaluators'] = scores_qualified
# print(scores_qualified)
Process.peer_review_and_evaluate(args)
return None
@staticmethod
def collect_task_response(args):
path_config_api_evaluatee = args['config_api_evaluatee']
path_config_task_data = args['config_task_data']
task_name = args['task_name']
save_dir = args['save_dir'] # the task result save dir, the task save filename = [save_dir] / task_responses / [task_name]_[model_name].json, each line is one result with json {response: str}
os.makedirs(os.path.join(save_dir, "task_responses"), exist_ok=True)
if not os.path.exists(path_config_api_evaluatee):
raise FileExistsError("Load api_evaluatee config failed: file not exist!")
if not os.path.exists(path_config_task_data):
raise FileExistsError("Load task_data config failed: file not exist!")
config_apis = yaml.load_all(open(path_config_api_evaluatee, 'r'), Loader=yaml.FullLoader) # series of APIs
config_task = yaml.load(open(path_config_task_data, 'r'), Loader=yaml.FullLoader) # single task config
process_num = args['process_num'] # multi-process or not
| data_loader = DataLoader(config_task) # a task data loader | 0 | 2023-11-16 18:40:23+00:00 | 12k |
believethehype/nostrdvm | nostr_dvm/utils/mediasource_utils.py | [
{
"identifier": "get_event_by_id",
"path": "nostr_dvm/utils/nostr_utils.py",
"snippet": "def get_event_by_id(event_id: str, client: Client, config=None) -> Event | None:\n split = event_id.split(\":\")\n if len(split) == 3:\n pk = PublicKey.from_hex(split[1])\n id_filter = Filter().author(pk).custom_tag(Alphabet.D, [split[2]])\n events = client.get_events_of([id_filter], timedelta(seconds=config.RELAY_TIMEOUT))\n else:\n if str(event_id).startswith('note'):\n event_id = EventId.from_bech32(event_id)\n elif str(event_id).startswith(\"nevent\"):\n event_id = Nip19Event.from_bech32(event_id).event_id()\n elif str(event_id).startswith('nostr:note'):\n event_id = EventId.from_nostr_uri(event_id)\n elif str(event_id).startswith(\"nostr:nevent\"):\n event_id = Nip19Event.from_nostr_uri(event_id).event_id()\n\n else:\n event_id = EventId.from_hex(event_id)\n\n id_filter = Filter().id(event_id).limit(1)\n events = client.get_events_of([id_filter], timedelta(seconds=config.RELAY_TIMEOUT))\n if len(events) > 0:\n\n return events[0]\n else:\n return None"
},
{
"identifier": "OvercastDownload",
"path": "nostr_dvm/utils/scrapper/media_scrapper.py",
"snippet": "def OvercastDownload(source_url, target_location):\n def get_title(html_str):\n \"\"\"Get the title from the meta tags\"\"\"\n\n title = re.findall(r\"<meta name=\\\"og:title\\\" content=\\\"(.+)\\\"\", html_str)\n if len(title) == 1:\n return title[0].replace(\"—\", \"-\")\n return None\n\n def get_description(html_str):\n \"\"\"Get the description from the Meta tag\"\"\"\n\n desc_re = r\"<meta name=\\\"og:description\\\" content=\\\"(.+)\\\"\"\n description = re.findall(desc_re, html_str)\n if len(description) == 1:\n return description[0]\n return None\n\n def get_url(html_string):\n \"\"\"Find the URL from the <audio><source>.... tag\"\"\"\n\n url = re.findall(r\"<source src=\\\"(.+?)\\\"\", html_string)\n if len(url) == 1:\n # strip off the last 4 characters to cater for the #t=0 in the URL\n # which urlretrieve flags as invalid\n return url[0][:-4]\n return None\n\n \"\"\"Given a Overcast source URL fetch the file it points to\"\"\"\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) \"\n \"AppleWebKit/537.11 (KHTML, like Gecko) \"\n \"Chrome/23.0.1271.64 Safari/537.11\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Charset\": \"ISO-8859-1,utf-8;q=0.7,*;q=0.3\",\n \"Accept-Encoding\": \"none\",\n \"Accept-Language\": \"en-US,en;q=0.8\",\n \"Connection\": \"keep-alive\",\n }\n req = Request(source_url, None, headers)\n source_data = urlopen(req).read().decode('utf-8')\n title = get_title(source_data)\n url = get_url(source_data)\n\n if url is None or title is None:\n sys.exit(\"Could not find parse URL\")\n if not os.path.exists(target_location):\n req = requests.get(url)\n file = open(target_location, 'wb')\n for chunk in req.iter_content(100000):\n file.write(chunk)\n file.close()"
},
{
"identifier": "XitterDownload",
"path": "nostr_dvm/utils/scrapper/media_scrapper.py",
"snippet": "def XitterDownload(source_url, target_location):\n script_dir = os.path.dirname(os.path.realpath(__file__))\n request_details_file = f\"{script_dir}{os.sep}request_details.json\"\n request_details = json.load(open(request_details_file, \"r\")) # test\n features, variables = request_details[\"features\"], request_details[\"variables\"]\n\n def get_tokens(tweet_url):\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0\",\n \"Accept\": \"*/*\",\n \"Accept-Language\": \"de,en-US;q=0.7,en;q=0.3\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"TE\": \"trailers\",\n }\n\n html = requests.get(tweet_url, headers=headers)\n\n assert (\n html.status_code == 200\n ), f\"Failed to get tweet page. If you are using the correct Twitter URL this suggests a bug in the script. Please open a GitHub issue and copy and paste this message. Status code: {html.status_code}. Tweet url: {tweet_url}\"\n\n mainjs_url = re.findall(\n r\"https://abs.twimg.com/responsive-web/client-web-legacy/main.[^\\.]+.js\",\n html.text,\n )\n\n assert (\n mainjs_url is not None and len(mainjs_url) > 0\n ), f\"Failed to find main.js file. If you are using the correct Twitter URL this suggests a bug in the script. Please open a GitHub issue and copy and paste this message. Tweet url: {tweet_url}\"\n\n mainjs_url = mainjs_url[0]\n mainjs = requests.get(mainjs_url)\n\n assert (\n mainjs.status_code == 200\n ), f\"Failed to get main.js file. If you are using the correct Twitter URL this suggests a bug in the script. Please open a GitHub issue and copy and paste this message. Status code: {mainjs.status_code}. Tweet url: {tweet_url}\"\n\n bearer_token = re.findall(r'AAAAAAAAA[^\"]+', mainjs.text)\n\n assert (\n bearer_token is not None and len(bearer_token) > 0\n ), f\"Failed to find bearer token. If you are using the correct Twitter URL this suggests a bug in the script. Please open a GitHub issue and copy and paste this message. Tweet url: {tweet_url}, main.js url: {mainjs_url}\"\n\n bearer_token = bearer_token[0]\n\n # get the guest token\n with requests.Session() as s:\n s.headers.update(\n {\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0\",\n \"accept\": \"*/*\",\n \"accept-language\": \"de,en-US;q=0.7,en;q=0.3\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"te\": \"trailers\",\n }\n )\n\n s.headers.update({\"authorization\": f\"Bearer {bearer_token}\"})\n\n # activate bearer token and get guest token\n guest_token = s.post(\"https://api.twitter.com/1.1/guest/activate.json\").json()[\n \"guest_token\"\n ]\n\n assert (\n guest_token is not None\n ), f\"Failed to find guest token. If you are using the correct Twitter URL this suggests a bug in the script. Please open a GitHub issue and copy and paste this message. 
Tweet url: {tweet_url}, main.js url: {mainjs_url}\"\n\n return bearer_token, guest_token\n\n def get_details_url(tweet_id, features, variables):\n # create a copy of variables - we don't want to modify the original\n variables = {**variables}\n variables[\"tweetId\"] = tweet_id\n\n return f\"https://twitter.com/i/api/graphql/0hWvDhmW8YQ-S_ib3azIrw/TweetResultByRestId?variables={urllib.parse.quote(json.dumps(variables))}&features={urllib.parse.quote(json.dumps(features))}\"\n # return f\"https://api.twitter.com/graphql/ncDeACNGIApPMaqGVuF_rw/TweetResultByRestId?variables={urllib.parse.quote(json.dumps(variables))}&features={urllib.parse.quote(json.dumps(features))}\"\n\n def get_tweet_details(tweet_url, guest_token, bearer_token):\n tweet_id = re.findall(r\"(?<=status/)\\d+\", tweet_url)\n\n assert (\n tweet_id is not None and len(tweet_id) == 1\n ), f\"Could not parse tweet id from your url. Make sure you are using the correct url. If you are, then file a GitHub issue and copy and paste this message. Tweet url: {tweet_url}\"\n\n tweet_id = tweet_id[0]\n\n # the url needs a url encoded version of variables and features as a query string\n url = get_details_url(tweet_id, features, variables)\n\n details = requests.get(\n url,\n headers={\n \"authorization\": f\"Bearer {bearer_token}\",\n \"x-guest-token\": guest_token,\n },\n )\n\n max_retries = 10\n cur_retry = 0\n while details.status_code == 400 and cur_retry < max_retries:\n try:\n error_json = json.loads(details.text)\n except json.JSONDecodeError:\n assert (\n False\n ), f\"Failed to parse json from details error. details text: {details.text} If you are using the correct Twitter URL this suggests a bug in the script. Please open a GitHub issue and copy and paste this message. Status code: {details.status_code}. Tweet url: {tweet_url}\"\n\n assert (\n \"errors\" in error_json\n ), f\"Failed to find errors in details error json. If you are using the correct Twitter URL this suggests a bug in the script. Please open a GitHub issue and copy and paste this message. Status code: {details.status_code}. Tweet url: {tweet_url}\"\n\n needed_variable_pattern = re.compile(r\"Variable '([^']+)'\")\n needed_features_pattern = re.compile(\n r'The following features cannot be null: ([^\"]+)'\n )\n\n for error in error_json[\"errors\"]:\n needed_vars = needed_variable_pattern.findall(error[\"message\"])\n for needed_var in needed_vars:\n variables[needed_var] = True\n\n needed_features = needed_features_pattern.findall(error[\"message\"])\n for nf in needed_features:\n for feature in nf.split(\",\"):\n features[feature.strip()] = True\n\n url = get_details_url(tweet_id, features, variables)\n\n details = requests.get(\n url,\n headers={\n \"authorization\": f\"Bearer {bearer_token}\",\n \"x-guest-token\": guest_token,\n },\n )\n\n cur_retry += 1\n\n if details.status_code == 200:\n # save new variables\n request_details[\"variables\"] = variables\n request_details[\"features\"] = features\n\n with open(request_details_file, \"w\") as f:\n json.dump(request_details, f, indent=4)\n\n assert (\n details.status_code == 200\n ), f\"Failed to get tweet details. If you are using the correct Twitter URL this suggests a bug in the script. Please open a GitHub issue and copy and paste this message. Status code: {details.status_code}. 
Tweet url: {tweet_url}\"\n\n return details\n\n def get_tweet_status_id(tweet_url):\n sid_patern = r\"https://x\\.com/[^/]+/status/(\\d+)\"\n if tweet_url[len(tweet_url) - 1] != \"/\":\n tweet_url = tweet_url + \"/\"\n\n match = re.findall(sid_patern, tweet_url)\n if len(match) == 0:\n print(\"error, could not get status id from this tweet url :\", tweet_url)\n exit()\n status_id = match[0]\n return status_id\n\n def get_associated_media_id(j, tweet_url):\n sid = get_tweet_status_id(tweet_url)\n pattern = (\n r'\"expanded_url\"\\s*:\\s*\"https://x\\.com/[^/]+/status/'\n + sid\n + r'/[^\"]+\",\\s*\"id_str\"\\s*:\\s*\"\\d+\",'\n )\n matches = re.findall(pattern, j)\n if len(matches) > 0:\n target = matches[0]\n target = target[0: len(target) - 1] # remove the coma at the end\n return json.loads(\"{\" + target + \"}\")[\"id_str\"]\n return None\n\n def extract_mp4s(j, tweet_url, target_all_mp4s=False):\n # pattern looks like https://video.twimg.com/amplify_video/1638969830442237953/vid/1080x1920/lXSFa54mAVp7KHim.mp4?tag=16 or https://video.twimg.com/ext_tw_video/1451958820348080133/pu/vid/720x1280/GddnMJ7KszCQQFvA.mp4?tag=12\n amplitude_pattern = re.compile(\n r\"(https://video.twimg.com/amplify_video/(\\d+)/vid/(\\d+x\\d+)/[^.]+.mp4\\?tag=\\d+)\"\n )\n ext_tw_pattern = re.compile(\n r\"(https://video.twimg.com/ext_tw_video/(\\d+)/pu/vid/(avc1/)?(\\d+x\\d+)/[^.]+.mp4\\?tag=\\d+)\"\n )\n # format - https://video.twimg.com/tweet_video/Fvh6brqWAAQhU9p.mp4\n tweet_video_pattern = re.compile(r'https://video.twimg.com/tweet_video/[^\"]+')\n\n # https://video.twimg.com/ext_tw_video/1451958820348080133/pu/pl/b-CiC-gZClIwXgDz.m3u8?tag=12&container=fmp4\n container_pattern = re.compile(r'https://video.twimg.com/[^\"]*container=fmp4')\n media_id = get_associated_media_id(j, tweet_url)\n # find all the matches\n matches = amplitude_pattern.findall(j)\n matches += ext_tw_pattern.findall(j)\n container_matches = container_pattern.findall(j)\n\n tweet_video_matches = tweet_video_pattern.findall(j)\n\n if len(matches) == 0 and len(tweet_video_matches) > 0:\n return tweet_video_matches\n\n results = {}\n\n for match in matches:\n url, tweet_id, _, resolution = match\n if tweet_id not in results:\n results[tweet_id] = {\"resolution\": resolution, \"url\": url}\n else:\n # if we already have a higher resolution video, then don't overwrite it\n my_dims = [int(x) for x in resolution.split(\"x\")]\n their_dims = [int(x) for x in results[tweet_id][\"resolution\"].split(\"x\")]\n\n if my_dims[0] * my_dims[1] > their_dims[0] * their_dims[1]:\n results[tweet_id] = {\"resolution\": resolution, \"url\": url}\n\n if media_id:\n all_urls = []\n for twid in results:\n all_urls.append(results[twid][\"url\"])\n all_urls += container_matches\n\n url_with_media_id = []\n for url in all_urls:\n if url.__contains__(media_id):\n url_with_media_id.append(url)\n\n if len(url_with_media_id) > 0:\n return url_with_media_id\n\n if len(container_matches) > 0 and not target_all_mp4s:\n return container_matches\n\n if target_all_mp4s:\n urls = [x[\"url\"] for x in results.values()]\n urls += container_matches\n return urls\n return [x[\"url\"] for x in results.values()]\n\n def extract_mp4_fmp4(j):\n \"\"\"\n Extract the URL of the MP4 video from the detailed information of the tweet.\n Returns a list of URLs, tweet IDs, and resolution information (dictionary type)\n and a list of tweet IDs as return values.\n \"\"\"\n\n # Empty list to store tweet IDs\n tweet_id_list = []\n mp4_info_dict_list = []\n amplitude_pattern = re.compile(\n 
r\"(https://video.twimg.com/amplify_video/(\\d+)/vid/(avc1/)(\\d+x\\d+)/[^.]+.mp4\\?tag=\\d+)\"\n )\n ext_tw_pattern = re.compile(\n r\"(https://video.twimg.com/ext_tw_video/(\\d+)/pu/vid/(avc1/)?(\\d+x\\d+)/[^.]+.mp4\\?tag=\\d+)\"\n )\n tweet_video_pattern = re.compile(r'https://video.twimg.com/tweet_video/[^\"]+')\n container_pattern = re.compile(r'https://video.twimg.com/[^\"]*container=fmp4')\n\n matches = amplitude_pattern.findall(j)\n matches += ext_tw_pattern.findall(j)\n container_matches = container_pattern.findall(j)\n tweet_video_url_list = tweet_video_pattern.findall(j)\n\n for match in matches:\n url, tweet_id, _, resolution = match\n tweet_id_list.append(int(tweet_id))\n mp4_info_dict_list.append({\"resolution\": resolution, \"url\": url})\n\n tweet_id_list = list(dict.fromkeys(tweet_id_list))\n\n if len(container_matches) > 0:\n for url in container_matches:\n mp4_info_dict_list.append({\"url\": url})\n\n return tweet_id_list, mp4_info_dict_list, tweet_video_url_list\n\n def download_parts(url, output_filename):\n resp = requests.get(url, stream=True)\n pattern = re.compile(r\"(/[^\\n]*/(\\d+x\\d+)/[^\\n]*container=fmp4)\")\n matches = pattern.findall(resp.text)\n max_res = 0\n max_res_url = None\n\n for match in matches:\n url, resolution = match\n width, height = resolution.split(\"x\")\n res = int(width) * int(height)\n if res > max_res:\n max_res = res\n max_res_url = url\n\n assert (\n max_res_url is not None\n ), f\"Could not find a url to download from. Make sure you are using the correct url. If you are, then file a GitHub issue and copy and paste this message. Tweet url: {url}\"\n\n video_part_prefix = \"https://video.twimg.com\"\n\n resp = requests.get(video_part_prefix + max_res_url, stream=True)\n\n mp4_pattern = re.compile(r\"(/[^\\n]*\\.mp4)\")\n mp4_parts = mp4_pattern.findall(resp.text)\n\n assert (\n len(mp4_parts) == 1\n ), f\"There should be exactly 1 mp4 container at this point. Instead, found {len(mp4_parts)}. Please open a GitHub issue and copy and paste this message into it. 
Tweet url: {url}\"\n\n mp4_url = video_part_prefix + mp4_parts[0]\n\n m4s_part_pattern = re.compile(r\"(/[^\\n]*\\.m4s)\")\n m4s_parts = m4s_part_pattern.findall(resp.text)\n\n with open(output_filename, \"wb\") as f:\n r = requests.get(mp4_url, stream=True)\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n f.flush()\n\n for part in m4s_parts:\n part_url = video_part_prefix + part\n r = requests.get(part_url, stream=True)\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n f.flush()\n\n return True\n\n def repost_check(j, exclude_replies=True):\n try:\n reply_index = j.index('\"conversationthread-')\n except ValueError:\n reply_index = len(j)\n if exclude_replies:\n j = j[0:reply_index]\n\n # We use this regular expression to extract the source status\n source_status_pattern = r'\"source_status_id_str\"\\s*:\\s*\"\\d+\"'\n matches = re.findall(source_status_pattern, j)\n\n if len(matches) > 0 and exclude_replies:\n # We extract the source status id (ssid)\n ssid = json.loads(\"{\" + matches[0] + \"}\")[\"source_status_id_str\"]\n # We plug it in this regular expression to find expanded_url (the original tweet url)\n expanded_url_pattern = (\n r'\"expanded_url\"\\s*:\\s*\"https://x\\.com/[^/]+/status/' + ssid + '[^\"]+\"'\n )\n matches2 = re.findall(expanded_url_pattern, j)\n\n if len(matches2) > 0:\n # We extract the url and return it\n status_url = json.loads(\"{\" + matches2[0] + \"}\")[\"expanded_url\"]\n return status_url\n\n if not exclude_replies:\n # If we include replies we'll have to get all ssids and remove duplicates\n ssids = []\n for match in matches:\n ssids.append(json.loads(\"{\" + match + \"}\")[\"source_status_id_str\"])\n # we remove duplicates (this line is messy but it's the easiest way to do it)\n ssids = list(set(ssids))\n if len(ssids) > 0:\n for ssid in ssids:\n expanded_url_pattern = (\n r'\"expanded_url\"\\s*:\\s*\"https://x\\.com/[^/]+/status/'\n + ssid\n + '[^\"]+\"'\n )\n matches2 = re.findall(expanded_url_pattern, j)\n if len(matches2) > 0:\n status_urls = []\n for match in matches2:\n status_urls.append(\n json.loads(\"{\" + match + \"}\")[\"expanded_url\"]\n )\n # We remove duplicates another time\n status_urls = list(set(status_urls))\n return status_urls\n\n # If we don't find source_status_id_str, the tweet doesn't feature a reposted video\n return None\n\n def download_video_from_x(tweet_url, output_file, target_all_videos=False):\n bearer_token, guest_token = get_tokens(tweet_url)\n resp = get_tweet_details(tweet_url, guest_token, bearer_token)\n mp4s = extract_mp4s(resp.text, tweet_url, target_all_videos)\n\n if target_all_videos:\n video_counter = 1\n original_urls = repost_check(resp.text, exclude_replies=False)\n\n if len(original_urls) > 0:\n for url in original_urls:\n download_video_from_x(\n url, output_file.replace(\".mp4\", f\"_{video_counter}.mp4\")\n )\n video_counter += 1\n if len(mp4s) > 0:\n for mp4 in mp4s:\n output_file = output_file.replace(\".mp4\", f\"_{video_counter}.mp4\")\n if \"container\" in mp4:\n download_parts(mp4, output_file)\n\n else:\n # use a stream to download the file\n r = requests.get(mp4, stream=True)\n with open(output_file, \"wb\") as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n f.flush()\n video_counter += 1\n else:\n original_url = repost_check(resp.text)\n\n if original_url:\n download_video_from_x(original_url, output_file)\n else:\n assert (\n len(mp4s) > 0\n ), f\"Could not find any mp4s to download. 
Make sure you are using the correct url. If you are, then file a GitHub issue and copy and paste this message. Tweet url: {tweet_url}\"\n\n mp4 = mp4s[0]\n if \"container\" in mp4:\n download_parts(mp4, output_file)\n else:\n # use a stream to download the file\n r = requests.get(mp4, stream=True)\n with open(output_file, \"wb\") as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n f.flush()\n return target_location\n\n return download_video_from_x(source_url, target_location)"
},
{
"identifier": "TiktokDownloadAll",
"path": "nostr_dvm/utils/scrapper/media_scrapper.py",
"snippet": "def TiktokDownloadAll(linkList, path) -> str:\n parseDict = getDict()\n cookies, headers, data = createHeader(parseDict)\n # linkList = getLinkDict()['tiktok']\n for i in linkList:\n try:\n data['url'] = i\n result = TikTokDownload(cookies, headers, data, \"tiktok\", path) # str(linkList.index(i))\n return result\n except IndexError:\n parseDict = getDict()\n cookies, headers, data = createHeader(parseDict)\n except Exception as err:\n print(err)\n exit(1)"
},
{
"identifier": "InstagramDownload",
"path": "nostr_dvm/utils/scrapper/media_scrapper.py",
"snippet": "def InstagramDownload(url, name, path) -> str:\n obj = instaloader.Instaloader()\n post = instaloader.Post.from_shortcode(obj.context, url.split(\"/\")[-2])\n photo_url = post.url\n video_url = post.video_url\n print(video_url)\n if video_url:\n response = requests.get(video_url)\n with open(path + \"\\\\\" + name + \".mp4\", \"wb\") as f:\n f.write(response.content)\n return path + \"\\\\\" + name + \".mp4\"\n elif photo_url:\n response = requests.get(photo_url)\n with open(path + \"\\\\\" + name + \".jpg\", \"wb\") as f:\n f.write(response.content)\n return path + \"\\\\\" + name + \".jpg\""
},
{
"identifier": "YouTubeDownload",
"path": "nostr_dvm/utils/scrapper/media_scrapper.py",
"snippet": "def YouTubeDownload(link, path, audio_only=True):\n youtubeObject = YouTube(link)\n if audio_only:\n youtubeObject = youtubeObject.streams.get_audio_only()\n youtubeObject.download(path, \"yt.mp3\")\n print(\"Download is completed successfully\")\n return path + \"yt.mp3\"\n else:\n youtubeObject = youtubeObject.streams.get_highest_resolution()\n youtubeObject.download(path, \"yt.mp4\")\n print(\"Download is completed successfully\")\n return path + \"yt.mp4\""
}
] | import os
import urllib
import ffmpegio
import requests
from datetime import time
from urllib.parse import urlparse
from decord import AudioReader, cpu
from nostr_dvm.utils.nostr_utils import get_event_by_id
from nostr_dvm.utils.scrapper.media_scrapper import OvercastDownload, XitterDownload, TiktokDownloadAll, \
InstagramDownload, YouTubeDownload
from moviepy.editor import VideoFileClip
from moviepy.editor import VideoFileClip | 9,121 |
def get_Instagram(input_value, start, end):
filepath = os.path.abspath(os.curdir + r'/outputs/')
try:
filename = download_instagram(input_value, filepath)
print(filename)
except Exception as e:
print(e)
return "", start, end
return filename, start, end
def get_Twitter(input_value, start, end):
filepath = os.path.abspath(os.curdir) + r'/outputs/'
cleanlink = str(input_value).replace("twitter.com", "x.com")
try:
filename = download_twitter(cleanlink, filepath)
except Exception as e:
print(e)
return "", start, end
return filename, start, end
def get_youtube(input_value, start, end, audioonly=True):
filepath = os.path.abspath(os.curdir) + r'/outputs/'
print(filepath)
filename = ""
try:
filename = download_youtube(input_value, filepath, audioonly)
except Exception as e:
print("Youtube " + str(e))
return filename, start, end
try:
o = urlparse(input_value)
q = urllib.parse.parse_qs(o.query)
if start == 0.0:
if o.query.find('?t=') != -1:
start = q['t'][0] # overwrite from link.. why not..
print("Setting start time automatically to " + start)
if end > 0.0:
end = float(q['t'][0]) + end
print("Moving end time automatically to " + str(end))
except Exception as e:
print(e)
return filename, start, end
return filename, start, end
def get_media_link(url) -> (str, str):
req = requests.get(url)
content_type = req.headers['content-type']
print(content_type)
if content_type == 'audio/x-wav' or str(url).lower().endswith(".wav"):
ext = "wav"
file_type = "audio"
with open(os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), 'wb') as fd:
fd.write(req.content)
return os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), file_type
elif content_type == 'audio/mpeg' or str(url).lower().endswith(".mp3"):
ext = "mp3"
file_type = "audio"
with open(os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), 'wb') as fd:
fd.write(req.content)
return os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), file_type
elif content_type == 'audio/ogg' or str(url).lower().endswith(".ogg"):
ext = "ogg"
file_type = "audio"
with open(os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), 'wb') as fd:
fd.write(req.content)
return os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), file_type
elif content_type == 'video/mp4' or str(url).lower().endswith(".mp4"):
ext = "mp4"
file_type = "video"
with open(os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), 'wb') as fd:
fd.write(req.content)
return os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), file_type
elif content_type == 'video/avi' or str(url).lower().endswith(".avi"):
ext = "avi"
file_type = "video"
with open(os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), 'wb') as fd:
fd.write(req.content)
return os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), file_type
elif content_type == 'video/quicktime' or str(url).lower().endswith(".mov"):
ext = "mov"
file_type = "video"
with open(os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), 'wb') as fd:
fd.write(req.content)
return os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), file_type
else:
print(str(url).lower())
return None, None
def download_overcast(source_url, target_location):
result = OvercastDownload(source_url, target_location)
return result
def download_twitter(videourl, path):
result = XitterDownload(videourl, path + "x.mp4")
return result
def download_tik_tok(videourl, path):
result = TiktokDownloadAll([videourl], path)
return result
def download_instagram(videourl, path):
result = InstagramDownload(videourl, "insta", path)
return result
def download_youtube(link, path, audioonly=True):
|
def input_data_file_duration(event, dvm_config, client, start=0, end=0):
# print("[" + dvm_config.NIP89.NAME + "] Getting Duration of the Media file..")
input_value = ""
input_type = ""
for tag in event.tags():
if tag.as_vec()[0] == 'i':
input_value = tag.as_vec()[1]
input_type = tag.as_vec()[2]
if input_type == "text":
return len(input_value)
if input_type == "event": # NIP94 event
evt = get_event_by_id(input_value, client=client, config=dvm_config)
if evt is not None:
input_value, input_type = check_nip94_event_for_media(evt, input_value, input_type)
if input_type == "text":
# For now, ignore start/end cropping for text and just use its length.
return len(input_value)
if input_type == "url":
source_type = check_source_type(input_value)
filename, start, end, type = get_file_start_end_type(input_value, source_type, start, end, True)
if type != "audio" and type != "video":
return 1
if filename == "" or filename is None:
return 0
try:
file_reader = AudioReader(filename, ctx=cpu(0), mono=False)
duration = float(file_reader.duration())
except Exception as e:
print(e)
return 0
print("Original Duration of the Media file: " + str(duration))
start_time, end_time, new_duration = (
convert_media_length(start, end, duration))
print("New Duration of the Media file: " + str(new_duration))
return new_duration
return 1
def organize_input_media_data(input_value, input_type, start, end, dvm_config, client, process=True,
media_format="audio/mp3") -> str:
if input_type == "event": # NIP94 event
evt = get_event_by_id(input_value, client=client, config=dvm_config)
if evt is not None:
input_value, input_type = check_nip94_event_for_media(evt, input_value, input_type)
if input_type == "url":
source_type = check_source_type(input_value)
audio_only = True
if media_format.split('/')[0] == "video":
audio_only = False
filename, start, end, type = get_file_start_end_type(input_value, source_type, start, end, audio_only)
if filename == "" or filename is None:
return ""
if type != "audio" and type != "video":
return filename
try:
file_reader = AudioReader(filename, ctx=cpu(0), mono=False)
duration = float(file_reader.duration())
except Exception as e:
print(e)
try:
clip = VideoFileClip(filename)
duration = clip.duration
except Exception as e:
print(e)
return ""
print("Original Duration of the Media file: " + str(duration))
start_time, end_time, new_duration = (
convert_media_length(start, end, duration))
print("New Duration of the Media file: " + str(new_duration))
# TODO if already in a working format and time is 0 0, dont convert
# for now, we cut and convert all files to mp3
if process:
# for now we cut and convert all files to mp3
file = r'processed.' + str(media_format.split('/')[1])
final_filename = os.path.abspath(os.curdir + r'/outputs/' + file)
if media_format.split('/')[0] == "audio":
print("Converting Audio from " + str(start_time) + " until " + str(end_time))
fs, x = ffmpegio.audio.read(filename, ss=start_time, to=end_time, sample_fmt='dbl', ac=1)
ffmpegio.audio.write(final_filename, fs, x, overwrite=True)
elif media_format.split('/')[0] == "video":
print("Converting Video from " + str(start_time) + " until " + str(end_time))
ffmpegio.transcode(filename, final_filename, overwrite=True, show_log=True)
elif media_format.split('/')[1] == "gif":
print("Converting Video from " + str(start_time) + " until " + str(end_time))
videoClip = VideoFileClip(filename)
videoClip.write_gif(final_filename, program="ffmpeg")
print(final_filename)
return final_filename
else:
return filename
def check_nip94_event_for_media(evt, input_value, input_type):
# Parse NIP94 event for url, if found, use it.
if evt.kind() == 1063:
for tag in evt.tags():
if tag.as_vec()[0] == 'url':
input_type = "url"
input_value = tag.as_vec()[1]
return input_value, input_type
return input_value, input_type
def convert_media_length(start: float, end: float, duration: float):
if end == 0.0:
end_time = duration
elif end > duration:
end_time = duration
else:
end_time = end
if start <= 0.0 or start > end_time:
start_time = 0.0
else:
start_time = start
dur = end_time - start_time
return start_time, end_time, dur
def get_file_start_end_type(url, source_type, start, end, audio_only=True) -> (str, str):
# Overcast
if source_type == "overcast":
name, start, end = get_overcast(url, start, end)
return name, start, end, "audio"
# Youtube
elif source_type == "youtube":
name, start, end = get_youtube(url, start, end, audio_only)
return name, start, end, "audio"
# Xitter
elif source_type == "xitter":
name, start, end = get_Twitter(url, start, end)
return name, start, end, "video"
# Tiktok
elif source_type == "tiktok":
name, start, end = get_TikTok(url, start, end)
return name, start, end, "video"
# Instagram
elif source_type == "instagram":
name, start, end = get_Instagram(url, start, end)
if name.endswith("jpg"):
type = "image"
else:
type = "video"
return name, start, end, type
# A file link
else:
filename, filetype = get_media_link(url)
return filename, start, end, filetype
def media_source(source_type):
if source_type == "overcast":
return "audio"
elif source_type == "youtube":
return "audio"
elif source_type == "xitter":
return "video"
elif source_type == "tiktok":
return "video"
elif source_type == "instagram":
return "video"
else:
return "url"
def check_source_type(url):
if str(url).startswith("https://overcast.fm/"):
return "overcast"
elif str(url).replace("http://", "").replace("https://", "").replace(
"www.", "").replace("youtu.be/", "youtube.com?v=")[0:11] == "youtube.com":
return "youtube"
elif str(url).startswith("https://x.com") or str(url).startswith("https://twitter.com"):
return "xitter"
elif str(url).startswith("https://vm.tiktok.com") or str(url).startswith(
"https://www.tiktok.com") or str(url).startswith("https://m.tiktok.com"):
return "tiktok"
elif str(url).startswith("https://www.instagram.com") or str(url).startswith(
"https://instagram.com"):
return "instagram"
else:
return "url"
def get_overcast(input_value, start, end):
filename = os.path.abspath(os.curdir + r'/outputs/originalaudio.mp3')
print("Found overcast.fm Link.. downloading")
start_time = start
end_time = end
download_overcast(input_value, filename)
finaltag = str(input_value).replace("https://overcast.fm/", "").split('/')
if start == 0.0:
if len(finaltag) > 1:
t = time.strptime(finaltag[1], "%H:%M:%S")
seconds = t.tm_hour * 60 * 60 + t.tm_min * 60 + t.tm_sec
start_time = float(seconds)
print("Setting start time automatically to " + str(start_time))
if end > 0.0:
end_time = float(seconds + end)
print("Moving end time automatically to " + str(end_time))
return filename, start_time, end_time
def get_TikTok(input_value, start, end):
filepath = os.path.abspath(os.curdir + r'/outputs/')
try:
filename = download_tik_tok(input_value, filepath)
print(filename)
except Exception as e:
print(e)
return "", start, end
return filename, start, end
def get_Instagram(input_value, start, end):
filepath = os.path.abspath(os.curdir + r'/outputs/')
try:
filename = download_instagram(input_value, filepath)
print(filename)
except Exception as e:
print(e)
return "", start, end
return filename, start, end
def get_Twitter(input_value, start, end):
filepath = os.path.abspath(os.curdir) + r'/outputs/'
cleanlink = str(input_value).replace("twitter.com", "x.com")
try:
filename = download_twitter(cleanlink, filepath)
except Exception as e:
print(e)
return "", start, end
return filename, start, end
def get_youtube(input_value, start, end, audioonly=True):
filepath = os.path.abspath(os.curdir) + r'/outputs/'
print(filepath)
filename = ""
try:
filename = download_youtube(input_value, filepath, audioonly)
except Exception as e:
print("Youtube " + str(e))
return filename, start, end
try:
o = urlparse(input_value)
q = urllib.parse.parse_qs(o.query)
if start == 0.0:
if o.query.find('?t=') != -1:
                start = float(q['t'][0])  # overwrite from link.. why not..
                print("Setting start time automatically to " + str(start))
if end > 0.0:
end = float(q['t'][0]) + end
print("Moving end time automatically to " + str(end))
except Exception as e:
print(e)
return filename, start, end
return filename, start, end
def get_media_link(url) -> (str, str):
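    # Download a direct media link and infer the file type from the
    # Content-Type header (falling back to the URL's file extension).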
req = requests.get(url)
content_type = req.headers['content-type']
print(content_type)
if content_type == 'audio/x-wav' or str(url).lower().endswith(".wav"):
ext = "wav"
file_type = "audio"
with open(os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), 'wb') as fd:
fd.write(req.content)
return os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), file_type
elif content_type == 'audio/mpeg' or str(url).lower().endswith(".mp3"):
ext = "mp3"
file_type = "audio"
with open(os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), 'wb') as fd:
fd.write(req.content)
return os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), file_type
elif content_type == 'audio/ogg' or str(url).lower().endswith(".ogg"):
ext = "ogg"
file_type = "audio"
with open(os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), 'wb') as fd:
fd.write(req.content)
return os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), file_type
elif content_type == 'video/mp4' or str(url).lower().endswith(".mp4"):
ext = "mp4"
file_type = "video"
with open(os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), 'wb') as fd:
fd.write(req.content)
return os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), file_type
elif content_type == 'video/avi' or str(url).lower().endswith(".avi"):
ext = "avi"
file_type = "video"
with open(os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), 'wb') as fd:
fd.write(req.content)
return os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), file_type
elif content_type == 'video/quicktime' or str(url).lower().endswith(".mov"):
ext = "mov"
file_type = "video"
with open(os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), 'wb') as fd:
fd.write(req.content)
return os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), file_type
else:
print(str(url).lower())
return None, None
def download_overcast(source_url, target_location):
result = OvercastDownload(source_url, target_location)
return result
def download_twitter(videourl, path):
result = XitterDownload(videourl, path + "x.mp4")
return result
def download_tik_tok(videourl, path):
result = TiktokDownloadAll([videourl], path)
return result
def download_instagram(videourl, path):
result = InstagramDownload(videourl, "insta", path)
return result
def download_youtube(link, path, audioonly=True): | return YouTubeDownload(link, path, audio_only=audioonly) | 5 | 2023-11-17 18:32:56+00:00 | 12k |
embrake/Aquilify | aquilify/wrappers/request.py | [
{
"identifier": "AwaitableOrContextManager",
"path": "aquilify/utils/_utils.py",
"snippet": "class AwaitableOrContextManager(Protocol[T_co]):\n def __await__(self) -> typing.Generator[typing.Any, None, T_co]:\n ... # pragma: no cover\n\n async def __aenter__(self) -> T_co:\n ... # pragma: no cover\n\n async def __aexit__(\n self,\n __exc_type: typing.Optional[typing.Type[BaseException]],\n __exc_value: typing.Optional[BaseException],\n __traceback: typing.Optional[TracebackType],\n ) -> typing.Union[bool, None]:\n ... # pragma: no cover"
},
{
"identifier": "AwaitableOrContextManagerWrapper",
"path": "aquilify/utils/_utils.py",
"snippet": "class AwaitableOrContextManagerWrapper(typing.Generic[SupportsAsyncCloseType]):\n __slots__ = (\"aw\", \"entered\")\n\n def __init__(self, aw: typing.Awaitable[SupportsAsyncCloseType]) -> None:\n self.aw = aw\n\n def __await__(self) -> typing.Generator[typing.Any, None, SupportsAsyncCloseType]:\n return self.aw.__await__()\n\n async def __aenter__(self) -> SupportsAsyncCloseType:\n self.entered = await self.aw\n return self.entered\n\n async def __aexit__(self, *args: typing.Any) -> typing.Union[None, bool]:\n await self.entered.close()\n return None"
},
{
"identifier": "URL",
"path": "aquilify/datastructure/core.py",
"snippet": "class URL:\n def __init__(\n self,\n url: str = \"\",\n scope: typing.Optional[Scope] = None,\n **components: typing.Any,\n ) -> None:\n if scope is not None:\n assert not url, 'Cannot set both \"url\" and \"scope\".'\n assert not components, 'Cannot set both \"scope\" and \"**components\".'\n scheme = scope.get(\"scheme\", \"http\")\n server = scope.get(\"server\", None)\n path = scope.get(\"root_path\", \"\") + scope[\"path\"]\n query_string = scope.get(\"query_string\", b\"\")\n\n host_header = None\n for key, value in scope[\"headers\"]:\n if key == b\"host\":\n host_header = value.decode(\"latin-1\")\n break\n\n if host_header is not None:\n url = f\"{scheme}://{host_header}{path}\"\n elif server is None:\n url = path\n else:\n host, port = server\n default_port = {\"http\": 80, \"https\": 443, \"ws\": 80, \"wss\": 443}[scheme]\n if port == default_port:\n url = f\"{scheme}://{host}{path}\"\n else:\n url = f\"{scheme}://{host}:{port}{path}\"\n\n if query_string:\n url += \"?\" + query_string.decode()\n elif components:\n assert not url, 'Cannot set both \"url\" and \"**components\".'\n url = URL(\"\").replace(**components).components.geturl()\n\n self._url = url\n\n @property\n def components(self) -> SplitResult:\n if not hasattr(self, \"_components\"):\n self._components = urlsplit(self._url)\n return self._components\n\n @property\n def scheme(self) -> str:\n return self.components.scheme\n\n @property\n def netloc(self) -> str:\n return self.components.netloc\n\n @property\n def path(self) -> str:\n return self.components.path\n\n @property\n def query(self) -> str:\n return self.components.query\n\n @property\n def fragment(self) -> str:\n return self.components.fragment\n\n @property\n def username(self) -> typing.Union[None, str]:\n return self.components.username\n\n @property\n def password(self) -> typing.Union[None, str]:\n return self.components.password\n\n @property\n def hostname(self) -> typing.Union[None, str]:\n return self.components.hostname\n\n @property\n def port(self) -> typing.Optional[int]:\n return self.components.port\n\n @property\n def is_secure(self) -> bool:\n return self.scheme in (\"https\", \"wss\")\n\n def replace(self, **kwargs: typing.Any) -> \"URL\":\n if (\n \"username\" in kwargs\n or \"password\" in kwargs\n or \"hostname\" in kwargs\n or \"port\" in kwargs\n ):\n hostname = kwargs.pop(\"hostname\", None)\n port = kwargs.pop(\"port\", self.port)\n username = kwargs.pop(\"username\", self.username)\n password = kwargs.pop(\"password\", self.password)\n\n if hostname is None:\n netloc = self.netloc\n _, _, hostname = netloc.rpartition(\"@\")\n\n if hostname[-1] != \"]\":\n hostname = hostname.rsplit(\":\", 1)[0]\n\n netloc = hostname\n if port is not None:\n netloc += f\":{port}\"\n if username is not None:\n userpass = username\n if password is not None:\n userpass += f\":{password}\"\n netloc = f\"{userpass}@{netloc}\"\n\n kwargs[\"netloc\"] = netloc\n\n components = self.components._replace(**kwargs)\n return self.__class__(components.geturl())\n\n def include_query_params(self, **kwargs: typing.Any) -> \"URL\":\n params = MultiDict(parse_qsl(self.query, keep_blank_values=True))\n params.update({str(key): str(value) for key, value in kwargs.items()})\n query = urlencode(params.multi_items())\n return self.replace(query=query)\n\n def replace_query_params(self, **kwargs: typing.Any) -> \"URL\":\n query = urlencode([(str(key), str(value)) for key, value in kwargs.items()])\n return self.replace(query=query)\n\n def 
remove_query_params(\n self, keys: typing.Union[str, typing.Sequence[str]]\n ) -> \"URL\":\n if isinstance(keys, str):\n keys = [keys]\n params = MultiDict(parse_qsl(self.query, keep_blank_values=True))\n for key in keys:\n params.pop(key, None)\n query = urlencode(params.multi_items())\n return self.replace(query=query)\n\n def __eq__(self, other: typing.Any) -> bool:\n return str(self) == str(other)\n\n def __str__(self) -> str:\n return self._url\n\n def __repr__(self) -> str:\n url = str(self)\n if self.password:\n url = str(self.replace(password=\"********\"))\n return f\"{self.__class__.__name__}({repr(url)})\""
},
{
"identifier": "Address",
"path": "aquilify/datastructure/core.py",
"snippet": "class Address(typing.NamedTuple):\n host: str\n port: int"
},
{
"identifier": "FormData",
"path": "aquilify/datastructure/core.py",
"snippet": "class FormData(ImmutableMultiDict[str, typing.Union[UploadFile, str]]):\n def __init__(\n self,\n *args: typing.Union[\n \"FormData\",\n typing.Mapping[str, typing.Union[str, UploadFile]],\n typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]],\n ],\n **kwargs: typing.Union[str, UploadFile],\n ) -> None:\n super().__init__(*args, **kwargs)\n\n async def close(self) -> None:\n for key, value in self.multi_items():\n if isinstance(value, UploadFile):\n await value.close()"
},
{
"identifier": "Headers",
"path": "aquilify/datastructure/core.py",
"snippet": "class Headers(typing.Mapping[str, str]):\n def __init__(\n self,\n headers: typing.Optional[typing.Mapping[str, str]] = None,\n raw: typing.Optional[typing.List[typing.Tuple[bytes, bytes]]] = None,\n scope: typing.Optional[typing.MutableMapping[str, typing.Any]] = None,\n ) -> None:\n self._list: typing.List[typing.Tuple[bytes, bytes]] = []\n if headers is not None:\n assert raw is None, 'Cannot set both \"headers\" and \"raw\".'\n assert scope is None, 'Cannot set both \"headers\" and \"scope\".'\n self._list = [\n (key.lower().encode(\"latin-1\"), value.encode(\"latin-1\"))\n for key, value in headers.items()\n ]\n elif raw is not None:\n assert scope is None, 'Cannot set both \"raw\" and \"scope\".'\n self._list = raw\n elif scope is not None:\n self._list = scope[\"headers\"] = list(scope[\"headers\"])\n\n @property\n def raw(self) -> typing.List[typing.Tuple[bytes, bytes]]:\n return list(self._list)\n\n def keys(self) -> typing.List[str]: # type: ignore[override]\n return [key.decode(\"latin-1\") for key, value in self._list]\n\n def values(self) -> typing.List[str]: # type: ignore[override]\n return [value.decode(\"latin-1\") for key, value in self._list]\n\n def items(self) -> typing.List[typing.Tuple[str, str]]: # type: ignore[override]\n return [\n (key.decode(\"latin-1\"), value.decode(\"latin-1\"))\n for key, value in self._list\n ]\n\n def getlist(self, key: str) -> typing.List[str]:\n get_header_key = key.lower().encode(\"latin-1\")\n return [\n item_value.decode(\"latin-1\")\n for item_key, item_value in self._list\n if item_key == get_header_key\n ]\n\n def mutablecopy(self) -> \"MutableHeaders\":\n return MutableHeaders(raw=self._list[:])\n\n def __getitem__(self, key: str) -> str:\n get_header_key = key.lower().encode(\"latin-1\")\n for header_key, header_value in self._list:\n if header_key == get_header_key:\n return header_value.decode(\"latin-1\")\n raise KeyError(key)\n\n def __contains__(self, key: typing.Any) -> bool:\n get_header_key = key.lower().encode(\"latin-1\")\n for header_key, header_value in self._list:\n if header_key == get_header_key:\n return True\n return False\n\n def __iter__(self) -> typing.Iterator[typing.Any]:\n return iter(self.keys())\n\n def __len__(self) -> int:\n return len(self._list)\n\n def __eq__(self, other: typing.Any) -> bool:\n if not isinstance(other, Headers):\n return False\n return sorted(self._list) == sorted(other._list)\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n as_dict = dict(self.items())\n if len(as_dict) == len(self):\n return f\"{class_name}({as_dict!r})\"\n return f\"{class_name}(raw={self.raw!r})\""
},
{
"identifier": "State",
"path": "aquilify/datastructure/core.py",
"snippet": "class State:\n _state: typing.Dict[str, typing.Any]\n\n def __init__(self, state: typing.Optional[typing.Dict[str, typing.Any]] = None):\n if state is None:\n state = {}\n super().__setattr__(\"_state\", state)\n\n def __setattr__(self, key: typing.Any, value: typing.Any) -> None:\n self._state[key] = value\n\n def __getattr__(self, key: typing.Any) -> typing.Any:\n try:\n return self._state[key]\n except KeyError:\n message = \"'{}' object has no attribute '{}'\"\n raise AttributeError(message.format(self.__class__.__name__, key))\n\n def __delattr__(self, key: typing.Any) -> None:\n del self._state[key]"
},
{
"identifier": "HTTPException",
"path": "aquilify/exception/http_exception.py",
"snippet": "class HTTPException(Exception):\n def __init__(\n self,\n status_code: int,\n detail: typing.Optional[str] = None,\n headers: typing.Optional[dict] = None,\n ) -> None:\n if detail is None:\n detail = http.HTTPStatus(status_code).phrase\n self.status_code = status_code\n self.detail = detail\n self.headers = headers\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n return f\"{class_name}(status_code={self.status_code!r}, detail={self.detail!r})\""
},
{
"identifier": "FormParser",
"path": "aquilify/datastructure/formparser.py",
"snippet": "class FormParser:\n def __init__(\n self, headers: Headers, stream: typing.AsyncGenerator[bytes, None]\n ) -> None:\n assert (\n multipart is not None\n ), \"The `python-multipart` library must be installed to use form parsing.\"\n self.headers = headers\n self.stream = stream\n self.messages: typing.List[typing.Tuple[FormMessage, bytes]] = []\n\n def on_field_start(self) -> None:\n message = (FormMessage.FIELD_START, b\"\")\n self.messages.append(message)\n\n def on_field_name(self, data: bytes, start: int, end: int) -> None:\n message = (FormMessage.FIELD_NAME, data[start:end])\n self.messages.append(message)\n\n def on_field_data(self, data: bytes, start: int, end: int) -> None:\n message = (FormMessage.FIELD_DATA, data[start:end])\n self.messages.append(message)\n\n def on_field_end(self) -> None:\n message = (FormMessage.FIELD_END, b\"\")\n self.messages.append(message)\n\n def on_end(self) -> None:\n message = (FormMessage.END, b\"\")\n self.messages.append(message)\n\n async def parse(self) -> FormData:\n # Callbacks dictionary.\n callbacks = {\n \"on_field_start\": self.on_field_start,\n \"on_field_name\": self.on_field_name,\n \"on_field_data\": self.on_field_data,\n \"on_field_end\": self.on_field_end,\n \"on_end\": self.on_end,\n }\n\n # Create the parser.\n parser = multipart.QuerystringParser(callbacks)\n field_name = b\"\"\n field_value = b\"\"\n\n items: typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]] = []\n\n # Feed the parser with data from the request.\n async for chunk in self.stream:\n if chunk:\n parser.write(chunk)\n else:\n parser.finalize()\n messages = list(self.messages)\n self.messages.clear()\n for message_type, message_bytes in messages:\n if message_type == FormMessage.FIELD_START:\n field_name = b\"\"\n field_value = b\"\"\n elif message_type == FormMessage.FIELD_NAME:\n field_name += message_bytes\n elif message_type == FormMessage.FIELD_DATA:\n field_value += message_bytes\n elif message_type == FormMessage.FIELD_END:\n name = unquote_plus(field_name.decode(\"latin-1\"))\n value = unquote_plus(field_value.decode(\"latin-1\"))\n items.append((name, value))\n\n return FormData(items)"
},
{
"identifier": "MultiPartException",
"path": "aquilify/datastructure/formparser.py",
"snippet": "class MultiPartException(Exception):\n def __init__(self, message: str) -> None:\n self.message = message"
},
{
"identifier": "MultiPartParser",
"path": "aquilify/datastructure/formparser.py",
"snippet": "class MultiPartParser:\n max_file_size = 1024 * 1024\n\n def __init__(\n self,\n headers: Headers,\n stream: typing.AsyncGenerator[bytes, None],\n *,\n max_files: typing.Union[int, float] = 1000,\n max_fields: typing.Union[int, float] = 1000,\n ) -> None:\n assert (\n multipart is not None\n ), \"The `python-multipart` library must be installed to use form parsing.\"\n self.headers = headers\n self.stream = stream\n self.max_files = max_files\n self.max_fields = max_fields\n self.items: typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]] = []\n self._current_files = 0\n self._current_fields = 0\n self._current_partial_header_name: bytes = b\"\"\n self._current_partial_header_value: bytes = b\"\"\n self._current_part = MultipartPart()\n self._charset = \"\"\n self._file_parts_to_write: typing.List[typing.Tuple[MultipartPart, bytes]] = []\n self._file_parts_to_finish: typing.List[MultipartPart] = []\n self._files_to_close_on_error: typing.List[SpooledTemporaryFile] = []\n\n def on_part_begin(self) -> None:\n self._current_part = MultipartPart()\n\n def on_part_data(self, data: bytes, start: int, end: int) -> None:\n message_bytes = data[start:end]\n if self._current_part.file is None:\n self._current_part.data += message_bytes\n else:\n self._file_parts_to_write.append((self._current_part, message_bytes))\n\n def on_part_end(self) -> None:\n if self._current_part.file is None:\n self.items.append(\n (\n self._current_part.field_name,\n _user_safe_decode(self._current_part.data, self._charset),\n )\n )\n else:\n self._file_parts_to_finish.append(self._current_part)\n self.items.append((self._current_part.field_name, self._current_part.file))\n\n def on_header_field(self, data: bytes, start: int, end: int) -> None:\n self._current_partial_header_name += data[start:end]\n\n def on_header_value(self, data: bytes, start: int, end: int) -> None:\n self._current_partial_header_value += data[start:end]\n\n def on_header_end(self) -> None:\n field = self._current_partial_header_name.lower()\n if field == b\"content-disposition\":\n self._current_part.content_disposition = self._current_partial_header_value\n self._current_part.item_headers.append(\n (field, self._current_partial_header_value)\n )\n self._current_partial_header_name = b\"\"\n self._current_partial_header_value = b\"\"\n\n def on_headers_finished(self) -> None:\n disposition, options = parse_options_header(\n self._current_part.content_disposition\n )\n try:\n self._current_part.field_name = _user_safe_decode(\n options[b\"name\"], self._charset\n )\n except KeyError:\n raise MultiPartException(\n 'The Content-Disposition header field \"name\" must be ' \"provided.\"\n )\n if b\"filename\" in options:\n self._current_files += 1\n if self._current_files > self.max_files:\n raise MultiPartException(\n f\"Too many files. Maximum number of files is {self.max_files}.\"\n )\n filename = _user_safe_decode(options[b\"filename\"], self._charset)\n tempfile = SpooledTemporaryFile(max_size=self.max_file_size)\n self._files_to_close_on_error.append(tempfile)\n self._current_part.file = UploadFile(\n file=tempfile, # type: ignore[arg-type]\n size=0,\n filename=filename,\n headers=Headers(raw=self._current_part.item_headers),\n )\n else:\n self._current_fields += 1\n if self._current_fields > self.max_fields:\n raise MultiPartException(\n f\"Too many fields. 
Maximum number of fields is {self.max_fields}.\"\n )\n self._current_part.file = None\n\n def on_end(self) -> None:\n pass\n\n async def parse(self) -> FormData:\n # Parse the Content-Type header to get the multipart boundary.\n _, params = parse_options_header(self.headers[\"Content-Type\"])\n charset = params.get(b\"charset\", \"utf-8\")\n if type(charset) == bytes:\n charset = charset.decode(\"latin-1\")\n self._charset = charset\n try:\n boundary = params[b\"boundary\"]\n except KeyError:\n raise MultiPartException(\"Missing boundary in multipart.\")\n\n # Callbacks dictionary.\n callbacks = {\n \"on_part_begin\": self.on_part_begin,\n \"on_part_data\": self.on_part_data,\n \"on_part_end\": self.on_part_end,\n \"on_header_field\": self.on_header_field,\n \"on_header_value\": self.on_header_value,\n \"on_header_end\": self.on_header_end,\n \"on_headers_finished\": self.on_headers_finished,\n \"on_end\": self.on_end,\n }\n\n parser = multipart.MultipartParser(boundary, callbacks)\n try:\n async for chunk in self.stream:\n parser.write(chunk)\n for part, data in self._file_parts_to_write:\n assert part.file # for type checkers\n await part.file.write(data)\n for part in self._file_parts_to_finish:\n assert part.file # for type checkers\n await part.file.seek(0)\n self._file_parts_to_write.clear()\n self._file_parts_to_finish.clear()\n except MultiPartException as exc:\n for file in self._files_to_close_on_error:\n file.close()\n raise exc\n\n parser.finalize()\n return FormData(self.items)"
},
{
"identifier": "UserAgentParser",
"path": "aquilify/datastructure/user_agent.py",
"snippet": "class UserAgentParser:\n def __init__(self, user_agent_string: str) -> None:\n self.user_agent_string: str = user_agent_string\n self.browser: str = ''\n self.browser_version: str = ''\n self.browser_engine: str = ''\n self.os: str = ''\n self.os_version: str = ''\n self.device: str = ''\n self.is_mobile: bool = False\n self.language: str = ''\n self.platform: str = ''\n self.is_bot_or_crawler: bool = False\n self.screen_resolution: str = ''\n self.viewport_size: str = ''\n self.js_enabled: bool = False\n self.referer: str = ''\n self.timezone: str = ''\n self._parse_user_agent()\n\n def _parse_user_agent(self) -> None:\n self.browser, self.browser_version = self._get_browser_info()\n self.browser_engine = self._get_browser_engine()\n self.os, self.os_version = self._get_os_info()\n self.device = self._get_device_info()\n self.is_mobile = self._check_mobile()\n self.language = self._get_language()\n self.platform = self._get_platform()\n self.is_bot_or_crawler = self._check_bot_or_crawler()\n self.screen_resolution = self._get_screen_resolution()\n self.viewport_size = self._get_viewport_size()\n self.js_enabled = self._check_javascript_enabled()\n self.referer = self._get_referer()\n self.timezone = self._get_timezone()\n\n def _get_browser_info(self):\n browsers = {\n 'Opera': r'Opera\\/([0-9.]+)',\n 'Firefox': r'Firefox\\/([0-9.]+)',\n 'Edge': r'(?:Edg(?:e)?)\\/([0-9.]+)',\n 'Chrome': r'Chrome\\/([0-9.]+)',\n 'Safari': r'Safari\\/([0-9.]+)',\n 'IE': r'MSIE ([0-9.]+)|rv:([0-9.]+)'\n }\n \n browser_priority = ['Edge', 'Chrome', 'Firefox', 'Safari', 'Opera', 'IE']\n\n for browser in browser_priority:\n if browser in browsers:\n pattern = browsers[browser]\n match = re.search(pattern, self.user_agent_string)\n if match:\n if browser == 'Edge':\n version = match.group(1) or ''\n else:\n version = match.group(1) or match.group(2) or ''\n return browser, version\n \n return 'Unknown', ''\n \n def _get_browser_engine(self):\n browser_engines = {\n 'Blink': 'Blink',\n 'WebKit': 'WebKit',\n 'Gecko': 'Gecko',\n 'Trident': 'Trident'\n }\n for engine, pattern in browser_engines.items():\n if pattern in self.user_agent_string:\n return engine\n return 'Unknown'\n\n def _get_os_info(self):\n operating_systems = {\n 'Windows': r'Windows NT ([0-9.]+)',\n 'Android': r'Android ([0-9.]+)',\n 'Linux': r'Linux',\n 'iOS': r'OS ([0-9_]+) like Mac',\n 'Mac': r'Mac OS X ([0-9_]+)'\n }\n for os, pattern in operating_systems.items():\n match = re.search(pattern, self.user_agent_string)\n if match:\n version = match.group(1).replace('_', '.') if match.group(1) else ''\n return os, version\n return 'Unknown', ''\n\n def _get_device_info(self):\n devices = {\n 'iPhone': r'iPhone(?:\\sSimulator)?',\n 'iPad': r'iPad(?:\\sSimulator)?',\n 'Mobile': r'Mobile',\n 'Tablet': r'Tablet',\n 'Desktop': r'Windows|Macintosh|Linux'\n }\n for device, pattern in devices.items():\n if re.search(pattern, self.user_agent_string):\n return device\n return 'Unknown'\n\n def _check_mobile(self):\n return 'Mobile' in self.user_agent_string\n\n def _get_language(self):\n match = re.search(r'(?<=\\b(?:language=))(.*?)(?=[;|$])', self.user_agent_string)\n return match.group(1) if match else 'Unknown'\n\n def _get_platform(self):\n platforms = {\n 'Windows': 'Windows',\n 'Linux': 'Linux',\n 'Mac': 'Macintosh'\n }\n for platform, pattern in platforms.items():\n if pattern in self.user_agent_string:\n return platform\n return 'Unknown'\n\n def _check_bot_or_crawler(self):\n bot_patterns = [\n 'bot',\n 'crawler',\n 'spider',\n 
'googlebot',\n 'bingbot',\n 'slurp',\n 'duckduckbot',\n 'yandexbot'\n ]\n for bot_pattern in bot_patterns:\n if re.search(bot_pattern, self.user_agent_string, re.IGNORECASE):\n return True\n return False\n\n def _get_screen_resolution(self):\n match = re.search(r'(?<=\\b(?:Screen: ))([0-9]+x[0-9]+)', self.user_agent_string)\n return match.group(1) if match else 'Unknown'\n\n def _get_viewport_size(self):\n match = re.search(r'(?<=\\b(?:Viewport: ))([0-9]+x[0-9]+)', self.user_agent_string)\n return match.group(1) if match else 'Unknown'\n\n def _check_javascript_enabled(self):\n return 'JS' in self.user_agent_string\n\n def _get_referer(self):\n match = re.search(r'(?<=\\b(?:Referer: ))(.*?)(?=[;|$])', self.user_agent_string)\n return match.group(1) if match else 'Unknown'\n\n def _get_timezone(self):\n match = re.search(r'(?<=\\b(?:Timezone: ))(.*?)(?=[;|$])', self.user_agent_string)\n return match.group(1) if match else 'Unknown'\n \n def __str__(self) -> str:\n return str(self.user_agent_string)\n \n def __repr__(self) -> str:\n return f\"UserAgentParser({self.user_agent_string})\"\n\n def to_dict(self) -> dict:\n return {\n 'user_agent_string': self.user_agent_string,\n 'browser': self.browser,\n 'browser_version': self.browser_version,\n 'browser_engine': self.browser_engine,\n 'os': self.os,\n 'os_version': self.os_version,\n 'device': self.device,\n 'is_mobile': self.is_mobile,\n 'language': self.language,\n 'platform': self.platform,\n 'is_bot_or_crawler': self.is_bot_or_crawler,\n 'screen_resolution': self.screen_resolution,\n 'viewport_size': self.viewport_size,\n 'js_enabled': self.js_enabled,\n 'referer': self.referer,\n 'timezone': self.timezone\n }"
},
{
"identifier": "Message",
"path": "aquilify/types.py",
"snippet": "T = typing.TypeVar(\"T\")"
}
] | import json
import typing
import anyio
from http import cookies as http_cookies
from urllib.parse import parse_qs
from ..utils._utils import AwaitableOrContextManager, AwaitableOrContextManagerWrapper
from ..datastructure.core import URL, Address, FormData, Headers, State
from ..exception.http_exception import HTTPException
from ..datastructure.formparser import FormParser, MultiPartException, MultiPartParser
from ..datastructure.user_agent import UserAgentParser
from ..types import Message, Receive, Scope, Send
from multipart.multipart import parse_options_header | 8,788 | @property
def auth(self) -> typing.Any:
assert (
"auth" in self.scope
), "AuthenticationMiddleware must be installed to access request.auth"
return self.scope["auth"]
@property
def user(self) -> typing.Any:
assert (
"user" in self.scope
), "AuthenticationMiddleware must be installed to access request.user"
return self.scope["user"]
@property
def state(self) -> State:
if not hasattr(self, "_state"):
self.scope.setdefault("state", {})
self._state = State(self.scope["state"])
return self._state
async def empty_receive() -> typing.NoReturn:
raise RuntimeError("Receive channel has not been made available")
async def empty_send(message: Message) -> typing.NoReturn:
raise RuntimeError("Send channel has not been made available")
class Request(HTTPConnection):
_form: typing.Optional[FormData]
def __init__(
self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send
):
super().__init__(scope)
assert scope["type"] == "http"
self._receive = receive
self._send = send
self._stream_consumed = False
self._is_disconnected = False
self._form = None
self.path_params = None
self.query_params: typing.Dict[str, str] = {}
self.context: typing.Dict[str, str] = {}
self.executed_middlewares = set()
@property
def method(self) -> str:
return self.scope["method"]
@property
def args(self) -> typing.Dict[str, str]:
self._parse_query_params()
return self.query_params
@property
def receive(self) -> Receive:
return self._receive
async def stream(self) -> typing.AsyncGenerator[bytes, None]:
if hasattr(self, "_body"):
yield self._body
yield b""
return
if self._stream_consumed:
raise RuntimeError("Stream consumed")
self._stream_consumed = True
while True:
message = await self._receive()
if message["type"] == "http.request":
body = message.get("body", b"")
if body:
yield body
if not message.get("more_body", False):
break
elif message["type"] == "http.disconnect":
self._is_disconnected = True
yield b""
async def body(self) -> bytes:
if not hasattr(self, "_body"):
chunks: "typing.List[bytes]" = []
async for chunk in self.stream():
chunks.append(chunk)
self._body = b"".join(chunks)
return self._body
async def json(self) -> typing.Any:
if not hasattr(self, "_json"):
body = await self.body()
self._json = json.loads(body)
return self._json
def _parse_query_params(self):
query_string = self.scope.get('query_string', b'').decode('utf-8')
self.query_params = {k: v[0] for k, v in parse_qs(query_string).items()}
async def _get_form(
self,
*,
max_files: typing.Union[int, float] = 1000,
max_fields: typing.Union[int, float] = 1000,
) -> FormData:
if self._form is None:
assert (
parse_options_header is not None
), "The `python-multipart` library must be installed to use form parsing."
content_type_header = self.headers.get("Content-Type")
content_type: bytes
content_type, _ = parse_options_header(content_type_header)
if content_type == b"multipart/form-data":
try:
multipart_parser = MultiPartParser(
self.headers,
self.stream(),
max_files=max_files,
max_fields=max_fields,
)
self._form = await multipart_parser.parse()
|
try:
    from multipart.multipart import parse_options_header
except ModuleNotFoundError:
    parse_options_header = None
SERVER_PUSH_HEADERS_TO_COPY = {
"accept",
"accept-encoding",
"accept-language",
"cache-control",
"user-agent",
}
def cookie_parser(cookie_string: str) -> typing.Dict[str, str]:
cookie_dict: typing.Dict[str, str] = {}
for chunk in cookie_string.split(";"):
if "=" in chunk:
key, val = chunk.split("=", 1)
else:
key, val = "", chunk
key, val = key.strip(), val.strip()
if key or val:
cookie_dict[key] = http_cookies._unquote(val)
return cookie_dict
class ClientDisconnect(Exception):
pass
class HTTPConnection(typing.Mapping[str, typing.Any]):
def __init__(self, scope: Scope, receive: typing.Optional[Receive] = None) -> None:
assert scope["type"] in ("http", "websocket")
self.scope = scope
def __getitem__(self, key: str) -> typing.Any:
return self.scope[key]
def __iter__(self) -> typing.Iterator[str]:
return iter(self.scope)
def __len__(self) -> int:
return len(self.scope)
__eq__ = object.__eq__
__hash__ = object.__hash__
@property
def app(self) -> typing.Any:
return self.scope["app"]
@property
def url(self) -> URL:
if not hasattr(self, "_url"):
self._url = URL(scope=self.scope)
return self._url
@property
def base_url(self) -> URL:
if not hasattr(self, "_base_url"):
base_url_scope = dict(self.scope)
base_url_scope["path"] = "/"
base_url_scope["query_string"] = b""
base_url_scope["root_path"] = base_url_scope.get(
"app_root_path", base_url_scope.get("root_path", "")
)
self._base_url = URL(scope=base_url_scope)
return self._base_url
@property
def headers(self) -> Headers:
if not hasattr(self, "_headers"):
self._headers = Headers(scope=self.scope)
return self._headers
@property
def origin(self):
return self.headers.get('origin')
@property
def remote_addr(self) -> str:
return self.scope.get('client', ('',))[0]
@property
def scheme(self) -> str:
return self.scope.get('scheme', 'http')
@property
def server(self) -> typing.Dict[str, str]:
return {
'server_protocol': self.scope.get('server_protocol', ''),
'server_name': self.scope.get('server_name', ''),
'server_port': self.scope.get('server_port', ''),
}
@property
def authorization(self) -> typing.Optional[str]:
return self.headers.get('authorization')
@property
def user_agent(self) -> UserAgentParser:
return UserAgentParser(self.headers.get('user-agent', ''))
@property
def referer(self) -> str:
return self.headers.get('referer', '')
@property
def accept(self) -> str:
return self.headers.get('accept', '')
@property
def host(self) -> str:
return self.headers.get('host')
@property
def path(self) -> str:
return self.scope.get('path', '/')
@property
def path_param(self) -> typing.Dict[str, typing.Any]:
return self.scope.get("path_params", {})
@property
def cookies(self) -> typing.Dict[str, str]:
if not hasattr(self, "_cookies"):
cookies: typing.Dict[str, str] = {}
cookie_header = self.headers.get("cookie")
if cookie_header:
cookies = cookie_parser(cookie_header)
self._cookies = cookies
return self._cookies
@property
def client(self) -> typing.Optional[Address]:
# client is a 2 item tuple of (host, port), None or missing
host_port = self.scope.get("client")
if host_port is not None:
return Address(*host_port)
return None
@property
def session(self) -> typing.Dict[str, typing.Any]:
assert (
"session" in self.scope
), "SessionMiddleware must be installed to access request.session"
return self.scope["session"]
@property
def auth(self) -> typing.Any:
assert (
"auth" in self.scope
), "AuthenticationMiddleware must be installed to access request.auth"
return self.scope["auth"]
@property
def user(self) -> typing.Any:
assert (
"user" in self.scope
), "AuthenticationMiddleware must be installed to access request.user"
return self.scope["user"]
@property
def state(self) -> State:
if not hasattr(self, "_state"):
self.scope.setdefault("state", {})
self._state = State(self.scope["state"])
return self._state
async def empty_receive() -> typing.NoReturn:
raise RuntimeError("Receive channel has not been made available")
async def empty_send(message: Message) -> typing.NoReturn:
raise RuntimeError("Send channel has not been made available")
class Request(HTTPConnection):
_form: typing.Optional[FormData]
def __init__(
self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send
):
super().__init__(scope)
assert scope["type"] == "http"
self._receive = receive
self._send = send
self._stream_consumed = False
self._is_disconnected = False
self._form = None
self.path_params = None
self.query_params: typing.Dict[str, str] = {}
self.context: typing.Dict[str, str] = {}
self.executed_middlewares = set()
@property
def method(self) -> str:
return self.scope["method"]
@property
def args(self) -> typing.Dict[str, str]:
self._parse_query_params()
return self.query_params
@property
def receive(self) -> Receive:
return self._receive
async def stream(self) -> typing.AsyncGenerator[bytes, None]:
if hasattr(self, "_body"):
yield self._body
yield b""
return
if self._stream_consumed:
raise RuntimeError("Stream consumed")
self._stream_consumed = True
while True:
message = await self._receive()
if message["type"] == "http.request":
body = message.get("body", b"")
if body:
yield body
if not message.get("more_body", False):
break
elif message["type"] == "http.disconnect":
self._is_disconnected = True
yield b""
async def body(self) -> bytes:
if not hasattr(self, "_body"):
chunks: "typing.List[bytes]" = []
async for chunk in self.stream():
chunks.append(chunk)
self._body = b"".join(chunks)
return self._body
async def json(self) -> typing.Any:
if not hasattr(self, "_json"):
body = await self.body()
self._json = json.loads(body)
return self._json
def _parse_query_params(self):
query_string = self.scope.get('query_string', b'').decode('utf-8')
self.query_params = {k: v[0] for k, v in parse_qs(query_string).items()}
async def _get_form(
self,
*,
max_files: typing.Union[int, float] = 1000,
max_fields: typing.Union[int, float] = 1000,
) -> FormData:
if self._form is None:
assert (
parse_options_header is not None
), "The `python-multipart` library must be installed to use form parsing."
content_type_header = self.headers.get("Content-Type")
content_type: bytes
content_type, _ = parse_options_header(content_type_header)
if content_type == b"multipart/form-data":
try:
multipart_parser = MultiPartParser(
self.headers,
self.stream(),
max_files=max_files,
max_fields=max_fields,
)
self._form = await multipart_parser.parse() | except MultiPartException as exc: | 9 | 2023-11-16 08:26:02+00:00 | 12k |
IBM/oper8 | tests/test_component.py | [
{
"identifier": "constants",
"path": "oper8/constants.py",
"snippet": "PAUSE_ANNOTATION_NAME = \"oper8.org/pause-execution\"\nCONFIG_DEFAULTS_ANNOTATION_NAME = \"oper8.org/config-defaults\"\nLEASE_NAME_ANNOTATION_NAME = \"oper8.org/lease-name\"\nLEASE_TIME_ANNOTATION_NAME = \"oper8.org/lease-time\"\nLOG_DEFAULT_LEVEL_NAME = \"oper8.org/log-default-level\"\nLOG_FILTERS_NAME = \"oper8.org/log-filters\"\nLOG_THREAD_ID_NAME = \"oper8.org/log-thread-id\"\nLOG_JSON_NAME = \"oper8.org/log-json\"\nPASSTHROUGH_ANNOTATIONS = [\n CONFIG_DEFAULTS_ANNOTATION_NAME,\n LOG_DEFAULT_LEVEL_NAME,\n LOG_FILTERS_NAME,\n LOG_JSON_NAME,\n LOG_THREAD_ID_NAME,\n PAUSE_ANNOTATION_NAME,\n]\nALL_ANNOTATIONS = PASSTHROUGH_ANNOTATIONS\nTEMPORARY_PATCHES_ANNOTATION_NAME = \"oper8.org/temporary-patches\"\nINTERNAL_NAME_ANOTATION_NAME = \"oper8.org/internal-name\"\nDEFAULT_NAMESPACE = \"default\"\nNESTED_DICT_DELIM = \".\"\nCONFIG_OVERRIDES = \"configOverrides\""
},
{
"identifier": "Component",
"path": "oper8/component.py",
"snippet": "class Component(Node, abc.ABC):\n def name(self):\n def __init__(\n self,\n session: Session,\n disabled: bool = False,\n ):\n def __str__(self):\n def managed_objects(self) -> List[ManagedObject]:\n def build_chart(self, session: Session): # pylint: disable=unused-argument\n def verify(self, session):\n def render_chart(self, session):\n def update_object_definition(\n self,\n session: Session, # pylint: disable=unused-argument\n internal_name: str, # pylint: disable=unused-argument\n resource_definition: dict,\n ):\n def deploy(self, session):\n def disable(self, session):\n def add_resource(\n self,\n name: str, # pylint: disable=redefined-builtin\n obj: Any,\n ) -> Optional[\n def add_dependency(\n self,\n session: Session,\n *components: \"Component\",\n verify_function: Optional[VERIFY_FUNCTION] = None,\n ):\n def to_dict(self, session):\n def to_config(self, session):\n def to_file(self, session):\n def get_name(cls): # pylint: disable=arguments-differ\n def _default_verify(self, session, is_subsystem=False):\n def _preserve_patch_annotation(session, internal_name, resource_definition):\n def __build_lazy_charts(self, session):\n def __render(self, session):\n def __gather_resources(self, session) -> List[Tuple[str, dict]]:"
},
{
"identifier": "TEMPORARY_PATCHES_ANNOTATION_NAME",
"path": "oper8/constants.py",
"snippet": "TEMPORARY_PATCHES_ANNOTATION_NAME = \"oper8.org/temporary-patches\""
},
{
"identifier": "DryRunDeployManager",
"path": "oper8/deploy_manager/dry_run_deploy_manager.py",
"snippet": "class DryRunDeployManager(DeployManagerBase):\n \"\"\"\n Deploy manager which doesn't actually deploy!\n \"\"\"\n\n def __init__(self, resources=None, owner_cr=None, strict_resource_version=False):\n \"\"\"Construct with a static value to use for whether or not the functions\n should report change.\n \"\"\"\n self._owner_cr = owner_cr\n self._cluster_content = {}\n self.strict_resource_version = strict_resource_version\n\n # Dicts of registered watches and watchers\n self._watches = {}\n self._finalizers = {}\n\n # Deploy provided resources\n self._deploy(resources or [], call_watches=False, manage_owner_references=False)\n\n ## Interface ###############################################################\n\n def deploy(self, resource_definitions, manage_owner_references=True, **_):\n log.info(\"DRY RUN deploy\")\n return self._deploy(\n resource_definitions, manage_owner_references=manage_owner_references\n )\n\n def disable(self, resource_definitions):\n log.info(\"DRY RUN disable\")\n changed = False\n for resource in resource_definitions:\n api_version = resource.get(\"apiVersion\")\n kind = resource.get(\"kind\")\n name = resource.get(\"metadata\", {}).get(\"name\")\n namespace = resource.get(\"metadata\", {}).get(\"namespace\")\n _, content = self.get_object_current_state(\n kind=kind, api_version=api_version, namespace=namespace, name=name\n )\n if content is not None:\n changed = True\n\n # Set resource finalizers\n with DRY_RUN_CLUSTER_LOCK:\n self._cluster_content[namespace][kind][api_version][name][\n \"metadata\"\n ][\"deletionTimestamp\"] = datetime.now().strftime(\n \"%Y-%m-%dT%H:%M:%SZ\"\n )\n self._cluster_content[namespace][kind][api_version][name][\n \"metadata\"\n ][\"deletionGracePeriodSeconds\"] = 0\n\n # Call any registered finalizers\n for key, callback in self._get_registered_watches(\n api_version, kind, namespace, name, finalizer=True\n ):\n log.debug2(\n \"Calling registered finalizer [%s] for [%s]\", callback, key\n )\n callback(self._cluster_content[namespace][kind][api_version][name])\n\n # If finalizers have been cleared and object hasn't already been deleted then\n # remove the key\n current_obj = (\n self._cluster_content.get(namespace, {})\n .get(kind, {})\n .get(api_version, {})\n .get(name, {})\n )\n if current_obj and not current_obj.get(\"metadata\", {}).get(\n \"finalizers\", []\n ):\n with DRY_RUN_CLUSTER_LOCK:\n self._delete_key(namespace, kind, api_version, name)\n\n return True, changed\n\n def get_object_current_state(self, kind, name, namespace=None, api_version=None):\n log.info(\n \"DRY RUN get_object_current_state of [%s/%s] in [%s]\", kind, name, namespace\n )\n\n # Look in the cluster state\n matches = []\n kind_entries = self._cluster_content.get(namespace, {}).get(kind, {})\n log.debug3(\"Kind entries: %s\", kind_entries)\n for api_ver, entries in kind_entries.items():\n log.debug3(\"Checking api_version [%s // %s]\", api_ver, api_version)\n if name in entries and (api_ver == api_version or api_version is None):\n matches.append(entries[name])\n log.debug(\n \"Found %d matches for [%s/%s] in %s\", len(matches), kind, name, namespace\n )\n if len(matches) == 1:\n return True, copy.deepcopy(matches[0])\n return True, None\n\n def filter_objects_current_state(\n self,\n kind,\n namespace=None,\n api_version=None,\n label_selector=None,\n field_selector=None,\n ): # pylint: disable=too-many-arguments\n log.info(\n \"DRY RUN filter_objects_current_state of [%s] in [%s]\", kind, namespace\n )\n # Look in the cluster state\n matches = 
[]\n kind_entries = self._cluster_content.get(namespace, {}).get(kind, {})\n log.debug3(\"Kind entries: %s\", kind_entries)\n for api_ver, entries in kind_entries.items():\n # Make sure api version matches\n log.debug3(\"Checking api_version [%s // %s]\", api_ver, api_version)\n if api_ver != api_version and api_version is not None:\n continue\n\n for resource in entries.values():\n # Make sure Labels Match\n log.debug3(\"Resource: %s\", resource)\n\n labels = resource.get(\"metadata\", {}).get(\"labels\", {})\n log.debug3(\"Checking label_selector [%s // %s]\", labels, label_selector)\n if label_selector is not None and not _match_selector(\n labels, label_selector\n ):\n continue\n\n # Only do the work for field selector if one exists\n log.debug3(\"Checking field_selector [%s]\", field_selector)\n if field_selector is not None and not _match_selector(\n _convert_dict_to_dot(resource),\n field_selector,\n ):\n continue\n\n # Add deep copy of entry to matches list\n matches.append(copy.deepcopy(resource))\n\n return True, matches\n\n def set_status(\n self,\n kind,\n name,\n namespace,\n status,\n api_version=None,\n ): # pylint: disable=too-many-arguments\n log.info(\n \"DRY RUN set_status of [%s.%s/%s] in %s: %s\",\n api_version,\n kind,\n name,\n namespace,\n status,\n )\n object_content = self.get_object_current_state(\n kind, name, namespace, api_version\n )[1]\n if object_content is None:\n log.debug(\"Did not find [%s/%s] in %s\", kind, name, namespace)\n return False, False\n prev_status = object_content.get(\"status\")\n object_content[\"status\"] = status\n self._deploy([object_content], call_watches=False)\n return True, prev_status != status\n\n def watch_objects( # pylint: disable=too-many-arguments,too-many-locals,unused-argument\n self,\n kind: str,\n api_version: Optional[str] = None,\n namespace: Optional[str] = None,\n name: Optional[str] = None,\n label_selector: Optional[str] = None,\n field_selector: Optional[str] = None,\n resource_version: Optional[str] = None,\n timeout: Optional[int] = 15,\n **kwargs,\n ) -> Iterator[KubeWatchEvent]:\n \"\"\"Watch the DryRunDeployManager for resource changes by registering\n callbacks\"\"\"\n\n event_queue = Queue()\n resource_map = {}\n\n def add_event(resource_map: dict, manifest: dict):\n \"\"\"Callback triggered when resources are deployed\"\"\"\n resource = ManagedObject(manifest)\n event_type = KubeEventType.ADDED\n\n watch_key = self._watch_key(\n api_version=resource.api_version,\n kind=resource.kind,\n namespace=resource.namespace,\n name=resource.name,\n )\n if watch_key in resource_map:\n log.debug4(\"Watch key detected, setting Modified event type\")\n event_type = KubeEventType.MODIFIED\n\n resource_map[watch_key] = resource\n event = KubeWatchEvent(\n type=event_type,\n resource=resource,\n )\n event_queue.put(event)\n\n def delete_event(resource_map: dict, manifest: dict):\n \"\"\"Callback triggered when resources are disabled\"\"\"\n resource = ManagedObject(manifest)\n watch_key = self._watch_key(\n api_version=resource.api_version,\n kind=resource.kind,\n namespace=resource.namespace,\n name=resource.name,\n )\n if watch_key in resource_map:\n del resource_map[watch_key]\n\n event = KubeWatchEvent(\n type=KubeEventType.DELETED,\n resource=resource,\n )\n event_queue.put(event)\n\n # Get initial resources\n _, manifests = self.filter_objects_current_state(\n kind=kind,\n api_version=api_version,\n namespace=namespace,\n label_selector=label_selector,\n field_selector=field_selector,\n )\n for manifest in 
manifests:\n resource = ManagedObject(manifest)\n watch_key = self._watch_key(\n kind=resource.kind,\n api_version=resource.api_version,\n name=resource.name,\n namespace=resource.namespace,\n )\n resource_map[watch_key] = resource\n\n event = KubeWatchEvent(type=KubeEventType.ADDED, resource=resource)\n log.debug2(\"Yielding initial event %s\", event)\n yield event\n\n end_time = datetime.max\n if timeout:\n end_time = datetime.now() + timedelta(seconds=timeout)\n\n # Register callbacks\n self.register_watch(\n api_version=api_version,\n kind=kind,\n namespace=namespace,\n name=name,\n callback=partial(add_event, resource_map),\n )\n self.register_finalizer(\n api_version=api_version,\n kind=kind,\n namespace=namespace,\n name=name,\n callback=partial(delete_event, resource_map),\n )\n\n # Yield any events from the callback queue\n log.debug2(\"Waiting till %s\", end_time)\n while True:\n sec_till_end = (end_time - datetime.now()).seconds or 1\n try:\n event = event_queue.get(timeout=sec_till_end)\n log.debug2(\"Yielding event %s\", event)\n yield event\n except Empty:\n pass\n\n if datetime.now() > end_time:\n return\n\n ## Dry Run Methods #########################################################\n def register_watch( # pylint: disable=too-many-arguments\n self,\n api_version: str,\n kind: str,\n callback: Callable[[dict], None],\n namespace=\"\",\n name=\"\",\n ):\n \"\"\"Register a callback to watch for deploy events on a given\n api_version/kind\n \"\"\"\n watch_key = self._watch_key(\n api_version=api_version, kind=kind, namespace=namespace, name=name\n )\n log.debug(\"Registering watch for %s\", watch_key)\n self._watches.setdefault(watch_key, []).append(callback)\n\n def register_finalizer( # pylint: disable=too-many-arguments\n self,\n api_version: str,\n kind: str,\n callback: Callable[[dict], None],\n namespace=\"\",\n name=\"\",\n ):\n \"\"\"Register a callback to call on deletion events on a given\n api_version/kind\n \"\"\"\n watch_key = self._watch_key(\n api_version=api_version, kind=kind, namespace=namespace, name=name\n )\n log.debug(\"Registering finalizer for %s\", watch_key)\n self._finalizers.setdefault(watch_key, []).append(callback)\n\n ## Implementation Details ##################################################\n\n @staticmethod\n def _watch_key(api_version=\"\", kind=\"\", namespace=\"\", name=\"\"):\n return \":\".join([api_version or \"\", kind or \"\", namespace or \"\", name or \"\"])\n\n def _get_registered_watches( # pylint: disable=too-many-arguments\n self,\n api_version: str = \"\",\n kind: str = \"\",\n namespace: str = \"\",\n name: str = \"\",\n finalizer: bool = False,\n ) -> List[Tuple[str, Callable]]:\n # Get the scoped watch key\n resource_watch_key = self._watch_key(\n api_version=api_version, kind=kind, namespace=namespace, name=name\n )\n namespaced_watch_key = self._watch_key(\n api_version=api_version, kind=kind, namespace=namespace\n )\n global_watch_key = self._watch_key(api_version=api_version, kind=kind)\n\n # Get which watch list we're pulling from\n callback_map = self._watches\n if finalizer:\n callback_map = self._finalizers\n\n output_list = []\n log.debug3(\n \"Looking for resourced key: %s namespace key %s global key %s\",\n resource_watch_key,\n namespaced_watch_key,\n global_watch_key,\n )\n for key, callback_list in callback_map.items():\n if key in [resource_watch_key, namespaced_watch_key, global_watch_key]:\n log.debug3(\"%d Callbacks found for key %s\", len(callback_list), key)\n for callback in callback_list:\n 
output_list.append((key, callback))\n\n return output_list\n\n def _delete_key(self, namespace, kind, api_version, name):\n del self._cluster_content[namespace][kind][api_version][name]\n if not self._cluster_content[namespace][kind][api_version]:\n del self._cluster_content[namespace][kind][api_version]\n if not self._cluster_content[namespace][kind]:\n del self._cluster_content[namespace][kind]\n if not self._cluster_content[namespace]:\n del self._cluster_content[namespace]\n\n def _deploy(\n self, resource_definitions, call_watches=True, manage_owner_references=True\n ):\n log.info(\"DRY RUN deploy\")\n changes = False\n for resource in resource_definitions:\n api_version = resource.get(\"apiVersion\")\n kind = resource.get(\"kind\")\n name = resource.get(\"metadata\", {}).get(\"name\")\n namespace = resource.get(\"metadata\", {}).get(\"namespace\")\n log.debug(\n \"DRY RUN deploy [%s/%s/%s/%s]\", namespace, kind, api_version, name\n )\n log.debug4(resource)\n\n # If owner CR configured, add ownerReferences\n if self._owner_cr and manage_owner_references:\n log.debug2(\"Adding dry-run owner references\")\n update_owner_references(self, self._owner_cr, resource)\n log.debug3(\n \"All owner references: %s\", resource[\"metadata\"][\"ownerReferences\"]\n )\n\n with DRY_RUN_CLUSTER_LOCK:\n entries = (\n self._cluster_content.setdefault(namespace, {})\n .setdefault(kind, {})\n .setdefault(api_version, {})\n )\n current = copy.deepcopy(entries.get(name, {}))\n old_resource_version = current.get(\"metadata\", {}).pop(\n \"resourceVersion\", None\n )\n changes = changes or (current != resource)\n\n if \"metadata\" not in resource:\n resource[\"metadata\"] = {}\n\n if (\n self.strict_resource_version\n and resource[\"metadata\"].get(\"resourceVersion\")\n and old_resource_version\n and resource[\"metadata\"].get(\"resourceVersion\")\n != old_resource_version\n ):\n log.warning(\n \"Unable to deploy resource. resourceVersion is out of date\"\n )\n return False, False\n\n resource[\"metadata\"][\"creationTimestamp\"] = entries.get(\n \"metadata\", {}\n ).get(\"creationTimestamp\", datetime.now().isoformat())\n resource[\"metadata\"][\"uid\"] = entries.get(\"metadata\", {}).get(\n \"uid\", str(uuid.uuid4())\n )\n resource[\"metadata\"][\"resourceVersion\"] = str(\n random.randint(1, 1000)\n ).zfill(5)\n entries[name] = resource\n\n # Call any registered watches\n if call_watches:\n for key, callback in self._get_registered_watches(\n api_version, kind, namespace, name\n ):\n log.debug2(\"Calling registered watch [%s] for [%s]\", callback, key)\n callback(resource)\n\n # Delete Key if it has already been disabled and doesn't have finalizers\n if self._cluster_content[namespace][kind][api_version][name].get(\n \"metadata\", {}\n ).get(\"deletionTimestamp\") and not self._cluster_content[namespace][kind][\n api_version\n ][\n name\n ].get(\n \"metadata\", {}\n ).get(\n \"finalizers\"\n ):\n with DRY_RUN_CLUSTER_LOCK:\n self._delete_key(namespace, kind, api_version, name)\n\n return True, changes"
},
{
"identifier": "_make_owner_reference",
"path": "oper8/deploy_manager/owner_references.py",
"snippet": "def _make_owner_reference(owner_cr: dict) -> dict:\n \"\"\"Make an owner reference for the given CR instance\n\n Error Semantics: This function makes a best-effort and does not validate the\n content of the owner_cr, so the resulting ownerReference may contain None\n entries.\n\n Args:\n owner_cr: dict\n The full CR manifest for the owning resource\n\n Returns:\n owner_reference: dict\n The dict entry for the `metadata.ownerReferences` entry of the owned\n object\n \"\"\"\n # NOTE: We explicitly don't set controller: True here. If two\n # oper8-managed resources reference the resource, only one can have\n # controller set to True. According to StackOverflow, this field is\n # only used for adoption and not garbage collection.\n # CITE: https://stackoverflow.com/a/65825463\n metadata = owner_cr.get(\"metadata\", {})\n return {\n \"apiVersion\": owner_cr.get(\"apiVersion\"),\n \"kind\": owner_cr.get(\"kind\"),\n \"name\": metadata.get(\"name\"),\n \"uid\": metadata.get(\"uid\"),\n # The parent will not be deleted until this object completes its\n # deletion\n \"blockOwnerDeletion\": True,\n }"
},
{
"identifier": "STRATEGIC_MERGE_PATCH",
"path": "oper8/patch.py",
"snippet": "STRATEGIC_MERGE_PATCH = \"patchStrategicMerge\""
},
{
"identifier": "TEST_NAMESPACE",
"path": "oper8/test_helpers/helpers.py",
"snippet": "TEST_NAMESPACE = \"test\""
},
{
"identifier": "DummyNodeComponent",
"path": "oper8/test_helpers/helpers.py",
"snippet": "class DummyNodeComponent(DummyComponentBase):\n \"\"\"\n Configurable dummy component which will create an abritrary set of\n resource node instances.\n \"\"\"\n\n def __init__(self, session, *args, **kwargs):\n \"\"\"Construct with the additional option to fail build_chart\"\"\"\n super().__init__(*args, session=session, **kwargs)\n self._add_resources(self, session)"
},
{
"identifier": "MockDeployManager",
"path": "oper8/test_helpers/helpers.py",
"snippet": "class MockDeployManager(DryRunDeployManager):\n \"\"\"The MockDeployManager wraps a standard DryRunDeployManager and adds\n configuration options to simulate failures in each of its operations.\n \"\"\"\n\n def __init__(\n self,\n deploy_fail=False,\n deploy_raise=False,\n disable_fail=False,\n disable_raise=False,\n get_state_fail=False,\n get_state_raise=False,\n set_status_fail=False,\n set_status_raise=False,\n auto_enable=True,\n resources=None,\n resource_dir=None,\n **kwargs,\n ):\n \"\"\"This DeployManager can be configured to have various failure cases\n and will mock the state of the cluster so that get_object_current_state\n will pull its information from the local dict.\n \"\"\"\n\n # Add apiVersion to resources that are missing it, then initialize the\n # dry run manager\n\n resources = resources or []\n # Parse pre-populated resources if needed\n resources = resources + (RunOperatorCmd._parse_resource_dir(resource_dir))\n\n for resource in resources:\n resource.setdefault(\"apiVersion\", \"v1\")\n super().__init__(resources, **kwargs)\n\n self.deploy_fail = \"assert\" if deploy_raise else deploy_fail\n self.disable_fail = \"assert\" if disable_raise else disable_fail\n self.get_state_fail = \"assert\" if get_state_raise else get_state_fail\n self.set_status_fail = \"assert\" if set_status_raise else set_status_fail\n\n # If auto-enabling, turn the mocks on now\n if auto_enable:\n self.enable_mocks()\n\n #######################\n ## Helpers for Tests ##\n #######################\n\n def enable_mocks(self):\n \"\"\"Turn the mocks on\"\"\"\n self.deploy = mock.Mock(\n side_effect=get_failable_method(\n self.deploy_fail, super().deploy, (False, False)\n )\n )\n self.disable = mock.Mock(\n side_effect=get_failable_method(\n self.disable_fail, super().disable, (False, False)\n )\n )\n self.get_object_current_state = mock.Mock(\n side_effect=get_failable_method(\n self.get_state_fail, super().get_object_current_state, (False, None)\n )\n )\n self.set_status = mock.Mock(\n side_effect=get_failable_method(\n self.set_status_fail, super().set_status, (False, False)\n )\n )\n\n def get_obj(self, kind, name, namespace=None, api_version=None):\n return self.get_object_current_state(kind, name, namespace, api_version)[1]\n\n def has_obj(self, *args, **kwargs):\n return self.get_obj(*args, **kwargs) is not None"
},
{
"identifier": "configure_logging",
"path": "oper8/test_helpers/helpers.py",
"snippet": "def configure_logging():\n alog.configure(\n os.environ.get(\"LOG_LEVEL\", \"off\"),\n os.environ.get(\"LOG_FILTERS\", \"\"),\n formatter=\"json\"\n if os.environ.get(\"LOG_JSON\", \"\").lower() == \"true\"\n else \"pretty\",\n thread_id=os.environ.get(\"LOG_THREAD_ID\", \"\").lower() == \"true\",\n )"
},
{
"identifier": "library_config",
"path": "oper8/test_helpers/helpers.py",
"snippet": "@contextmanager\ndef library_config(**config_overrides):\n \"\"\"This context manager sets library config values temporarily and reverts\n them on completion\n \"\"\"\n # Override the configs and hang onto the old values\n old_vals = {}\n for key, val in config_overrides.items():\n if key in config_detail_dict:\n old_vals[key] = config_detail_dict[key]\n config_detail_dict[key] = val\n\n # Yield to the context\n yield\n\n # Revert to the old values\n for key in config_overrides:\n if key in old_vals:\n config_detail_dict[key] = old_vals[key]\n else:\n del config_detail_dict[key]"
},
{
"identifier": "make_patch",
"path": "oper8/test_helpers/helpers.py",
"snippet": "def make_patch(\n patch_type,\n body,\n name=\"test\",\n target=None,\n namespace=TEST_NAMESPACE,\n api_version=\"org.oper8/v1\",\n kind=\"TemporaryPatch\",\n):\n \"\"\"Make a sample TemporaryPatch resource body\"\"\"\n target = target or {}\n patch_obj = {\n \"apiVersion\": api_version,\n \"kind\": kind,\n \"metadata\": {\"name\": name},\n \"spec\": {\n \"apiVersion\": target.get(\"apiVersion\", \"fake\"),\n \"kind\": target.get(\"kind\", \"fake\"),\n \"name\": target.get(\"metadata\", {}).get(\"name\", \"fake\"),\n \"patchType\": patch_type,\n \"patch\": body,\n },\n }\n if namespace is not None:\n patch_obj[\"metadata\"][\"namespace\"] = namespace\n return aconfig.Config(\n patch_obj,\n override_env_vars=False,\n )"
},
{
"identifier": "setup_session",
"path": "oper8/test_helpers/helpers.py",
"snippet": "def setup_session(\n version=\"1.2.3\",\n app_config=None,\n deploy_config=None,\n full_cr=None,\n deploy_manager=None,\n namespace=TEST_NAMESPACE,\n deploy_initial_cr=True,\n **kwargs,\n):\n app_config = app_config or aconfig.Config({}, override_env_vars=False)\n deploy_config = deploy_config or aconfig.Config({}, override_env_vars=False)\n full_cr = full_cr or setup_cr(\n deploy_config=deploy_config, version=version, namespace=namespace\n )\n if not deploy_manager:\n deploy_manager = (\n MockDeployManager(resources=[full_cr])\n if deploy_initial_cr\n else MockDeployManager()\n )\n\n return Session(\n reconciliation_id=str(uuid.uuid4()),\n cr_manifest=full_cr,\n config=app_config,\n deploy_manager=deploy_manager,\n **kwargs,\n )"
},
{
"identifier": "merge_configs",
"path": "oper8/utils.py",
"snippet": "def merge_configs(base, overrides) -> dict:\n \"\"\"Helper to perform a deep merge of the overrides into the base. The merge\n is done in place, but the resulting dict is also returned for convenience.\n\n The merge logic is quite simple: If both the base and overrides have a key\n and the type of the key for both is a dict, recursively merge, otherwise\n set the base value to the override value.\n\n Args:\n base: dict\n The base config that will be updated with the overrides\n overrides: dict\n The override config\n\n Returns:\n merged: dict\n The merged results of overrides merged onto base\n \"\"\"\n for key, value in overrides.items():\n if (\n key not in base\n or not isinstance(base[key], dict)\n or not isinstance(value, dict)\n ):\n base[key] = value\n else:\n base[key] = merge_configs(base[key], value)\n\n return base"
}
] | import os
import tempfile
import pytest
import yaml
import alog
from oper8 import constants
from oper8.component import Component, ManagedObject
from oper8.constants import TEMPORARY_PATCHES_ANNOTATION_NAME
from oper8.deploy_manager.dry_run_deploy_manager import DryRunDeployManager
from oper8.deploy_manager.owner_references import _make_owner_reference
from oper8.patch import STRATEGIC_MERGE_PATCH
from oper8.test_helpers.helpers import (
TEST_NAMESPACE,
DummyNodeComponent,
MockDeployManager,
configure_logging,
library_config,
make_patch,
setup_session,
)
from oper8.utils import merge_configs | 7,332 | """
Test the implementations of the default functions in Component
"""
# Standard
# Third Party
# First Party
# Local
configure_logging()
log = alog.use_channel("TEST")
def get_comp_type(name="dummy"):
"""Paramterization helper to get test both standard and legacy components.
This function also wraps the output class type so that name class attributes
are not polluted.
"""
given_name = name
class Wrapped(DummyNodeComponent):
name = given_name
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
return Wrapped
################################################################################
## Tests #######################################################################
################################################################################
##################
## Construction ##
##################
def test_name_validation():
"""Validate that the name validation passes for a class with a valid
name class attribute
"""
class NamedComponent(Component):
name = "foo"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def build_chart(self, session):
pass
class UnnamedComponent(Component):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def build_chart(self, session):
pass
session = setup_session()
component = NamedComponent(session=session)
assert component.name == NamedComponent.name
with pytest.raises(NotImplementedError):
UnnamedComponent(session=session)
def test_scope():
"""Validate that the scope given to the constructor gets wired into the
component's hierarchy correctly
"""
session = setup_session()
comp = get_comp_type()(session=session)
assert session.get_component(comp.name)
##################
## render_chart ##
##################
def test_managed_objects():
"""Make sure managed_objects matches the objects generated by the
chart
"""
session = setup_session()
comp = get_comp_type()(
session=session,
api_objects=[
("bar", {"kind": "Foo", "metadata": {"name": "bar"}}),
("bat", {"kind": "Baz", "metadata": {"name": "bat"}}),
],
)
comp.render_chart(session)
managed_objs = comp.managed_objects
assert len(managed_objs) == 2
assert managed_objs[0].kind == "Foo"
assert managed_objs[1].kind == "Baz"
def test_apply_patches_ok():
"""Make sure applying patches modifies the managed objects as expected"""
patch_name = "test"
| """
Test the implementations of the default functions in Component
"""
# Standard
# Third Party
# First Party
# Local
configure_logging()
log = alog.use_channel("TEST")
def get_comp_type(name="dummy"):
"""Paramterization helper to get test both standard and legacy components.
This function also wraps the output class type so that name class attributes
are not polluted.
"""
given_name = name
class Wrapped(DummyNodeComponent):
name = given_name
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
return Wrapped
################################################################################
## Tests #######################################################################
################################################################################
##################
## Construction ##
##################
def test_name_validation():
"""Validate that the name validation passes for a class with a valid
name class attribute
"""
class NamedComponent(Component):
name = "foo"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def build_chart(self, session):
pass
class UnnamedComponent(Component):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def build_chart(self, session):
pass
session = setup_session()
component = NamedComponent(session=session)
assert component.name == NamedComponent.name
with pytest.raises(NotImplementedError):
UnnamedComponent(session=session)
def test_scope():
"""Validate that the scope given to the constructor gets wired into the
component's hierarchy correctly
"""
session = setup_session()
comp = get_comp_type()(session=session)
assert session.get_component(comp.name)
##################
## render_chart ##
##################
def test_managed_objects():
"""Make sure managed_objects matches the objects generated by the
chart
"""
session = setup_session()
comp = get_comp_type()(
session=session,
api_objects=[
("bar", {"kind": "Foo", "metadata": {"name": "bar"}}),
("bat", {"kind": "Baz", "metadata": {"name": "bat"}}),
],
)
comp.render_chart(session)
managed_objs = comp.managed_objects
assert len(managed_objs) == 2
assert managed_objs[0].kind == "Foo"
assert managed_objs[1].kind == "Baz"
def test_apply_patches_ok():
"""Make sure applying patches modifies the managed objects as expected"""
patch_name = "test" | patch = make_patch( | 11 | 2023-11-15 16:43:29+00:00 | 12k |
smrfeld/tsmixer-pytorch | main.py | [
{
"identifier": "plot_preds",
"path": "utils/plotting.py",
"snippet": "def plot_preds(preds: List[List[float]], preds_gt: List[List[float]], no_feats_plot: int, fname_save: Optional[str] = None, inputs: Optional[List[List[float]]] = None, show: bool = True):\n \"\"\"Plot predictions\n\n Args:\n preds (List[List[float]]): Predictions of shape (no_samples, no_feats)\n preds_gt (List[List[float]]): Predictions of shape (no_samples, no_feats)\n no_feats_plot (int): Number of features to plot\n fname_save (Optional[str], optional): File name to save the plot. Defaults to None.\n inputs (Optional[List[List[float]]], optional): Input of shape (no_samples, no_feats)\n show (bool): Show the plot\n \"\"\" \n import plotly.graph_objects as go\n from plotly.subplots import make_subplots\n\n no_feats = len(preds[0])\n if no_feats_plot > no_feats:\n logger.warning(f\"no_feats_plot ({no_feats_plot}) is larger than no_feats ({no_feats}). Setting no_feats_plot to no_feats\")\n no_feats_plot = no_feats\n\n no_cols = 3\n no_rows = int(no_feats_plot / no_cols)\n if no_feats_plot % no_cols != 0:\n no_rows += 1\n\n fig = make_subplots(rows=no_rows, cols=no_cols, subplot_titles=[f\"Feature {ifeat}\" for ifeat in range(no_feats_plot)])\n\n no_inputs = len(inputs) if inputs is not None else 0\n x_preds = list(range(no_inputs, no_inputs + len(preds)))\n for ifeat in range(no_feats_plot):\n row = int(ifeat / no_cols) + 1\n col = (ifeat % no_cols) + 1\n\n if inputs is not None:\n x_inputs = list(range(len(inputs)))\n fig.add_trace(go.Scatter(x=x_inputs, y=[in_y[ifeat] for in_y in inputs], mode=\"lines\", name=f\"Inputs\", line=dict(color=\"black\"), showlegend=ifeat==0), row=row, col=col)\n\n fig.add_trace(go.Scatter(x=x_preds, y=[pred[ifeat] for pred in preds_gt], mode=\"lines\", name=f\"Ground truth\", line=dict(color=\"red\"), showlegend=ifeat==0), row=row, col=col)\n fig.add_trace(go.Scatter(x=x_preds, y=[pred[ifeat] for pred in preds], mode=\"lines\", name=f\"Model\", line=dict(color=\"blue\"), showlegend=ifeat==0), row=row, col=col)\n\n fig.update_layout(\n height=300*no_rows, \n width=400*no_cols, \n title_text=\"Predictions\",\n font=dict(size=18),\n xaxis_title_text=\"Time\",\n yaxis_title_text=\"Signal\",\n )\n\n if fname_save is not None:\n fig.write_image(fname_save)\n logger.info(f\"Saved plot to {fname_save}\")\n\n if show:\n fig.show()\n\n return fig"
},
{
"identifier": "plot_loss",
"path": "utils/plotting.py",
"snippet": "def plot_loss(train_data: TrainingMetadata, fname_save: Optional[str] = None, show: bool = True):\n \"\"\"Plot loss\n\n Args:\n train_data (TSMixer.TrainingMetadata): Training metadata\n fname_save (Optional[str], optional): File name to save the plot. Defaults to None.\n show (bool): Show the plot\n \"\"\" \n import plotly.graph_objects as go\n\n fig = go.Figure()\n x = [ epoch for epoch in train_data.epoch_to_data.keys() ]\n y = [ data.val_loss for data in train_data.epoch_to_data.values() ]\n fig.add_trace(go.Scatter(x=x, y=y, mode=\"lines\", name=\"Val. loss\"))\n y = [ data.train_loss for data in train_data.epoch_to_data.values() ]\n fig.add_trace(go.Scatter(x=x, y=y, mode=\"lines\", name=\"Train loss\"))\n\n fig.update_layout(\n height=500, \n width=700, \n title_text=\"Loss during training\",\n xaxis_title_text=\"Epoch\",\n yaxis_title_text=\"Loss\",\n font=dict(size=18),\n )\n\n if fname_save is not None:\n fig.write_image(fname_save)\n logger.info(f\"Saved plot to {fname_save}\")\n\n if show:\n fig.show()\n\n return fig"
},
{
"identifier": "TSMixerConf",
"path": "utils/tsmixer_conf.py",
"snippet": "class TSMixerConf(DataClassDictMixin):\n\n class Initialize(Enum):\n FROM_LATEST_CHECKPOINT = \"from-latest-checkpoint\"\n \"Load the model from the latest checkpoint\"\n\n FROM_BEST_CHECKPOINT = \"from-best-checkpoint\"\n \"Load the model from the best checkpoint\"\n\n FROM_SCRATCH = \"from-scratch\"\n \"Initialize the model from scratch\"\n\n class DataSrc(Enum):\n\n CSV_FILE = \"csv-file\"\n \"Load the dataset from a CSV file\"\n\n class ValidationSplit(Enum):\n \n TEMPORAL_HOLDOUT = \"temporal-holdout\"\n \"Reserve the last portion (e.g., 10-20%) of your time-ordered data for validation, and use the remaining data for training. This is a simple and widely used approach.\"\n\n output_dir: str\n \"Directory where to save checkpoints and generated images\"\n\n input_length: int\n \"Number of time steps to use as input\"\n\n no_features: int\n \"Number of features in the dataset\"\n\n no_mixer_layers: int\n \"Number of mixer layers\"\n\n prediction_length: int\n \"Number of time steps to predict\"\n\n data_src: DataSrc\n \"Where to load the dataset from\"\n\n device: str = \"mps\"\n \"Device to use for training\"\n\n data_src_csv: Optional[str] = None\n \"Path to the CSV file to load the dataset from. Only used if data_src is CSV_FILE\"\n\n batch_size: int = 64\n \"Batch size\"\n\n shuffle: bool = True\n \"Shuffle the data\"\n\n num_epochs: int = 10\n \"Number of epochs to train for\"\n\n learning_rate: float = 0.001\n \"Learning rate\"\n\n optimizer: str = \"Adam\"\n \"Optimizer to use\"\n\n random_seed: int = 42\n \"Random seed for reproducibility\"\n\n validation_split: ValidationSplit = ValidationSplit.TEMPORAL_HOLDOUT\n \"How to split the data into training and validation\"\n\n validation_split_holdout: float = 0.2\n \"Use the last X% of the data for validation. Only used for TEMPORAL_HOLDOUT\"\n\n initialize: Initialize = Initialize.FROM_SCRATCH\n \"How to initialize the model\"\n\n dropout: float = 0.5\n \"Dropout\"\n\n feat_mixing_hidden_channels: Optional[int] = None\n \"Number of hidden channels in the feature mixing MLP. If None, uses same as input features.\"\n\n early_stopping_patience: Optional[int] = 5\n \"Early stopping patience. If the validation loss does not improve over this many epochs, stop early. 
If None, no early stopping is used.\"\n\n @property\n def image_dir(self):\n makedirs(self.output_dir)\n makedirs(os.path.join(self.output_dir, \"images\"))\n return os.path.join(self.output_dir, \"images\")\n\n @property\n def checkpoint_init(self):\n makedirs(self.output_dir)\n return os.path.join(self.output_dir, \"init.pth\")\n\n @property\n def checkpoint_best(self):\n makedirs(self.output_dir)\n return os.path.join(self.output_dir, \"best.pth\")\n\n @property\n def checkpoint_latest(self):\n makedirs(self.output_dir)\n return os.path.join(self.output_dir, \"latest.pth\")\n\n @property\n def train_progress_json(self):\n makedirs(self.output_dir)\n return os.path.join(self.output_dir, \"loss.json\")\n\n @property\n def pred_val_dataset_json(self):\n makedirs(self.output_dir)\n return os.path.join(self.output_dir, \"pred_val_dataset.json\")\n\n @property\n def data_norm_json(self):\n makedirs(self.output_dir)\n return os.path.join(self.output_dir, \"data_norm.json\")\n\n def check_valid(self):\n assert 0 <= self.validation_split_holdout <= 1, \"validation_split_holdout must be between 0 and 1\"\n\n # Check device exists\n import torch\n assert self.device in [\"cpu\", \"cuda\", \"cuda:0\", \"cuda:1\", \"cuda:2\", \"cuda:3\", \"mps\"], f\"Device {self.device} not supported\"\n if self.device == \"cuda\":\n assert torch.cuda.is_available(), \"CUDA is not available\"\n assert torch.cuda.device_count() > 1, \"Must have more than one CUDA device to use MPS\"\n elif self.device == \"mps\":\n assert torch.backends.mps.is_available(), \"MPS is not available\"\n \n\n def load_training_metadata_or_new(self, epoch_start: Optional[int] = None) -> \"TrainingMetadata\":\n \"\"\"Load the training progress from a JSON file, or create a new one\n\n Args:\n epoch_start (Optional[int], optional): Starting epoch - earlier epochs will be removed if not None. Defaults to None.\n\n Returns:\n TrainProgress: Training metadata\n \"\"\" \n if os.path.exists(self.train_progress_json):\n with open(self.train_progress_json, \"r\") as f:\n tp = TrainingMetadata.from_dict(json.load(f))\n\n # Remove epochs after epoch_start\n if epoch_start is not None:\n tp.epoch_to_data = { epoch: tp.epoch_to_data[epoch] for epoch in tp.epoch_to_data if epoch < epoch_start }\n \n return tp\n else:\n return TrainingMetadata(epoch_to_data={})\n\n\n def write_data_norm(self, data_norm: DataNormalization):\n \"\"\"Write the data normalization to a JSON file\n\n Args:\n data_norm (DataNormalization): Data normalization\n \"\"\" \n with open(self.data_norm_json, \"w\") as f:\n json.dump(data_norm.to_dict(), f, indent=3)\n logger.debug(f\"Saved data normalization to {f.name}\")\n\n\n def write_training_metadata(self, train_data: \"TrainingMetadata\"):\n \"\"\"Write the training progress to a JSON file\n\n Args:\n train_data (TrainingMetadata): _description_\n \"\"\" \n if os.path.dirname(self.train_progress_json) != \"\":\n makedirs(os.path.dirname(self.train_progress_json))\n with open(self.train_progress_json, \"w\") as f:\n json.dump(train_data.to_dict(), f, indent=3)\n\n\n def create_data_loaders_train_val(self, data_norm: Optional[DataNormalization] = None) -> Tuple[DataLoader, DataLoader, DataNormalization]:\n \"\"\"Create the training and validation data loaders\n\n Args:\n data_norm (Optional[DataNormalization], optional): Data normalization to use, otherwise will be calculated. 
Defaults to None.\n\n Returns:\n Tuple[DataLoader, DataLoader, DataNormalization]: Training and validation data loaders\n \"\"\" \n\n if self.data_src == self.DataSrc.CSV_FILE:\n assert self.data_src_csv is not None, \"data_src_csv must be set if data_src is CSV_FILE\"\n\n from .load_csv import load_csv_dataset, ValidationSplit\n return load_csv_dataset(\n csv_file=self.data_src_csv,\n batch_size=self.batch_size,\n input_length=self.input_length,\n prediction_length=self.prediction_length,\n val_split=ValidationSplit(self.validation_split.value),\n val_split_holdout=self.validation_split_holdout,\n shuffle=self.shuffle,\n data_norm_exist=data_norm\n )\n else:\n raise NotImplementedError(f\"data_src {self.data_src} not implemented\")"
},
{
"identifier": "TSMixerGridSearch",
"path": "utils/tsmixer_grid_search_conf.py",
"snippet": "class TSMixerGridSearch(DataClassDictMixin):\n \"\"\"Configuration for grid search\n \"\"\" \n\n @dataclass\n class ParamRange(DataClassDictMixin):\n \n learning_rates: List[float]\n \"Learning rates\"\n\n no_mixer_layers: List[int]\n \"Number of mixer layers\"\n\n dropouts: List[float]\n \"Dropout\"\n\n input_lengths: List[int]\n \"Number of time steps to use as input\"\n\n prediction_lengths: List[int]\n \"Number of time steps to predict\"\n\n feat_mixing_hidden_channels: List[Optional[int]] = field(default_factory=lambda: [None])\n \"Number of hidden channels in the feature mixing MLP. If None, uses same as input features.\"\n\n batch_sizes: List[int] = field(default_factory=lambda: [64])\n \"Batch size\"\n\n num_epochs: List[int] = field(default_factory=lambda: [100])\n \"Number of epochs to train for\"\n\n optimizers: List[str] = field(default_factory=lambda: [\"Adam\"])\n \"Optimizer to use\"\n\n @property\n def options_str(self) -> str:\n s = []\n s.append((\"lr\",str(self.learning_rates)))\n s.append((\"nmix\",str(self.no_mixer_layers)))\n s.append((\"drop\",str(self.dropouts)))\n s.append((\"in\",str(self.input_lengths)))\n s.append((\"pred\",str(self.prediction_lengths)))\n s.append((\"hidden\",str(self.feat_mixing_hidden_channels)))\n s.append((\"batch\",str(self.batch_sizes)))\n s.append((\"epochs\",str(self.num_epochs)))\n s.append((\"opt\",str(self.optimizers)))\n\n # Sort by key\n s = sorted(s, key=lambda x: x[0])\n\n return \"_\".join([f\"{k}{v}\" for k,v in s])\n\n param_ranges: List[ParamRange]\n \"Any number of parameter ranges to try\"\n\n output_dir: str\n \"Output directory\"\n\n no_features: int\n \"Number of features in the dataset\"\n\n data_src: TSMixerConf.DataSrc = TSMixerConf.DataSrc.CSV_FILE\n \"Where to load the dataset from\"\n\n data_src_csv: Optional[str] = None\n \"Path to the CSV file to load the dataset from. Only used if data_src is CSV_FILE\"\n\n def iterate(self) -> Iterator[TSMixerConf]:\n \"\"\"Iterate over all configurations\n\n Yields:\n Iterator[TSMixerConf]: Configuration for a single run\n \"\"\" \n for idx,param_range in enumerate(self.param_ranges):\n logger.info(\"===========================================\")\n logger.info(f\"Grid search iteration {idx+1}/{len(self.param_ranges)}\")\n logger.info(\"===========================================\")\n\n for learning_rate in param_range.learning_rates:\n for no_mixer_layers in param_range.no_mixer_layers:\n for dropout in param_range.dropouts:\n for feat_mixing_hidden_channels in param_range.feat_mixing_hidden_channels:\n for input_length in param_range.input_lengths:\n for prediction_length in param_range.prediction_lengths:\n for batch_size in param_range.batch_sizes:\n for num_epochs in param_range.num_epochs:\n for optimizer in param_range.optimizers:\n # Output subdir\n output_dir = os.path.join(self.output_dir, param_range.options_str)\n conf = TSMixerConf(\n input_length=input_length,\n prediction_length=prediction_length,\n no_features=self.no_features,\n no_mixer_layers=no_mixer_layers,\n output_dir=output_dir,\n data_src=self.data_src,\n data_src_csv=self.data_src_csv,\n batch_size=batch_size,\n num_epochs=num_epochs,\n learning_rate=learning_rate,\n optimizer=optimizer,\n dropout=dropout,\n feat_mixing_hidden_channels=feat_mixing_hidden_channels\n )\n logger.info(f\"TSMixer config: {conf}\")\n logger.info(f\"Output sub-dir: {output_dir}\")\n yield conf"
},
{
"identifier": "TSMixer",
"path": "utils/tsmixer.py",
"snippet": "class TSMixer:\n \"\"\"TSMixer including training and prediction methods\n \"\"\" \n\n\n def __init__(self, conf: TSMixerConf):\n \"\"\"Constructor for TSMixer class\n\n Args:\n conf (TSMixerConf): Configuration\n \"\"\" \n conf.check_valid()\n self.conf = conf\n\n # Create the model\n self.model = TSMixerModel(\n input_length=self.conf.input_length,\n forecast_length=self.conf.prediction_length,\n no_feats=self.conf.no_features,\n feat_mixing_hidden_channels=self.conf.feat_mixing_hidden_channels or self.conf.no_features,\n no_mixer_layers=self.conf.no_mixer_layers,\n dropout=self.conf.dropout\n )\n\n # Move to device\n self.model.to(self.conf.device)\n\n # Load the model\n if self.conf.initialize == self.conf.Initialize.FROM_LATEST_CHECKPOINT:\n self.load_checkpoint(fname=self.conf.checkpoint_latest)\n elif self.conf.initialize == self.conf.Initialize.FROM_BEST_CHECKPOINT:\n self.load_checkpoint(fname=self.conf.checkpoint_best)\n elif self.conf.initialize == self.conf.Initialize.FROM_SCRATCH:\n pass\n else:\n raise NotImplementedError(f\"Initialize {self.conf.initialize} not implemented\")\n\n\n def load_checkpoint(self, fname: str, optimizer: Optional[torch.optim.Optimizer] = None) -> Tuple[int,float]:\n \"\"\"Load a checkpoint, optionally including the optimizer state\n\n Args:\n fname (str): File name\n optimizer (Optional[torch.optim.Optimizer], optional): Optimizer to update from checkpoint. Defaults to None.\n\n Returns:\n Tuple[int,float]: Epoch and loss\n \"\"\" \n logger.debug(f\"Loading model weights from {fname}\")\n checkpoint = torch.load(fname)\n self.model.load_state_dict(checkpoint['model_state_dict'])\n\n if optimizer is not None:\n logger.debug(f\"Loading optimizer state from {fname}\")\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n epoch = checkpoint['epoch']\n loss = checkpoint['loss']\n logger.info(f\"Loaded optimizer state from epoch {epoch} with loss {loss}\")\n return epoch, loss\n\n\n def predict(self, batch_input: torch.Tensor) -> torch.Tensor:\n \"\"\"Predict the output for a batch of input data\n\n Args:\n batch_input (torch.Tensor): Input data of shape (batch_size, input_length (time), no_features)\n\n Returns:\n torch.Tensor: Predicted output of shape (batch_size, prediction_length (time), no_features)\n \"\"\" \n self.model.eval()\n\n # Check size\n assert batch_input.shape[1] == self.conf.input_length, f\"Input length {batch_input.shape[1]} does not match configuration {self.conf.input_length}\"\n assert batch_input.shape[2] == self.conf.no_features, f\"Number of features {batch_input.shape[2]} does not match configuration {self.conf.no_features}\"\n\n # Predict\n batch_input = batch_input.to(self.conf.device)\n with torch.no_grad():\n batch_pred_hat = self.model(batch_input)\n return batch_pred_hat\n\n\n def load_data_norm(self) -> Optional[DataNormalization]:\n \"\"\"Load the data normalization from a JSON file\n\n Returns:\n Optional[DataNormalization]: Data normalization, or None if the file does not exist\n \"\"\" \n\n if os.path.exists(self.conf.data_norm_json):\n logger.debug(f\"Loading data normalization from {self.conf.data_norm_json}\")\n with open(self.conf.data_norm_json, \"r\") as f:\n return DataNormalization.from_dict(json.load(f))\n else:\n return None\n\n\n @dataclass\n class PredData(DataClassDictMixin):\n \"\"\"Prediction data\n \"\"\" \n\n pred_gt: List[List[float]]\n \"Ground truth prediction\"\n\n pred: List[List[float]]\n \"Model prediction\"\n\n inputs: Optional[List[List[float]]] = None\n 
\"Inputs\"\n\n\n def predict_val_dataset(self, max_samples: Optional[int] = None, save_inputs: bool = False) -> List[PredData]:\n \"\"\"Predict on the validation dataset\n\n Args:\n max_samples (Optional[int], optional): Maximum number of samples to predict from the validation dataset. Defaults to None.\n save_inputs (bool, optional): Save the inputs as well as the predictions. Defaults to False.\n\n Returns:\n List[PredData]: List of predictions\n \"\"\" \n\n # Change batch size to 1 and not shuffle data for consistency\n batch_size_save = self.conf.batch_size\n shuffle_save = self.conf.shuffle\n self.conf.batch_size = 1\n self.conf.shuffle = False\n\n # Load the data normalization if it exists and use it\n data_norm = self.load_data_norm()\n\n # Create the loaders\n _, loader_val, _ = self.conf.create_data_loaders_train_val(data_norm)\n \n # Predict\n data_list: List[TSMixer.PredData] = []\n for _ in tqdm(range(max_samples or len(loader_val)), desc=\"Predicting\"):\n batch_input, batch_pred = next(iter(loader_val))\n batch_pred_hat = self.predict(batch_input)\n data = TSMixer.PredData(\n pred_gt=batch_pred.tolist()[0],\n pred=batch_pred_hat.tolist()[0],\n inputs=batch_input.tolist()[0] if save_inputs else None\n )\n data_list.append(data) \n\n # Save data to json\n with open(self.conf.pred_val_dataset_json, \"w\") as f:\n json.dump([ d.to_dict() for d in data_list ], f)\n logger.info(f\"Saved data to {f.name}\")\n\n # Reset options\n self.conf.batch_size = batch_size_save\n self.conf.shuffle = shuffle_save\n\n return data_list\n\n\n def train(self):\n \"\"\"Train the model\n \"\"\" \n\n # Create the optimizer\n optimizer_cls = getattr(torch.optim, self.conf.optimizer)\n optimizer = optimizer_cls(self.model.parameters(), lr=self.conf.learning_rate)\n\n # Load if needed\n if self.conf.initialize == self.conf.Initialize.FROM_LATEST_CHECKPOINT:\n epoch_start, val_loss_best = self.load_checkpoint(fname=self.conf.checkpoint_latest, optimizer=optimizer)\n data_norm = self.load_data_norm()\n elif self.conf.initialize == self.conf.Initialize.FROM_BEST_CHECKPOINT:\n epoch_start, val_loss_best = self.load_checkpoint(fname=self.conf.checkpoint_best, optimizer=optimizer)\n data_norm = self.load_data_norm()\n elif self.conf.initialize == self.conf.Initialize.FROM_SCRATCH:\n epoch_start, val_loss_best = 0, float(\"inf\")\n\n # Clear the output directory\n if os.path.exists(self.conf.output_dir):\n logger.warning(f\"Output directory {self.conf.output_dir} already exists. Deleting it to start over. 
You have 8 seconds.\")\n for _ in range(8):\n print(\".\", end=\"\", flush=True)\n time.sleep(1)\n print(\"\")\n shutil.rmtree(self.conf.output_dir)\n makedirs(self.conf.output_dir)\n\n # Save initial weights\n self._save_checkpoint(epoch=epoch_start, optimizer=optimizer, loss=val_loss_best, fname=self.conf.checkpoint_init)\n data_norm = None\n\n # Copy the config to the output directory for reference\n fname_conf = os.path.join(self.conf.output_dir, \"conf.yml\")\n makedirs(self.conf.output_dir)\n with open(fname_conf, \"w\") as f:\n yaml.dump(self.conf.to_dict(), f, indent=3)\n logger.info(f\"Saved configuration to {f.name}\")\n \n else:\n raise NotImplementedError(f\"Initialize {self.conf.initialize} not implemented\")\n train_data = self.conf.load_training_metadata_or_new(epoch_start)\n\n # Create the loaders\n loader_train, loader_val, data_norm = self.conf.create_data_loaders_train_val(data_norm)\n\n # Write data normalization\n self.conf.write_data_norm(data_norm)\n\n # Train\n epoch_last_improvement = None\n for epoch in range(epoch_start, self.conf.num_epochs):\n logger.info(f\"Epoch {epoch+1}/{self.conf.num_epochs}\")\n t0 = time.time()\n\n # Training\n train_loss = 0\n for batch_input, batch_pred in tqdm(loader_train, desc=\"Training batches\"):\n batch_input, batch_pred = batch_input.to(self.conf.device), batch_pred.to(self.conf.device)\n train_loss += self._train_step(batch_input, batch_pred, optimizer)\n\n # Validation loss\n self.model.eval()\n with torch.no_grad():\n val_loss = 0\n for batch_input, batch_pred in tqdm(loader_val, desc=\"Validation batches\"):\n batch_input, batch_pred = batch_input.to(self.conf.device), batch_pred.to(self.conf.device)\n val_loss += self._compute_loss(batch_input, batch_pred).item()\n\n # Log\n train_loss /= len(loader_train)\n val_loss /= len(loader_val)\n dur = time.time() - t0\n logger.info(f\"Training loss: {train_loss:.5f} val: {val_loss:.5f} duration: {dur:.2f}s\")\n\n # Store metadata about training\n train_data.epoch_to_data[epoch] = TrainingMetadata.EpochData(epoch=epoch, train_loss=train_loss, val_loss=val_loss, duration_seconds=dur)\n\n # Save checkpoint\n if val_loss < val_loss_best:\n logger.info(f\"New best validation loss: {val_loss:.5f}\")\n self._save_checkpoint(epoch=epoch, optimizer=optimizer, loss=val_loss, fname=self.conf.checkpoint_best)\n val_loss_best = val_loss\n epoch_last_improvement = epoch\n self._save_checkpoint(epoch=epoch, optimizer=optimizer, loss=val_loss, fname=self.conf.checkpoint_latest)\n self.conf.write_training_metadata(train_data)\n\n # Early stopping\n if epoch_last_improvement is not None and self.conf.early_stopping_patience is not None and epoch - epoch_last_improvement >= self.conf.early_stopping_patience:\n logger.info(f\"Stopping early after {epoch - epoch_last_improvement} epochs without improvement in validation loss.\")\n break\n\n\n def _save_checkpoint(self, epoch: int, optimizer: torch.optim.Optimizer, loss: float, fname: str):\n torch.save({\n 'epoch': epoch,\n 'model_state_dict': self.model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': loss,\n }, fname)\n\n\n def _compute_loss(self, batch_input: torch.Tensor, batch_pred: torch.Tensor) -> torch.Tensor:\n \"\"\"Compute the loss\n\n Args:\n batch_input (torch.Tensor): Batch input of shape (batch_size, input_length (time), no_features)\n batch_pred (torch.Tensor): Batch prediction of shape (batch_size, prediction_length (time), no_features)\n\n Returns:\n torch.Tensor: Loss (MSE)\n \"\"\" \n\n # Forward pass\n 
batch_pred_hat = self.model(batch_input)\n\n # Compute MSE loss\n loss = torch.nn.functional.mse_loss(batch_pred_hat, batch_pred)\n\n # Normalize the loss by the batch size\n # batch_size = batch_input.size(0)\n # loss /= batch_size\n\n return loss\n\n\n def _train_step(self, batch_input: torch.Tensor, batch_pred: torch.Tensor, optimizer: torch.optim.Optimizer) -> float:\n \"\"\"Training step\n\n Args:\n batch_input (torch.Tensor): Input data of shape (batch_size, input_length (time), no_features)\n batch_pred (torch.Tensor): Prediction data of shape (batch_size, prediction_length (time), no_features)\n optimizer (torch.optim.Optimizer): Optimizer\n\n Returns:\n float: Loss (MSE)\n \"\"\" \n optimizer.zero_grad()\n\n # Train mode\n self.model.train()\n\n # Loss\n loss = self._compute_loss(batch_input, batch_pred)\n\n # Backward pass\n loss.backward()\n\n # Update parameters\n optimizer.step()\n\n return loss.item()"
}
] | from utils import TSMixer, plot_preds, plot_loss, TSMixerConf, TSMixerGridSearch
import argparse
import yaml
import os | 7,320 |
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--command", type=str, required=True, choices=["train", "predict", "loss", "grid-search"], help="Command to run")
parser.add_argument("--conf", type=str, required=False, help="Path to the configuration file")
parser.add_argument("--no-feats-plot", type=int, required=False, default=6, help="Number of features to plot")
parser.add_argument("--show", action="store_true", required=False, help="Show plots")
args = parser.parse_args()
if args.command == "train":
# Load configuration
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
tsmixer = TSMixer(conf)
# Train
tsmixer.train()
elif args.command == "predict":
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
# Load best checkpoint
conf.initialize = TSMixerConf.Initialize.FROM_BEST_CHECKPOINT
tsmixer = TSMixer(conf)
# Predict on validation dataset
data = tsmixer.predict_val_dataset(max_samples=10, save_inputs=False)
# Plot predictions
data_plt = data[0]
assert args.no_feats_plot is not None, "Must provide number of features to plot"
plot_preds(
preds=data_plt.pred,
preds_gt=data_plt.pred_gt,
no_feats_plot=args.no_feats_plot,
show=args.show,
fname_save=os.path.join(conf.image_dir, "preds.png")
)
elif args.command == "loss":
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
train_data = conf.load_training_metadata_or_new()
plot_loss(
train_data=train_data,
show=args.show,
fname_save=os.path.join(conf.image_dir, "loss.png")
)
elif args.command == "grid-search":
# Load configuration
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
|
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--command", type=str, required=True, choices=["train", "predict", "loss", "grid-search"], help="Command to run")
parser.add_argument("--conf", type=str, required=False, help="Path to the configuration file")
parser.add_argument("--no-feats-plot", type=int, required=False, default=6, help="Number of features to plot")
parser.add_argument("--show", action="store_true", required=False, help="Show plots")
args = parser.parse_args()
if args.command == "train":
# Load configuration
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
tsmixer = TSMixer(conf)
# Train
tsmixer.train()
elif args.command == "predict":
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
# Load best checkpoint
conf.initialize = TSMixerConf.Initialize.FROM_BEST_CHECKPOINT
tsmixer = TSMixer(conf)
# Predict on validation dataset
data = tsmixer.predict_val_dataset(max_samples=10, save_inputs=False)
# Plot predictions
data_plt = data[0]
assert args.no_feats_plot is not None, "Must provide number of features to plot"
plot_preds(
preds=data_plt.pred,
preds_gt=data_plt.pred_gt,
no_feats_plot=args.no_feats_plot,
show=args.show,
fname_save=os.path.join(conf.image_dir, "preds.png")
)
elif args.command == "loss":
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
train_data = conf.load_training_metadata_or_new()
plot_loss(
train_data=train_data,
show=args.show,
fname_save=os.path.join(conf.image_dir, "loss.png")
)
elif args.command == "grid-search":
# Load configuration
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f: | conf_grid_search = TSMixerGridSearch.from_dict(yaml.safe_load(f)) | 3 | 2023-11-18 19:56:18+00:00 | 12k |
Jisencc/yolov5_dual_weighting | utils/dataloaders.py | [
{
"identifier": "Albumentations",
"path": "utils/augmentations.py",
"snippet": "class Albumentations:\n # YOLOv5 Albumentations class (optional, only used if package is installed)\n def __init__(self, size=640):\n self.transform = None\n prefix = colorstr('albumentations: ')\n try:\n import albumentations as A\n check_version(A.__version__, '1.0.3', hard=True) # version requirement\n\n T = [\n A.RandomResizedCrop(height=size, width=size, scale=(0.8, 1.0), ratio=(0.9, 1.11), p=0.0),\n A.Blur(p=0.01),\n A.MedianBlur(p=0.01),\n A.ToGray(p=0.01),\n A.CLAHE(p=0.01),\n A.RandomBrightnessContrast(p=0.0),\n A.RandomGamma(p=0.0),\n A.ImageCompression(quality_lower=75, p=0.0)] # transforms\n self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))\n\n LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))\n except ImportError: # package not installed, skip\n pass\n except Exception as e:\n LOGGER.info(f'{prefix}{e}')\n\n def __call__(self, im, labels, p=1.0):\n if self.transform and random.random() < p:\n new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed\n im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])])\n return im, labels"
},
{
"identifier": "augment_hsv",
"path": "utils/augmentations.py",
"snippet": "def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):\n # HSV color-space augmentation\n if hgain or sgain or vgain:\n r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains\n hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))\n dtype = im.dtype # uint8\n\n x = np.arange(0, 256, dtype=r.dtype)\n lut_hue = ((x * r[0]) % 180).astype(dtype)\n lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)\n lut_val = np.clip(x * r[2], 0, 255).astype(dtype)\n\n im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))\n cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed"
},
{
"identifier": "classify_albumentations",
"path": "utils/augmentations.py",
"snippet": "def classify_albumentations(\n augment=True,\n size=224,\n scale=(0.08, 1.0),\n ratio=(0.75, 1.0 / 0.75), # 0.75, 1.33\n hflip=0.5,\n vflip=0.0,\n jitter=0.4,\n mean=IMAGENET_MEAN,\n std=IMAGENET_STD,\n auto_aug=False):\n # YOLOv5 classification Albumentations (optional, only used if package is installed)\n prefix = colorstr('albumentations: ')\n try:\n import albumentations as A\n from albumentations.pytorch import ToTensorV2\n check_version(A.__version__, '1.0.3', hard=True) # version requirement\n if augment: # Resize and crop\n T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)]\n if auto_aug:\n # TODO: implement AugMix, AutoAug & RandAug in albumentation\n LOGGER.info(f'{prefix}auto augmentations are currently not supported')\n else:\n if hflip > 0:\n T += [A.HorizontalFlip(p=hflip)]\n if vflip > 0:\n T += [A.VerticalFlip(p=vflip)]\n if jitter > 0:\n color_jitter = (float(jitter), ) * 3 # repeat value for brightness, contrast, satuaration, 0 hue\n T += [A.ColorJitter(*color_jitter, 0)]\n else: # Use fixed crop for eval set (reproducibility)\n T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]\n T += [A.Normalize(mean=mean, std=std), ToTensorV2()] # Normalize and convert to Tensor\n LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))\n return A.Compose(T)\n\n except ImportError: # package not installed, skip\n LOGGER.warning(f'{prefix}⚠️ not found, install with `pip install albumentations` (recommended)')\n except Exception as e:\n LOGGER.info(f'{prefix}{e}')"
},
{
"identifier": "classify_transforms",
"path": "utils/augmentations.py",
"snippet": "def classify_transforms(size=224):\n # Transforms to apply if albumentations not installed\n assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)'\n # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])\n return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])"
},
{
"identifier": "copy_paste",
"path": "utils/augmentations.py",
"snippet": "def copy_paste(im, labels, segments, p=0.5):\n # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)\n n = len(segments)\n if p and n:\n h, w, c = im.shape # height, width, channels\n im_new = np.zeros(im.shape, np.uint8)\n for j in random.sample(range(n), k=round(p * n)):\n l, s = labels[j], segments[j]\n box = w - l[3], l[2], w - l[1], l[4]\n ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area\n if (ioa < 0.30).all(): # allow 30% obscuration of existing labels\n labels = np.concatenate((labels, [[l[0], *box]]), 0)\n segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))\n cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (1, 1, 1), cv2.FILLED)\n\n result = cv2.flip(im, 1) # augment segments (flip left-right)\n i = cv2.flip(im_new, 1).astype(bool)\n im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug\n\n return im, labels, segments"
},
{
"identifier": "letterbox",
"path": "utils/augmentations.py",
"snippet": "def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):\n # Resize and pad image while meeting stride-multiple constraints\n shape = im.shape[:2] # current shape [height, width]\n if isinstance(new_shape, int):\n new_shape = (new_shape, new_shape)\n\n # Scale ratio (new / old)\n r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n if not scaleup: # only scale down, do not scale up (for better val mAP)\n r = min(r, 1.0)\n\n # Compute padding\n ratio = r, r # width, height ratios\n new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))\n dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding\n if auto: # minimum rectangle\n dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding\n elif scaleFill: # stretch\n dw, dh = 0.0, 0.0\n new_unpad = (new_shape[1], new_shape[0])\n ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios\n\n dw /= 2 # divide padding into 2 sides\n dh /= 2\n\n if shape[::-1] != new_unpad: # resize\n im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)\n top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border\n return im, ratio, (dw, dh)"
},
{
"identifier": "mixup",
"path": "utils/augmentations.py",
"snippet": "def mixup(im, labels, im2, labels2):\n # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf\n r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0\n im = (im * r + im2 * (1 - r)).astype(np.uint8)\n labels = np.concatenate((labels, labels2), 0)\n return im, labels"
},
{
"identifier": "random_perspective",
"path": "utils/augmentations.py",
"snippet": "def random_perspective(im,\n targets=(),\n segments=(),\n degrees=10,\n translate=.1,\n scale=.1,\n shear=10,\n perspective=0.0,\n border=(0, 0)):\n # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10))\n # targets = [cls, xyxy]\n\n height = im.shape[0] + border[0] * 2 # shape(h,w,c)\n width = im.shape[1] + border[1] * 2\n\n # Center\n C = np.eye(3)\n C[0, 2] = -im.shape[1] / 2 # x translation (pixels)\n C[1, 2] = -im.shape[0] / 2 # y translation (pixels)\n\n # Perspective\n P = np.eye(3)\n P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)\n P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)\n\n # Rotation and Scale\n R = np.eye(3)\n a = random.uniform(-degrees, degrees)\n # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations\n s = random.uniform(1 - scale, 1 + scale)\n # s = 2 ** random.uniform(-scale, scale)\n R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)\n\n # Shear\n S = np.eye(3)\n S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)\n S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)\n\n # Translation\n T = np.eye(3)\n T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)\n T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)\n\n # Combined rotation matrix\n M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT\n if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed\n if perspective:\n im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))\n else: # affine\n im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))\n\n # Visualize\n # import matplotlib.pyplot as plt\n # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()\n # ax[0].imshow(im[:, :, ::-1]) # base\n # ax[1].imshow(im2[:, :, ::-1]) # warped\n\n # Transform label coordinates\n n = len(targets)\n if n:\n use_segments = any(x.any() for x in segments) and len(segments) == n\n new = np.zeros((n, 4))\n if use_segments: # warp segments\n segments = resample_segments(segments) # upsample\n for i, segment in enumerate(segments):\n xy = np.ones((len(segment), 3))\n xy[:, :2] = segment\n xy = xy @ M.T # transform\n xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine\n\n # clip\n new[i] = segment2box(xy, width, height)\n\n else: # warp boxes\n xy = np.ones((n * 4, 3))\n xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1\n xy = xy @ M.T # transform\n xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine\n\n # create new boxes\n x = xy[:, [0, 2, 4, 6]]\n y = xy[:, [1, 3, 5, 7]]\n new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T\n\n # clip\n new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)\n new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)\n\n # filter candidates\n i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)\n targets = targets[i]\n targets[:, 1:5] = new[i]\n\n return im, targets"
},
{
"identifier": "DATASETS_DIR",
"path": "utils/general.py",
"snippet": "FILE = Path(__file__).resolve()\nROOT = FILE.parents[1] # YOLOv5 root directory\nRANK = int(os.getenv('RANK', -1))\nNUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads\nDATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory\nAUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode\nVERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode\nTQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format\nFONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf\nLOGGING_NAME = 'yolov5'\nLOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)\nCONFIG_DIR = user_config_dir() # Ultralytics settings dir\ndef is_ascii(s=''):\ndef is_chinese(s='人工智能'):\ndef is_colab():\ndef is_jupyter():\ndef is_kaggle():\ndef is_docker() -> bool:\ndef is_writeable(dir, test=False):\ndef set_logging(name=LOGGING_NAME, verbose=True):\ndef user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):\n def __init__(self, t=0.0):\n def __enter__(self):\n def __exit__(self, type, value, traceback):\n def time(self):\n def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):\n def _timeout_handler(self, signum, frame):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def __init__(self, new_dir):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\ndef methods(instance):\ndef print_args(args: Optional[dict] = None, show_file=True, show_func=False):\ndef init_seeds(seed=0, deterministic=False):\ndef intersect_dicts(da, db, exclude=()):\ndef get_default_args(func):\ndef get_latest_run(search_dir='.'):\ndef file_age(path=__file__):\ndef file_date(path=__file__):\ndef file_size(path):\ndef check_online():\n def run_once():\ndef git_describe(path=ROOT): # path must be a directory\ndef check_git_status(repo='ultralytics/yolov5', branch='master'):\ndef check_git_info(path='.'):\ndef check_python(minimum='3.8.0'):\ndef check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):\ndef check_img_size(imgsz, s=32, floor=0):\ndef check_imshow(warn=False):\ndef check_suffix(file='yolov5s.pt', suffix=('.pt', ), msg=''):\ndef check_yaml(file, suffix=('.yaml', '.yml')):\ndef check_file(file, suffix=''):\ndef check_font(font=FONT, progress=False):\ndef check_dataset(data, autodownload=True):\ndef check_amp(model):\n def amp_allclose(model, im):\ndef yaml_load(file='data.yaml'):\ndef yaml_save(file='data.yaml', data={}):\ndef unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')):\ndef url2file(url):\ndef download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3):\n def download_one(url, dir):\ndef make_divisible(x, divisor):\ndef clean_str(s):\ndef one_cycle(y1=0.0, y2=1.0, steps=100):\ndef colorstr(*input):\ndef labels_to_class_weights(labels, nc=80):\ndef labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):\ndef coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\ndef xyxy2xywh(x):\ndef xywh2xyxy(x):\ndef xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):\ndef xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):\ndef xyn2xy(x, w=640, h=640, padw=0, padh=0):\ndef segment2box(segment, width=640, height=640):\ndef segments2boxes(segments):\ndef resample_segments(segments, n=1000):\ndef scale_boxes(img1_shape, boxes, img0_shape, 
ratio_pad=None):\ndef scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False):\ndef clip_boxes(boxes, shape):\ndef clip_segments(segments, shape):\ndef non_max_suppression(\n prediction,\n conf_thres=0.25,\n iou_thres=0.45,\n classes=None,\n agnostic=False,\n multi_label=False,\n labels=(),\n max_det=300,\n nm=0, # number of masks\n):\ndef strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\ndef print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')):\ndef apply_classifier(x, model, img, im0):\ndef increment_path(path, exist_ok=False, sep='', mkdir=False):\ndef imread(filename, flags=cv2.IMREAD_COLOR):\ndef imwrite(filename, img):\ndef imshow(path, im):\nclass Profile(contextlib.ContextDecorator):\nclass Timeout(contextlib.ContextDecorator):\nclass WorkingDirectory(contextlib.ContextDecorator):"
},
{
"identifier": "torch_distributed_zero_first",
"path": "utils/torch_utils.py",
"snippet": "@contextmanager\ndef torch_distributed_zero_first(local_rank: int):\n # Decorator to make all processes in distributed training wait for each local_master to do something\n if local_rank not in [-1, 0]:\n dist.barrier(device_ids=[local_rank])\n yield\n if local_rank == 0:\n dist.barrier(device_ids=[0])"
}
] | import contextlib
import glob
import hashlib
import json
import math
import os
import random
import shutil
import time
import numpy as np
import psutil
import torch
import torch.nn.functional as F
import torchvision
import yaml
import mss
import pafy
from itertools import repeat
from multiprocessing.pool import Pool, ThreadPool
from pathlib import Path
from threading import Thread
from urllib.parse import urlparse
from PIL import ExifTags, Image, ImageOps
from torch.utils.data import DataLoader, Dataset, dataloader, distributed
from tqdm import tqdm
from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste,
letterbox, mixup, random_perspective)
from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, check_dataset, check_requirements,
check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy,
xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
from utils.torch_utils import torch_distributed_zero_first | 9,569 | for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.dataloaders import *; autosplit()
Arguments
path: Path to images directory
weights: Train, val, test weights (list, tuple)
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only
n = len(files) # number of files
random.seed(0) # for reproducibility
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
for x in txt:
if (path.parent / x).exists():
(path.parent / x).unlink() # remove existing
print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path.parent / txt[i], 'a') as f:
f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n') # add image to txt file
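# Hypothetical usage sketch (illustrative only, not part of the upstream module): after
# autosplit() runs, each autosplit_*.txt file lists image paths relative to the dataset
# parent directory and can be referenced directly as the train/val entries of a dataset YAML.
def _example_count_split(path=DATASETS_DIR / 'coco128/images', split='autosplit_train.txt'):
    with open(Path(path).parent / split) as f:
        return len(f.read().strip().splitlines())  # number of images assigned to this split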
def verify_image_label(args):
# Verify one image-label pair
im_file, lb_file, prefix = args
nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
if im.format.lower() in ('jpg', 'jpeg'):
with open(im_file, 'rb') as f:
f.seek(-2, 2)
if f.read() != b'\xff\xd9': # corrupt JPEG
ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)
msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved'
# verify labels
if os.path.isfile(lb_file):
nf = 1 # label found
with open(lb_file) as f:
lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
if any(len(x) > 6 for x in lb): # is segment
classes = np.array([x[0] for x in lb], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...)
lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
lb = np.array(lb, dtype=np.float32)
nl = len(lb)
if nl:
assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected'
assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}'
assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}'
_, i = np.unique(lb, axis=0, return_index=True)
if len(i) < nl: # duplicate row check
lb = lb[i] # remove duplicates
if segments:
segments = [segments[x] for x in i]
msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed'
else:
ne = 1 # label empty
lb = np.zeros((0, 5), dtype=np.float32)
else:
nm = 1 # label missing
lb = np.zeros((0, 5), dtype=np.float32)
return im_file, lb, shape, segments, nm, nf, ne, nc, msg
except Exception as e:
nc = 1
msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}'
return [None, None, None, None, nm, nf, ne, nc, msg]
class HUBDatasetStats():
""" Class for generating HUB dataset JSON and `-hub` dataset directory
Arguments
path: Path to data.yaml or data.zip (with data.yaml inside data.zip)
autodownload: Attempt to download dataset if not found locally
Usage
from utils.dataloaders import HUBDatasetStats
stats = HUBDatasetStats('coco128.yaml', autodownload=True) # usage 1
stats = HUBDatasetStats('path/to/coco128.zip') # usage 2
stats.get_json(save=False)
stats.process_images()
"""
def __init__(self, path='coco128.yaml', autodownload=False):
# Initialize class
zipped, data_dir, yaml_path = self._unzip(Path(path))
try:
with open(check_yaml(yaml_path), errors='ignore') as f:
data = yaml.safe_load(f) # data dict
if zipped:
data['path'] = data_dir
except Exception as e:
raise Exception('error/HUB/dataset_stats/yaml_load') from e
| # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Dataloaders and dataset utils
"""
# Parameters
HELP_URL = 'See https://docs.ultralytics.com/yolov5/tutorials/train_custom_data'
IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes
VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes
LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv('RANK', -1))
PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(paths):
# Returns a single hash value of a list of paths (files or dirs)
size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes
h = hashlib.sha256(str(size).encode()) # hash sizes
h.update(''.join(paths).encode()) # hash paths
return h.hexdigest() # return hash
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
with contextlib.suppress(Exception):
rotation = dict(img._getexif().items())[orientation]
if rotation in [6, 8]: # rotation 270 or 90
s = (s[1], s[0])
return s
def exif_transpose(image):
"""
Transpose a PIL image accordingly if it has an EXIF Orientation tag.
Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()
:param image: The image to transpose.
:return: An image.
"""
exif = image.getexif()
orientation = exif.get(0x0112, 1) # default 1
if orientation > 1:
method = {
2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90}.get(orientation)
if method is not None:
image = image.transpose(method)
del exif[0x0112]
image.info['exif'] = exif.tobytes()
return image
def seed_worker(worker_id):
# Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader
worker_seed = torch.initial_seed() % 2 ** 32
np.random.seed(worker_seed)
random.seed(worker_seed)
def create_dataloader(path,
imgsz,
batch_size,
stride,
single_cls=False,
hyp=None,
augment=False,
cache=False,
pad=0.0,
rect=False,
rank=-1,
workers=8,
image_weights=False,
quad=False,
prefix='',
shuffle=False,
seed=0):
if rect and shuffle:
LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False')
shuffle = False
with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
dataset = LoadImagesAndLabels(
path,
imgsz,
batch_size,
augment=augment, # augmentation
hyp=hyp, # hyperparameters
rect=rect, # rectangular batches
cache_images=cache,
single_cls=single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
nd = torch.cuda.device_count() # number of CUDA devices
nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates
generator = torch.Generator()
generator.manual_seed(6148914691236517205 + seed + RANK)
return loader(dataset,
batch_size=batch_size,
shuffle=shuffle and sampler is None,
num_workers=nw,
sampler=sampler,
pin_memory=PIN_MEMORY,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn,
worker_init_fn=seed_worker,
generator=generator), dataset
class InfiniteDataLoader(dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for _ in range(len(self)):
yield next(self.iterator)
class _RepeatSampler:
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadScreenshots:
# YOLOv5 screenshot dataloader, i.e. `python detect.py --source "screen 0 100 100 512 256"`
def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None):
# source = [screen_number left top width height] (pixels)
check_requirements('mss')
source, *params = source.split()
self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0
if len(params) == 1:
self.screen = int(params[0])
elif len(params) == 4:
left, top, width, height = (int(x) for x in params)
elif len(params) == 5:
self.screen, left, top, width, height = (int(x) for x in params)
self.img_size = img_size
self.stride = stride
self.transforms = transforms
self.auto = auto
self.mode = 'stream'
self.frame = 0
self.sct = mss.mss()
# Parse monitor shape
monitor = self.sct.monitors[self.screen]
self.top = monitor['top'] if top is None else (monitor['top'] + top)
self.left = monitor['left'] if left is None else (monitor['left'] + left)
self.width = width or monitor['width']
self.height = height or monitor['height']
self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height}
def __iter__(self):
return self
def __next__(self):
# mss screen capture: get raw pixels from the screen as np array
im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR
s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: '
if self.transforms:
im = self.transforms(im0) # transforms
else:
im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize
im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
im = np.ascontiguousarray(im) # contiguous
self.frame += 1
return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s
class LoadImages:
# YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line
path = Path(path).read_text().rsplit()
files = []
for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:
p = str(Path(p).resolve())
if '*' in p:
files.extend(sorted(glob.glob(p, recursive=True))) # glob
elif os.path.isdir(p):
files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir
elif os.path.isfile(p):
files.append(p) # files
else:
raise FileNotFoundError(f'{p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
self.auto = auto
self.transforms = transforms # optional
self.vid_stride = vid_stride # video frame-rate stride
if any(videos):
self._new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
for _ in range(self.vid_stride):
self.cap.grab()
ret_val, im0 = self.cap.retrieve()
while not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
path = self.files[self.count]
self._new_video(path)
ret_val, im0 = self.cap.read()
self.frame += 1
# im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False
s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '
else:
# Read image
self.count += 1
im0 = cv2.imread(path) # BGR
assert im0 is not None, f'Image Not Found {path}'
s = f'image {self.count}/{self.nf} {path}: '
if self.transforms:
im = self.transforms(im0) # transforms
else:
im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize
im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
im = np.ascontiguousarray(im) # contiguous
return path, im, im0, self.cap, s
def _new_video(self, path):
# Create a new video capture object
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)
self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees
# self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493
def _cv2_rotate(self, im):
# Rotate a cv2 video manually
if self.orientation == 0:
return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)
elif self.orientation == 180:
return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)
elif self.orientation == 90:
return cv2.rotate(im, cv2.ROTATE_180)
return im
def __len__(self):
return self.nf # number of files
class LoadStreams:
# YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`
def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
torch.backends.cudnn.benchmark = True # faster for fixed-size inference
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
self.vid_stride = vid_stride # video frame-rate stride
sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]
n = len(sources)
self.sources = [clean_str(x) for x in sources] # clean source names for later
self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
for i, s in enumerate(sources): # index, source
# Start thread to read frames from video stream
st = f'{i + 1}/{n}: {s}... '
if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video
# YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/LNwODJXcvt4'
check_requirements(('pafy', 'youtube_dl==2020.12.2'))
s = pafy.new(s).getbest(preftype='mp4').url # YouTube URL
s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam
if s == 0:
assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.'
assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.'
cap = cv2.VideoCapture(s)
assert cap.isOpened(), f'{st}Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan
self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback
self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback
_, self.imgs[i] = cap.read() # guarantee first frame
self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)
LOGGER.info(f'{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)')
self.threads[i].start()
LOGGER.info('') # newline
# check for common shapes
s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs])
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
self.auto = auto and self.rect
self.transforms = transforms # optional
if not self.rect:
LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.')
def update(self, i, cap, stream):
# Read stream `i` frames in daemon thread
n, f = 0, self.frames[i] # frame number, frame array
while cap.isOpened() and n < f:
n += 1
cap.grab() # .read() = .grab() followed by .retrieve()
if n % self.vid_stride == 0:
success, im = cap.retrieve()
if success:
self.imgs[i] = im
else:
LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.')
self.imgs[i] = np.zeros_like(self.imgs[i])
cap.open(stream) # re-open stream if signal was lost
time.sleep(0.0) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
im0 = self.imgs.copy()
if self.transforms:
im = np.stack([self.transforms(x) for x in im0]) # transforms
else:
im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize
im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW
im = np.ascontiguousarray(im) # contiguous
return self.sources, im, im0, None, ''
def __len__(self):
return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}' # /images/, /labels/ substrings
return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]
class LoadImagesAndLabels(Dataset):
# YOLOv5 train_loader/val_loader, loads images and labels for training and validation
cache_version = 0.6 # dataset labels *.cache version
rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4]
def __init__(self,
path,
img_size=640,
batch_size=16,
augment=False,
hyp=None,
rect=False,
image_weights=False,
cache_images=False,
single_cls=False,
stride=32,
pad=0.0,
min_items=0,
prefix=''):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
self.path = path
self.albumentations = Albumentations(size=img_size) if augment else None
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
# f = list(p.rglob('*.*')) # pathlib
elif p.is_file(): # file
with open(p) as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent, 1) if x.startswith('./') else x for x in t] # to global path
# f += [p.parent / x.lstrip(os.sep) for x in t] # to global path (pathlib)
else:
raise FileNotFoundError(f'{prefix}{p} does not exist')
self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS)
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib
assert self.im_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') from e
# Check cache
self.label_files = img2label_paths(self.im_files) # labels
cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
try:
cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict
assert cache['version'] == self.cache_version # matches current version
assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash
except Exception:
cache, exists = self.cache_labels(cache_path, prefix), False # run cache ops
# Display cache
nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total
if exists and LOCAL_RANK in {-1, 0}:
d = f'Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt'
tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results
if cache['msgs']:
LOGGER.info('\n'.join(cache['msgs'])) # display warnings
assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}'
# Read cache
[cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items
labels, shapes, self.segments = zip(*cache.values())
nl = len(np.concatenate(labels, 0)) # number of labels
assert nl > 0 or not augment, f'{prefix}All labels empty in {cache_path}, can not start training. {HELP_URL}'
self.labels = list(labels)
self.shapes = np.array(shapes)
self.im_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
# Filter images
if min_items:
include = np.array([len(x) >= min_items for x in self.labels]).nonzero()[0].astype(int)
LOGGER.info(f'{prefix}{n - len(include)}/{n} images filtered from dataset')
self.im_files = [self.im_files[i] for i in include]
self.label_files = [self.label_files[i] for i in include]
self.labels = [self.labels[i] for i in include]
self.segments = [self.segments[i] for i in include]
self.shapes = self.shapes[include] # wh
# Create indices
n = len(self.shapes) # number of images
bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Update labels
include_class = [] # filter labels to include only these classes (optional)
self.segments = list(self.segments)
include_class_array = np.array(include_class).reshape(1, -1)
for i, (label, segment) in enumerate(zip(self.labels, self.segments)):
if include_class:
j = (label[:, 0:1] == include_class_array).any(1)
self.labels[i] = label[j]
if segment:
self.segments[i] = [segment[idx] for idx, elem in enumerate(j) if elem]
if single_cls: # single-class training, merge all classes into 0
self.labels[i][:, 0] = 0
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.im_files = [self.im_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.segments = [self.segments[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
# Cache images into RAM/disk for faster training
if cache_images == 'ram' and not self.check_cache_ram(prefix=prefix):
cache_images = False
self.ims = [None] * n
self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files]
if cache_images:
b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes
self.im_hw0, self.im_hw = [None] * n, [None] * n
fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image
results = ThreadPool(NUM_THREADS).imap(fcn, range(n))
pbar = tqdm(enumerate(results), total=n, bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0)
for i, x in pbar:
if cache_images == 'disk':
b += self.npy_files[i].stat().st_size
else: # 'ram'
self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i)
b += self.ims[i].nbytes
pbar.desc = f'{prefix}Caching images ({b / gb:.1f}GB {cache_images})'
pbar.close()
def check_cache_ram(self, safety_margin=0.1, prefix=''):
# Check image caching requirements vs available memory
b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes
n = min(self.n, 30) # extrapolate from 30 random images
for _ in range(n):
im = cv2.imread(random.choice(self.im_files)) # sample image
ratio = self.img_size / max(im.shape[0], im.shape[1]) # max(h, w) # ratio
b += im.nbytes * ratio ** 2
mem_required = b * self.n / n # GB required to cache dataset into RAM
mem = psutil.virtual_memory()
cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question
if not cache:
LOGGER.info(f'{prefix}{mem_required / gb:.1f}GB RAM required, '
f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, '
f"{'caching images ✅' if cache else 'not caching images ⚠️'}")
return cache
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages
desc = f'{prefix}Scanning {path.parent / path.stem}...'
with Pool(NUM_THREADS) as pool:
pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))),
desc=desc,
total=len(self.im_files),
bar_format=TQDM_BAR_FORMAT)
for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
nm += nm_f
nf += nf_f
ne += ne_f
nc += nc_f
if im_file:
x[im_file] = [lb, shape, segments]
if msg:
msgs.append(msg)
pbar.desc = f'{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt'
pbar.close()
if msgs:
LOGGER.info('\n'.join(msgs))
if nf == 0:
LOGGER.warning(f'{prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}')
x['hash'] = get_hash(self.label_files + self.im_files)
x['results'] = nf, nm, ne, nc, len(self.im_files)
x['msgs'] = msgs # warnings
x['version'] = self.cache_version # cache version
try:
np.save(path, x) # save cache for next time
path.with_suffix('.cache.npy').rename(path) # remove .npy suffix
LOGGER.info(f'{prefix}New cache created: {path}')
except Exception as e:
LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable: {e}') # not writeable
return x
def __len__(self):
return len(self.im_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = self.load_mosaic(index)
shapes = None
# MixUp augmentation
if random.random() < hyp['mixup']:
img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1)))
else:
# Load image
img, (h0, w0), (h, w) = self.load_image(index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
labels = self.labels[index].copy()
if labels.size: # normalized xywh to pixel xyxy format
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
img, labels = random_perspective(img,
labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
nl = len(labels) # number of labels
if nl:
labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)
if self.augment:
# Albumentations
img, labels = self.albumentations(img, labels)
nl = len(labels) # update after albumentations
# HSV color-space
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nl:
labels[:, 2] = 1 - labels[:, 2]
# Flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nl:
labels[:, 1] = 1 - labels[:, 1]
# Cutouts
# labels = cutout(img, labels, p=0.5)
# nl = len(labels) # update after cutout
labels_out = torch.zeros((nl, 6))
if nl:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.im_files[index], shapes
def load_image(self, i):
# Loads 1 image from dataset index 'i', returns (im, original hw, resized hw)
im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i],
if im is None: # not cached in RAM
if fn.exists(): # load npy
im = np.load(fn)
else: # read image
im = cv2.imread(f) # BGR
assert im is not None, f'Image Not Found {f}'
h0, w0 = im.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # ratio
if r != 1: # if sizes are not equal
interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA
im = cv2.resize(im, (math.ceil(w0 * r), math.ceil(h0 * r)), interpolation=interp)
return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized
return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized
def cache_images_to_disk(self, i):
# Saves an image as an *.npy file for faster loading
f = self.npy_files[i]
if not f.exists():
np.save(f.as_posix(), cv2.imread(self.im_files[i]))
def load_mosaic(self, index):
# YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
labels4, segments4 = [], []
s = self.img_size
yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y
indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
random.shuffle(indices)
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = self.load_image(index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
labels4.append(labels)
segments4.extend(segments)
# Concat/clip labels
labels4 = np.concatenate(labels4, 0)
for x in (labels4[:, 1:], *segments4):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
img4, labels4 = random_perspective(img4,
labels4,
segments4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def load_mosaic9(self, index):
# YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic
labels9, segments9 = [], []
s = self.img_size
indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices
random.shuffle(indices)
hp, wp = -1, -1 # height, width previous
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = self.load_image(index)
# place img in img9
if i == 0: # center
img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
labels9.append(labels)
segments9.extend(segments)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
c = np.array([xc, yc]) # centers
segments9 = [x - c for x in segments9]
for x in (labels9[:, 1:], *segments9):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
img9, labels9, segments9 = copy_paste(img9, labels9, segments9, p=self.hyp['copy_paste'])
img9, labels9 = random_perspective(img9,
labels9,
segments9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img9, labels9
@staticmethod
def collate_fn(batch):
im, label, path, shapes = zip(*batch) # transposed
for i, lb in enumerate(label):
lb[:, 0] = i # add target image index for build_targets()
return torch.stack(im, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
im, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]])
wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im1 = F.interpolate(im[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear',
align_corners=False)[0].type(im[i].type())
lb = label[i]
else:
im1 = torch.cat((torch.cat((im[i], im[i + 1]), 1), torch.cat((im[i + 2], im[i + 3]), 1)), 2)
lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
im4.append(im1)
label4.append(lb)
for i, lb in enumerate(label4):
lb[:, 0] = i # add target image index for build_targets()
return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def flatten_recursive(path=DATASETS_DIR / 'coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(f'{str(path)}_flat')
if os.path.exists(new_path):
shutil.rmtree(new_path) # delete output folder
os.makedirs(new_path) # make new output folder
for file in tqdm(glob.glob(f'{str(Path(path))}/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders import *; extract_boxes()
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classification') if (path / 'classification').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in IMG_FORMATS:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file) as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.dataloaders import *; autosplit()
Arguments
path: Path to images directory
weights: Train, val, test weights (list, tuple)
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only
n = len(files) # number of files
random.seed(0) # for reproducibility
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
for x in txt:
if (path.parent / x).exists():
(path.parent / x).unlink() # remove existing
print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path.parent / txt[i], 'a') as f:
f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n') # add image to txt file
def verify_image_label(args):
# Verify one image-label pair
im_file, lb_file, prefix = args
nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
if im.format.lower() in ('jpg', 'jpeg'):
with open(im_file, 'rb') as f:
f.seek(-2, 2)
if f.read() != b'\xff\xd9': # corrupt JPEG
ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)
msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved'
# verify labels
if os.path.isfile(lb_file):
nf = 1 # label found
with open(lb_file) as f:
lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
if any(len(x) > 6 for x in lb): # is segment
classes = np.array([x[0] for x in lb], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...)
lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
lb = np.array(lb, dtype=np.float32)
nl = len(lb)
if nl:
assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected'
assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}'
assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}'
_, i = np.unique(lb, axis=0, return_index=True)
if len(i) < nl: # duplicate row check
lb = lb[i] # remove duplicates
if segments:
segments = [segments[x] for x in i]
msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed'
else:
ne = 1 # label empty
lb = np.zeros((0, 5), dtype=np.float32)
else:
nm = 1 # label missing
lb = np.zeros((0, 5), dtype=np.float32)
return im_file, lb, shape, segments, nm, nf, ne, nc, msg
except Exception as e:
nc = 1
msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}'
return [None, None, None, None, nm, nf, ne, nc, msg]
class HUBDatasetStats():
""" Class for generating HUB dataset JSON and `-hub` dataset directory
Arguments
path: Path to data.yaml or data.zip (with data.yaml inside data.zip)
autodownload: Attempt to download dataset if not found locally
Usage
from utils.dataloaders import HUBDatasetStats
stats = HUBDatasetStats('coco128.yaml', autodownload=True) # usage 1
stats = HUBDatasetStats('path/to/coco128.zip') # usage 2
stats.get_json(save=False)
stats.process_images()
"""
def __init__(self, path='coco128.yaml', autodownload=False):
# Initialize class
zipped, data_dir, yaml_path = self._unzip(Path(path))
try:
with open(check_yaml(yaml_path), errors='ignore') as f:
data = yaml.safe_load(f) # data dict
if zipped:
data['path'] = data_dir
except Exception as e:
raise Exception('error/HUB/dataset_stats/yaml_load') from e
| check_dataset(data, autodownload) # download dataset if missing | 8 | 2023-11-12 13:28:26+00:00 | 12k |
BSoD123456/ffta_us_cn | ffta_finder.py | [
{
"identifier": "c_ffta_sect_tab_ref",
"path": "ffta_sect.py",
"snippet": "class c_ffta_sect_tab_ref(c_ffta_sect_tab):\n \n @staticmethod\n def _TAB_REF_CLS():\n return c_ffta_sect\n \n @tabitm()\n def get_entry(self, ofs):\n return self.readval(ofs, self._TAB_WIDTH, False)\n\n @property\n def last_idx(self):\n try:\n return self._last_idx\n except:\n pass\n _last_idx = 0\n _last_ofs = 0\n for i in range(self.tsize):\n sofs = self.get_entry(i)\n if sofs >= _last_ofs:\n _last_ofs = sofs\n _last_idx = i\n self._last_idx = _last_idx\n return _last_idx\n\n @property\n def last_idxs(self):\n try:\n return self._last_idxs\n except:\n pass\n _last_idxs = []\n _last_ofs = 0\n for i in range(self.tsize):\n sofs = self.get_entry(i)\n if sofs == _last_ofs:\n _last_idxs.append(i)\n elif sofs > _last_ofs:\n _last_ofs = sofs\n _last_idxs = [i]\n self._last_idxs = _last_idxs\n return _last_idxs\n\n def _is_last(self, idx):\n return idx in self.last_idxs\n\n def _ref_top_nondeterm(self, idx):\n return self._sect_top_nondeterm and self._is_last(idx)\n\n def _init_ref(self, sub, idx, ofs):\n try:\n top_ofs = self._tab_ref_size[idx]\n except:\n top_ofs = None\n if isinstance(sub, c_ffta_sect_tab_ref_sub):\n sub.set_sub_offset(ofs)\n if self._is_last(idx):\n top_align = self.sect_top_align\n else:\n top_align = sub.sect_align\n sub.parse_size(top_ofs, top_align)\n if self._ref_top_nondeterm(idx):\n sub.set_nondeterm()\n sub.parse()\n\n @property\n def sect_top(self):\n if not self._sect_top is None:\n return self._sect_top\n if not self.tsize:\n return None\n lst_sub = self[self.last_idx]\n lst_top = lst_sub.sect_top\n if lst_top is None:\n return None\n real_top = lst_sub.real_offset - self.real_offset + lst_top\n self.set_real_top(real_top)\n return self._sect_top\n\n def refresh_sect_top(self):\n otop = self._sect_top\n self._sect_top = None\n ntop = self.sect_top\n if ntop is None:\n self._sect_top = otop\n\n @property\n def sect_top_least(self):\n sect_top = self.sect_top\n if not sect_top is None:\n return sect_top\n if not self.tsize:\n return None\n lst_sub = self[self.last_idx]\n return lst_sub.real_offset - self.real_offset + lst_sub.sect_top_least\n \n def get_ref(self, idx):\n ofs = self.get_entry(idx)\n clss = self._TAB_REF_CLS()\n if isinstance(clss, list):\n first_err = None\n for cls in clss:\n sub = self.sub(ofs, cls = cls)\n try:\n self._init_ref(sub, idx, ofs)\n break\n except ValueError as ex:\n if first_err is None:\n first_err = ex\n else:\n assert(first_err)\n raise first_err\n else:\n sub = self.sub(ofs, cls = clss)\n self._init_ref(sub, idx, ofs)\n return sub\n\n @property\n def _tab_acs_top(self):\n return self.accessable_top\n\n def _guess_size(self, top_ofs, upd_sz):\n assert(upd_sz or self.tsize < INF)\n cur_ent = 0\n ofs_min = INF\n ofs_ord = []\n ofs_sort = set()\n acs_top = self._tab_acs_top\n while cur_ent < self.tsize:\n ofs = self.get_entry(cur_ent)\n if ofs < 0:\n raise ValueError('invalid ref tab: tab entry not in range')\n skip = (ofs == 0)\n if (cur_ent * self._TAB_WIDTH == ofs_min or\n (not top_ofs is None and ofs == top_ofs) or\n # all F entry is invalid and last\n (ofs == (1 << self._TAB_WIDTH * 8) - 1)):\n if upd_sz:\n self.tsize = cur_ent\n break\n else:\n skip = True\n elif (cur_ent * self._TAB_WIDTH > ofs_min or\n ofs >= acs_top or\n (not top_ofs is None and ofs > top_ofs) ):\n raise ValueError('invalid ref tab: tab entry not in range')\n cur_ent += 1\n if 0 < ofs < ofs_min:\n ofs_min = ofs\n ofs_ord.append(ofs)\n if not skip:\n ofs_sort.add(ofs)\n if self.tsize == 0:\n return []\n if not ofs_sort:\n raise ValueError('invalid 
ref tab: empty entries')\n ofs_sort = sorted(ofs_sort)\n rslt = []\n for ofs in ofs_ord:\n if ofs == 0:\n rslt.append(0)\n continue\n i = ofs_sort.index(ofs)\n try:\n nxt_ofs = ofs_sort[i+1]\n except:\n nxt_ofs = top_ofs\n try:\n sz = nxt_ofs - ofs\n except:\n sz = None\n rslt.append(sz)\n return rslt\n\n def parse_size(self, top_ofs, top_align_width):\n super().parse_size(top_ofs, top_align_width)\n self._tab_ref_size = self._guess_size(top_ofs, True)\n\n def _iter_item(self, path, skiprep, refresh):\n wkofs = {}\n for i, sub in enumerate(self):\n npath = path + [i]\n if skiprep and sub:\n sofs = sub.real_offset\n if sofs in wkofs:\n yield npath, wkofs[sofs]\n continue\n wkofs[sofs] = npath\n if isinstance(sub, c_ffta_sect_tab_ref):\n yield from sub._iter_item(npath, skiprep, refresh)\n else:\n yield npath, sub\n if refresh:\n self.refresh_sect_top()\n\n def iter_item(self, skiprep = False, refresh = False):\n yield from self._iter_item([], skiprep, refresh)\n\n def _repack_content(self, tab, base):\n mtab = {}\n maxsi = -1\n for idxp, val in tab.items():\n si = idxp[0]\n sidxp = idxp[1:]\n if si > maxsi:\n maxsi = si\n if not sidxp:\n if si in mtab:\n raise ValueError(f'dumplicate repack tab index: {si}')\n mtab[si] = val\n continue\n if not si in mtab:\n stab = {}\n mtab[si] = stab\n elif isinstance(mtab[si], dict):\n stab = mtab[si]\n else:\n raise ValueError(f'dumplicate repack tab index: {idxp}/{si}')\n stab[sidxp] = val\n rtsize = max(maxsi+1, self.tsize)\n dirty = False\n srmks = []\n ent_cch = {}\n cbase = alignup(self._TAB_WIDTH * rtsize + base, self.sect_align)\n cent = cbase\n ents = []\n for si, subsect in enumerate(self):\n if subsect is None:\n ents.append(0)\n srmks.append(None)\n continue\n sk = subsect.real_offset\n if sk in ent_cch:\n ents.append(ent_cch[sk])\n srmks.append(None)\n continue\n if si in mtab:\n if isinstance(subsect, c_ffta_sect_tab_ref_sub):\n srmk, sdirty = subsect.repack_with(mtab[si], cent)\n else:\n srmk, sdirty = subsect.repack_with(mtab[si])\n if sdirty:\n dirty = True\n else:\n srmk = subsect\n sdirty = False\n if not sdirty:\n srmk = srmk.repack_copy()\n ent_cch[sk] = cent\n ents.append(cent)\n cent = srmk.realign(subsect.sect_top_align, cent)\n srmks.append(srmk)\n if maxsi >= self.tsize:\n for si in range(self.tsize, maxsi+1):\n if not si in mtab:\n ents.append(0)\n srmks.append(None)\n continue\n subsect = self[0]\n if isinstance(subsect, c_ffta_sect_tab_ref_sub):\n srmk, sdirty = subsect.repack_with(mtab[si], cent)\n else:\n srmk, sdirty = subsect.repack_with(mtab[si])\n if sdirty:\n dirty = True\n if not sdirty:\n ents.append(0)\n srmks.append(None)\n continue\n ents.append(cent)\n cent = srmk.realign(subsect.sect_top_align, cent)\n srmks.append(srmk)\n if not dirty:\n return None, None\n cmk = c_mark(bytearray(), 0)\n for srmk in srmks:\n if not srmk is None:\n cmk.concat(srmk)\n assert cmk.accessable_top == cent - cbase\n return cmk, ents\n\n def _repack_with(self, tab):\n cmk, ents = self._repack_content(tab, 0)\n if cmk is None:\n return self, False\n rmk = self.sub(0, 0, cls = type(self))\n ewd = self._TAB_WIDTH\n for si, ent in enumerate(ents):\n rmk.writeval(ent, si * ewd, ewd)\n rmk.concat(cmk)\n return rmk, True"
},
{
"identifier": "c_ffta_sect_tab_ref_addr",
"path": "ffta_sect.py",
"snippet": "class c_ffta_sect_tab_ref_addr(c_ffta_sect_tab_ref):\n \n _TAB_WIDTH = 4\n \n def set_info(self, host, tlen, hole_idxs = None, ignore_invalid_ptr = False):\n self._tab_ref_host = host\n self.tsize = tlen\n if hole_idxs is None:\n hole_idxs = []\n self._tab_hole_idxs = hole_idxs\n self._tab_ref_addr_ignore_invalid_ptr = ignore_invalid_ptr\n \n def _ref_top_nondeterm(self, idx):\n return True\n\n @property\n def _tab_acs_top(self):\n return self._tab_ref_host.accessable_top\n \n def get_entry(self, idx):\n addr = super().get_entry(idx)\n if addr:\n ofs = self._tab_ref_host.aot(addr, 'ao')\n if self._tab_ref_addr_ignore_invalid_ptr and not self.in_sect(ofs):\n ofs = 0\n else:\n ofs = 0\n return ofs\n \n def get_ref(self, idx):\n ofs = self.get_entry(idx)\n if ofs:\n ref = self._tab_ref_host.sub(ofs, cls = self._TAB_REF_CLS())\n self._init_ref(ref, idx, ofs)\n else:\n ref = None\n return ref\n \n def parse_size(self, top_ofs, top_align_width):\n super(c_ffta_sect_tab, self).parse_size(top_ofs, top_align_width)\n tbsz = self._guess_size(top_ofs, False)\n for i in self._tab_hole_idxs:\n if i < len(tbsz):\n tbsz[i] = None\n self._tab_ref_size = tbsz\n self.set_nondeterm()\n\n def _repack_end(self, rmk, base):\n rmk.set_info(rmk, self.tsize,\n self._tab_hole_idxs.copy())\n super()._repack_end(rmk)\n\n def _repack_with(self, tab, base):\n abase = self._tab_ref_host.aot(base, 'oa')\n cmk, ents = self._repack_content(tab, abase)\n if cmk is None:\n return self, False\n rmk = type(self)(bytearray(), base)\n ewd = self._TAB_WIDTH\n for si, ent in enumerate(ents):\n rmk.writeval(ent, si * ewd, ewd)\n rmk.concat(cmk)\n return rmk, True"
},
{
"identifier": "c_ffta_sect_text_line",
"path": "ffta_sect.py",
"snippet": "class c_ffta_sect_text_line(c_ffta_sect):\n \n _SECT_ALIGN = 2\n \n def _gc(self, si):\n c = self.U8(si)\n return c, si + 1\n\n def _bypass(self, si, di, d, l):\n for i in range(l):\n d.append(self.U8(si + i))\n return si + l, di + l\n\n @staticmethod\n def _flip(di, d, l, f):\n for i in range(l):\n #d.append(readval_le(d, di - f + i - 1, 1, False))\n d.append(d[di - f + i - 1])\n return di + l\n\n @staticmethod\n def _bset(di, d, l, v):\n for i in range(l):\n d.append(v)\n return di + l\n\n def _decompress(self, dst, src_idx, dst_len):\n dst_idx = 0\n while dst_idx < dst_len:\n cmd, src_idx = self._gc(src_idx)\n if cmd & 0x80:\n cmd1, src_idx = self._gc(src_idx)\n ln = ((cmd >> 3) & 0xf) + 3\n fl = (((cmd & 0x7) << 8) | cmd1)\n dst_idx = self._flip(dst_idx, dst, ln, fl)\n elif cmd & 0x40:\n ln = (cmd & 0x3f) + 1\n src_idx, dst_idx = self._bypass(src_idx, dst_idx, dst, ln)\n elif cmd & 0x20:\n ln = (cmd & 0x1f) + 2\n dst_idx = self._bset(dst_idx, dst, ln, 0x00)\n elif cmd & 0x10:\n cmd1, src_idx = self._gc(src_idx)\n cmd2, src_idx = self._gc(src_idx)\n ln = (((cmd1 & 0xc0) >> 2) | (cmd & 0xf)) + 4\n fl = (((cmd1 & 0x3f) << 8) | cmd2)\n dst_idx = self._flip(dst_idx, dst, ln, fl)\n elif cmd == 0x2:\n cmd1, src_idx = self._gc(src_idx)\n ln = cmd1 + 3\n dst_idx = self._bset(dst_idx, dst, ln, 0x00)\n elif cmd == 0x1:\n cmd1, src_idx = self._gc(src_idx)\n ln = cmd1 + 3\n dst_idx = self._bset(dst_idx, dst, ln, 0xff)\n elif cmd == 0x0:\n cmd1, src_idx = self._gc(src_idx)\n cmd2, src_idx = self._gc(src_idx)\n cmd3, src_idx = self._gc(src_idx)\n ln = cmd1 + 5\n fl = ((cmd2 << 8) | cmd3)\n dst_idx = self._flip(dst_idx, dst, ln, fl)\n else:\n pass\n assert(len(dst) == dst_len)\n return src_idx\n\n def parse(self):\n super().parse()\n flags = self.U16(0)\n cmpr = not not (flags & 0x2)\n cls_buf = (c_ffta_sect_text_buf, c_ffta_sect_text_buf_ya)[flags & 0x1]\n self.compressed = cmpr\n warn_cnt = 0\n if cmpr:\n dst_len = rvs_endian(self.U32(2), 4, False)\n if dst_len == 0:\n raise ValueError('invalid text line: decompress nothing')\n subsect = self.sub(2, 0, cls = cls_buf)\n try:\n src_len = self._decompress(subsect.mod, 6, dst_len)\n except:\n raise ValueError('invalid text line: decompress error')\n subsect.parse_size(dst_len, self.sect_top_align)\n else:\n subsect = self.sub(2, cls = cls_buf)\n if self.sect_top is None:\n _st = None\n else:\n _st = self.sect_top - 2\n subsect.parse_size(_st, self.sect_top_align)\n if self.sect_top_nondeterm:\n subsect.set_nondeterm()\n subsect.parse()\n self.text = subsect\n if not cmpr and subsect:\n src_len = subsect.raw_len + 2\n self.set_real_top(src_len)\n self.raw_len = src_len\n self.warn_cnt = warn_cnt\n\n def _repack_with(self, toks):\n srmk, dirty = self.text.repack_with(toks)\n if not dirty:\n return self, False\n rmk = self.sub(0, 0, cls = type(self))\n flags = (self.U16(0) & 0xfc)\n rmk.W16(flags, 0)\n rmk.concat(srmk)\n return rmk, True"
},
{
"identifier": "c_ffta_sect_text_buf",
"path": "ffta_sect.py",
"snippet": "class c_ffta_sect_text_buf(c_ffta_sect):\n\n _CTR_TOKBASE = 0x21\n _CTR_TOKLEN = [\n # read 2\n [0x40, 0x41, 0x42, 0x4a, 0x4d, 0x4f, 0x52, 0x54, 0x56, 0x57, 0x58],\n # read 3\n [0x00, 0x1b, 0x1d, 0x46, 0x4b, 0x51, 0x53, 0x32, 0x04],\n # read 4\n [0x45],\n ]\n # read 3 but spec\n _CTR_TOKSPEC = [0x32, 0x04]\n\n def parse(self):\n super().parse()\n self._cidx = 0\n self._half = False\n self._directly = 0\n self._make_ctr_tab()\n self.dec_error_cnt = 0\n self._decode()\n\n def _make_ctr_tab(self):\n ctr_tab = {}\n ctr_spec_tab = {}\n for i, ctis in enumerate(self._CTR_TOKLEN):\n for cti in ctis:\n ctr_tab[cti] = i\n for cti in self._CTR_TOKSPEC:\n ctr_spec_tab[cti] = getattr(self, f'_get_ctr_{cti:0>2x}')\n self._ctr_tab = ctr_tab\n self._ctr_spec_tab = ctr_spec_tab\n\n def _gc(self):\n c = self.U8(self._cidx)\n self._cidx += 1\n return c\n\n def _bc(self):\n self._cidx -= 1\n\n def _directly_mode(self, n):\n if self._directly != 0:\n self.dec_error_cnt += 1\n self._directly = n\n\n # replace ctrl, hero's name do nothing, but others fill dest buff\n # only care src, ignore\n def _get_ctr_04(self):\n return self._gc() | 0x400\n\n # directly copy 2 strings\n def _get_ctr_32(self):\n self._directly_mode(2)\n return self._gc() | 0x3200\n\n def _get_tok(self):\n c = self._gc()\n if c == 0:\n if self._directly > 0:\n self._directly -= 1\n return 'CTR_EOS', 0\n else:\n return 'EOS', 0\n if c == 1:\n self._half = True\n return self._get_tok()\n elif self._half:\n return 'CHR_HALF', c - 1\n elif c == 0x40:\n c = self._gc() - self._CTR_TOKBASE\n if c in self._ctr_spec_tab:\n func = self._ctr_spec_tab[c]\n c = func()\n return 'CTR_FUNC', c\n elif c in self._ctr_tab:\n cmlen = self._ctr_tab[c]\n if c == 0:\n # for record. 0 can not be recorded\n c = 1\n for _ in range(cmlen):\n c <<= 8\n c |= self._gc()\n return 'CTR_FUNC', c\n self._bc()\n self.dec_error_cnt += 1\n return 'ERR_CFUNC', c\n elif c & 0x80:\n c &= 0x7f\n c <<= 8\n c |= self._gc()\n return 'CHR_FULL', c\n else:\n self.dec_error_cnt += 1\n return 'ERR_UNKNOWN', c\n\n def _decode(self):\n toks = []\n top_ofs = self.sect_top\n while top_ofs is None or self._cidx < top_ofs:\n typ, val = self._get_tok()\n if typ == 'EOS':\n break\n toks.append((typ, val))\n if not top_ofs is None:\n if self._cidx > top_ofs:\n self.cidx = top_ofs\n while self._cidx < top_ofs:\n c = self._gc()\n if c != 0:\n self._bc()\n break\n self.tokens = toks\n self.raw_len = self._cidx\n if self.dec_error_cnt > 0:# and not self.sect_top_nondeterm:\n raise ValueError('invalid text buf: decode error')\n self.set_real_top(self.raw_len)\n\n def _encode_tok(self, buf, tok):\n ttyp, tchr = tok\n if ttyp.startswith('CHR_'):\n ch = ((tchr >> 8) & 0x7f)\n cl = (tchr & 0xff)\n buf.append(ch | 0x80)\n buf.append(cl)\n elif ttyp == 'CTR_FUNC':\n tseq = []\n cchr = tchr\n while cchr:\n tseq.append(cchr & 0xff)\n cchr >>= 8\n cmd = tseq.pop()\n if cmd == 1:\n # recorded 1 is actually 0\n cmd = 0\n assert cmd in self._ctr_tab and self._ctr_tab[cmd] == len(tseq)\n buf.append(0x40)\n buf.append(cmd + self._CTR_TOKBASE)\n for tc in reversed(tseq):\n buf.append(tc)\n elif ttyp == 'CTR_EOS':\n buf.append(0)\n else:\n raise ValueError(f'invalid token type to encode: {ttyp}')\n\n def _encode(self, toks):\n buf = []\n for tok in toks:\n self._encode_tok(buf, tok)\n buf.append(0)\n return buf\n\n def _repack_with(self, toks):\n if not toks:\n return self, False\n buf = self._encode(toks)\n rmk = self.sub(0, 0, cls = c_ffta_sect_text_buf)\n rmk.WBYTES(bytearray(buf), 0)\n return rmk, True"
},
{
"identifier": "c_ffta_sect_text",
"path": "ffta_sect.py",
"snippet": "class c_ffta_sect_text(c_ffta_sect_tab_ref):\n _TAB_WIDTH = 4\n @staticmethod\n def _TAB_REF_CLS():\n return [c_ffta_sect_text_page, c_ffta_sect_text_sub]"
},
{
"identifier": "c_ffta_sect_text_page",
"path": "ffta_sect.py",
"snippet": "class c_ffta_sect_text_page(c_ffta_sect_tab_ref):\n _TAB_WIDTH = 2\n _SECT_TOP_ALIGN = 4\n @staticmethod\n def _TAB_REF_CLS():\n return c_ffta_sect_text_line"
},
{
"identifier": "c_ffta_sect_fixed_text",
"path": "ffta_sect.py",
"snippet": "class c_ffta_sect_fixed_text(c_ffta_sect_tab_ref_addr):\n @staticmethod\n def _TAB_REF_CLS():\n return c_ffta_sect_text_page"
},
{
"identifier": "c_ffta_sect_words_text",
"path": "ffta_sect.py",
"snippet": "class c_ffta_sect_words_text(c_ffta_sect_tab_ref_addr):\n @staticmethod\n def _TAB_REF_CLS():\n return c_ffta_sect_text_buf"
}
] | from ffta_sect import (
c_ffta_sect_tab_ref, c_ffta_sect_tab_ref_addr,
c_ffta_sect_text_line, c_ffta_sect_text_buf,
c_ffta_sect_text, c_ffta_sect_text_page,
c_ffta_sect_fixed_text, c_ffta_sect_words_text,
)
from hexdump import hexdump as hd
from pprint import pprint as ppr
from ffta_sect import main as sect_main
from ffta_sect import rom_cn, rom_jp, rom_us
from ffta_font import c_ffta_font_drawer
from ffta_charset import c_ffta_charset_us_dummy as c_charset
import pdb | 8,899 | def _scan(self, brk_out):
st = self.ST_SCAN_I
while self.win_ed + self.wd <= self.top_ofs:
#if self.win_ed % 0x10000 == 0:
# print('scan', hex(self.win_ed))
if st == self.ST_SCAN_I:
#print('in', hex(self.win_ed))
st = self._shift_in()
if st == self.ST_SCAN_O:
#print('out', hex(self.win_ed))
if brk_out:
break
st = self._shift_out()
elif st == self.ST_CHECK:
#print('chk', hex(self.win_ed))
st = self._chk_itm_bot()
elif st == self.ST_CHECK_DROP:
#print('chkdrp', hex(self.win_ed))
st = self._chk_itm_bot()
if st != self.ST_FOUND:
st = self.ST_DROPALL
elif st == self.ST_BYPASS:
#print('bp', hex(self.win_ed))
st = self.ST_SCAN_I
elif st == self.ST_DROPALL:
#print('drp', hex(self.win_ed))
if brk_out:
break
st = self._drop_all()
elif st == self.ST_FOUND:
yield self.win_st, self.win_ed, self.win_len, self.win_max
st = self._shift_out()
#yield False, self.win_st, self.win_ed, self.win_len, self.win_max
def scan(self):
        yield from self._scan(False)
def check(self, ofs = None):
if ofs is None:
ofs = self.win_ed
if ofs % self.wd:
return False, 0, 0
self.reset(ofs)
for st, ed, ln, mx in self._scan(True):
return True, ln, mx
return False, 0, 0
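# Usage sketch (illustrative, not from the original source), mirroring how
# c_text_checker below drives the finder: probe a single offset for a plausible
# reference table with the given entry width.
#   rtf = c_ffta_ref_tab_finder(sect, 0, sect._sect_top, 4)   # 4-byte entries
#   found, win_len, win_max = rtf.check(ofs)                   # one-offset probe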
class c_text_checker:
def __init__(self, sect, thrs = (2, 3, 9-3, 7-3, 3, 3)):
self.sect = sect
self.rtf2 = c_ffta_ref_tab_finder(sect, 0, sect._sect_top, 2)
self.rtf4 = c_ffta_ref_tab_finder(sect, 0, sect._sect_top, 4)
self._thrs = thrs
def _chk_tab(self, ofs, cls):
try:
dst = self.sect.subsect(ofs, cls)
for i in dst.iter_item(refresh = True):
pass
except:
return False, None, None, None
sz = dst.sect_top
if sz is None:
assert(dst.tsize < 2)
return False, dst, dst.tsize, None
return True, dst, dst.tsize, sz
def _chk_item(self, ofs, cls):
try:
dst = self.sect.subsect(ofs, cls)
except:
return False, None, None, None
sz = dst.sect_top
if sz is None:
return False, dst, None, None
return True, dst, sz, sz
def check(self, ofs, typ):
cls = (
c_ffta_sect_text, c_ffta_sect_text_page,
c_ffta_sect_text_line, c_ffta_sect_text_buf)
for i, dtyp in enumerate((1, 2, 4, 8)):
if not (typ & dtyp):
continue
if dtyp & 0x1:
fnd, ln, mx = self.rtf4.check(ofs)
elif dtyp & 0x2:
fnd, ln, mx = self.rtf2.check(ofs)
else:
fnd = True
if not fnd:
continue
if dtyp & 0x3:
r = self._chk_tab(ofs, cls[i])
else:
r = self._chk_item(ofs, cls[i])
if r[0] and r[2] >= self._thrs[i]:
return r
return False, None, None, None
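    # Note (illustrative summary, not in the original source): typ in check()
    # above is a bitmask selecting which interpretations of ofs to try, in order:
    #   0x1 -> 4-byte ref table (c_ffta_sect_text)
    #   0x2 -> 2-byte ref table (c_ffta_sect_text_page)
    #   0x4 -> single text line  (c_ffta_sect_text_line)
    #   0x8 -> raw text buffer   (c_ffta_sect_text_buf)
    # On success it returns (True, sect, entry_count_or_size, sect_top).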
def _chk_atab(self, mn, mx, cls):
sz = mx - mn
ln = sz // 4
subrngs = []
try:
sct = self.sect.subsect(mn, cls, self.sect, ln)
for sub in sct:
if sub is None:
continue
if isinstance(sub, c_ffta_sect_tab_ref):
for i in sub.iter_item(refresh = True):
pass
subrngs.append((sub.real_offset, sub.sect_top))
except:
return False, None, None, None, None
return True, subrngs, sct, ln, sz
def check_atab(self, mn, mx, typ):
cls = (
| #! python3
# coding: utf-8
INF = float('inf')
c_symb = object
class c_range_holder:
def __init__(self):
self.rngs = []
def _find_ridx(self, val):
lst_ridx = -1
lst_mx = None
for i, rng in enumerate(self.rngs):
mn, mx = rng
if val < mn:
return False, lst_ridx, i, lst_mx == val, val == mn - 1
elif mn <= val < mx:
return True, i, i, True, True
else:
lst_ridx = i
lst_mx = mx
return False, lst_ridx, len(self.rngs), lst_mx == val, False
def _hold(self, rng, upd):
rngs = self.rngs
mn, mx = rng
rm_ridx_rng = [None, None]
add_rng = [None, None]
adj_cnt = 0
cv1, prv_ri, nxt_ri, rm_prv, rm_nxt = self._find_ridx(mn)
if rm_prv:
rm_ridx_rng[0] = prv_ri
add_rng[0] = rngs[prv_ri][0]
if not cv1:
adj_cnt += 1
else:
rm_ridx_rng[0] = nxt_ri
add_rng[0] = mn
cv2, prv_ri, nxt_ri, rm_prv, rm_nxt = self._find_ridx(mx-1)
if rm_nxt:
rm_ridx_rng[1] = nxt_ri
add_rng[1] = rngs[nxt_ri][1]
if not cv2:
# adj ridx can not be the same
# so just use 1 counter
adj_cnt += 1
else:
rm_ridx_rng[1] = prv_ri
add_rng[1] = mx
rr_cmn, rr_cmx = rm_ridx_rng
add_rng = tuple(add_rng)
if rr_cmn == rr_cmx and cv1 and cv2:
assert(rngs[rr_cmn] == add_rng)
return True, True # cover, include
elif rr_cmn > rr_cmx:
assert(rr_cmn >= 0 and rr_cmn - rr_cmx == 1 and add_rng[0] == mn and add_rng[1] == mx)
if upd:
rngs.insert(rr_cmn, add_rng)
return False, False # cover, include
else:
if rr_cmn < 0:
rr_cmn = 0
inner_cnt = rr_cmx - rr_cmn + 1 - adj_cnt
assert(inner_cnt >= 0)
if upd:
nrngs = rngs[:rr_cmn]
nrngs.append(add_rng)
nrngs.extend(rngs[rr_cmx+1:])
self.rngs = nrngs
return inner_cnt > 0, False # cover, include
def hold(self, rng):
return self._hold(rng, True)
def peek(self, rng):
return self._hold(rng, False)
def peek1(self, ofs):
return self.peek((ofs, ofs+1))
def iter_rngs(self, arng = None):
if not arng:
arng = (0, None)
st, ed = arng
def chk_in_arng(mn, mx):
if (ed and mn >= ed) or mx <= st:
return None
rmn, rmx = max(st, mn), min(ed, mx) if ed else mx
if rmn >= rmx:
return None
return rmn, rmx
lst_mx = 0
for mn, mx in self.rngs:
drng = chk_in_arng(lst_mx, mn)
lst_mx = mx
if drng:
yield drng, False
drng = chk_in_arng(mn, mx)
if drng:
yield drng, True
if ed and ed > lst_mx:
drng = chk_in_arng(lst_mx, ed)
if drng:
yield drng, False
class c_ffta_ref_addr_finder:
def __init__(self, sect, st_ofs, top_ofs, itm_align = 1):
self.sect = sect
self.top_ofs = top_ofs
self.itm_align = itm_align
self.st_ofs = st_ofs
def scan(self):
cur_ofs = self.st_ofs
sect = self.sect
while cur_ofs + 4 <= self.top_ofs:
adr = sect.U32(cur_ofs)
ofs = sect._addr2offs(adr)
if 0 <= ofs < self.top_ofs:
yield ofs, adr, cur_ofs
cur_ofs += 4
class c_ffta_ref_addr_hold_finder(c_ffta_ref_addr_finder):
def __init__(self, *args, addr_holder = None, ignore_item = False, merge_cn = False, **kargs):
super().__init__(*args, **kargs)
if not addr_holder:
addr_holder = c_range_holder()
self.holder = addr_holder
self.ignore_item = ignore_item
self.merge_cn = merge_cn
self._pre_scan()
def _is_ptr(self, ent):
adr = self.sect.U32(ent)
ofs = self.sect._addr2offs(adr)
return 0 < ofs < self.top_ofs, ofs, adr == 0
def _pre_scan(self, adrtab_min = 5-1):
adrtab_min_sz = adrtab_min * 4
cur_ofs = self.st_ofs
rvs_tab = {}
ptr_tab = {}
itm_tab = set()
while cur_ofs + 4 <= self.top_ofs:
cur_ent = cur_ofs
while not (cur_ent in ptr_tab or cur_ent in itm_tab):
is_ptr, nxt_ent, is_null = self._is_ptr(cur_ent)
if is_ptr:
#self.holder.hold((cur_ent, cur_ent + 4)) # too slow
ptr_tab[cur_ent] = nxt_ent
if not nxt_ent in rvs_tab:
rvs_tab[nxt_ent] = []
rvs_tab[nxt_ent].append(cur_ent)
else:
if is_null:
ptr_tab[cur_ent] = None
if cur_ent != cur_ofs:
itm_tab.add(cur_ent)
break
cur_ent = nxt_ent
cur_ofs += 4
adr_tab = []
ptr_sort = sorted(k for k in ptr_tab)
lst_mn = None
lst_ofs = 0
# insert another last ptr to handle the real last one
_af = (1 << 32) - 1
ptr_sort.append(_af)
for ofs in ptr_sort:
if not ofs < _af:
continue
ofs_p = ptr_tab[ofs]
if not ofs_p is None and not ofs_p in itm_tab and not self.ignore_item:
continue
is_rng = False
if ofs == lst_ofs + 4:
if lst_mn is None:
lst_mn = lst_ofs
elif not lst_mn is None:
mn = lst_mn
mx = lst_ofs + 4
lst_mn = None
is_rng = True
lst_ofs = ofs
if not is_rng:
continue
if mx - mn < adrtab_min_sz:
continue
lst_dofs = None
for dofs in range(mn, mx, 4):
if not dofs in rvs_tab:
continue
if not lst_dofs is None and dofs - lst_dofs >= adrtab_min_sz:
adr_tab.append((lst_dofs, dofs))
lst_dofs = dofs
if self.merge_cn:
break
if not lst_dofs is None and mx - lst_dofs >= adrtab_min_sz:
adr_tab.append((lst_dofs, mx))
self.ptr_tab = ptr_tab
self.rvs_tab = rvs_tab
self.itm_tab = itm_tab
self.adr_tab = adr_tab
def scan_adrtab(self, adrtab_min = 5):
self._last_hold = None
rmati = []
for ati, (mn, mx) in enumerate(self.adr_tab):
yield mn, mx
if mn == self._last_hold:
rmati.append(ati)
for ati in reversed(rmati):
self.adr_tab.pop(ati)
def scan(self):
self._last_hold = None
for ofs in sorted(self.itm_tab):
cv, incld = self.holder.peek1(ofs)
if cv:
continue
yield ofs
if ofs == self._last_hold:
self.itm_tab.remove(ofs)
def hold(self, ofs, top):
if top is None:
top = 1
self.holder.hold((ofs, ofs + top))
self._last_hold = ofs
class c_ffta_ref_tab_finder:
ST_DROPALL = c_symb()
ST_BYPASS = c_symb()
ST_FOUND = c_symb()
ST_SCAN_I = c_symb()
ST_SCAN_O = c_symb()
ST_CHECK = c_symb()
ST_CHECK_DROP = c_symb()
def __init__(self, sect, st_ofs, top_ofs, ent_width, itm_align = 1):
self.sect = sect
self.top_ofs = top_ofs
self.wd = ent_width
if itm_align is None:
itm_align = ent_width
self.itm_align = itm_align
self.ENT_A0 = 0
self.ENT_AF = (1 << self.wd * 8) - 1
st_ofs = (st_ofs // self.wd) * self.wd
self.win = []
self.win_st = st_ofs
self.win_ed = st_ofs
self.win_min = INF
self.win_max = 0
def reset(self, st_ofs):
st_ofs = (st_ofs // self.wd) * self.wd
self.win_ed = st_ofs
self._drop_all()
@property
def win_len(self):
l = self.win_ed - self.win_st
assert(l == len(self.win) * self.wd)
return l // self.wd
def _ent2ofs(self, ent):
return self.win_st + ent
def _ofs2ent(self, ofs):
assert(ofs >= self.win_st)
return ofs - self.win_st
def _hndl_a0(self):
return True
def _hndl_af(self):
return False
def _shift_in(self):
ent = self.sect.readval(self.win_ed, self.wd, False)
self.win_ed += self.wd
self.win.append(ent)
if ent == self.ENT_A0:
bypass = self._hndl_a0()
elif ent == self.ENT_AF:
bypass = self._hndl_af()
else:
bypass = None
if bypass is True:
return self.ST_BYPASS
elif bypass is False:
return self.ST_CHECK_DROP
else:
pass
if self._ent2ofs(ent) % self.itm_align:
return self.ST_DROPALL
if ent > self.win_max:
self.win_max = ent
if ent < self.win_min:
self.win_min = ent
return self.ST_CHECK
def _shift_out(self):
self.win_st += self.wd
if self.win_st == self.win_ed:
return self.ST_DROPALL
ent = self.win.pop(0)
a0 = self.ENT_A0
af = self.ENT_AF
if ent == a0 or ent == af:
return self.ST_CHECK
upd_min = (ent == self.win_min)
upd_max = (ent == self.win_max)
if not (upd_min or upd_max):
return self.ST_CHECK
wmin = INF
wmax = 0
for ent in self.win:
if ent == a0:
continue
elif ent == af:
continue
if upd_min and ent < wmin:
wmin = ent
if upd_max and ent > wmax:
wmax = ent
if upd_min:
self.win_min = wmin
if upd_max:
self.win_max = wmax
return self.ST_CHECK
def _chk_itm_bot(self):
ed = self.win_ed
wmin = self._ent2ofs(self.win_min)
wmax = self._ent2ofs(self.win_max)
if ed == wmin:
return self.ST_FOUND
elif ed > wmin or wmax >= self.top_ofs:
return self.ST_SCAN_O
return self.ST_SCAN_I
def _drop_all(self):
self.win.clear()
self.win_st = self.win_ed
self.win_min = INF
self.win_max = 0
return self.ST_SCAN_I
def _scan(self, brk_out):
st = self.ST_SCAN_I
while self.win_ed + self.wd <= self.top_ofs:
#if self.win_ed % 0x10000 == 0:
# print('scan', hex(self.win_ed))
if st == self.ST_SCAN_I:
#print('in', hex(self.win_ed))
st = self._shift_in()
if st == self.ST_SCAN_O:
#print('out', hex(self.win_ed))
if brk_out:
break
st = self._shift_out()
elif st == self.ST_CHECK:
#print('chk', hex(self.win_ed))
st = self._chk_itm_bot()
elif st == self.ST_CHECK_DROP:
#print('chkdrp', hex(self.win_ed))
st = self._chk_itm_bot()
if st != self.ST_FOUND:
st = self.ST_DROPALL
elif st == self.ST_BYPASS:
#print('bp', hex(self.win_ed))
st = self.ST_SCAN_I
elif st == self.ST_DROPALL:
#print('drp', hex(self.win_ed))
if brk_out:
break
st = self._drop_all()
elif st == self.ST_FOUND:
yield self.win_st, self.win_ed, self.win_len, self.win_max
st = self._shift_out()
#yield False, self.win_st, self.win_ed, self.win_len, self.win_max
def scan(self):
        yield from self._scan(False)
def check(self, ofs = None):
if ofs is None:
ofs = self.win_ed
if ofs % self.wd:
return False, 0, 0
self.reset(ofs)
for st, ed, ln, mx in self._scan(True):
return True, ln, mx
return False, 0, 0
class c_text_checker:
def __init__(self, sect, thrs = (2, 3, 9-3, 7-3, 3, 3)):
self.sect = sect
self.rtf2 = c_ffta_ref_tab_finder(sect, 0, sect._sect_top, 2)
self.rtf4 = c_ffta_ref_tab_finder(sect, 0, sect._sect_top, 4)
self._thrs = thrs
def _chk_tab(self, ofs, cls):
try:
dst = self.sect.subsect(ofs, cls)
for i in dst.iter_item(refresh = True):
pass
except:
return False, None, None, None
sz = dst.sect_top
if sz is None:
assert(dst.tsize < 2)
return False, dst, dst.tsize, None
return True, dst, dst.tsize, sz
def _chk_item(self, ofs, cls):
try:
dst = self.sect.subsect(ofs, cls)
except:
return False, None, None, None
sz = dst.sect_top
if sz is None:
return False, dst, None, None
return True, dst, sz, sz
def check(self, ofs, typ):
cls = (
c_ffta_sect_text, c_ffta_sect_text_page,
c_ffta_sect_text_line, c_ffta_sect_text_buf)
for i, dtyp in enumerate((1, 2, 4, 8)):
if not (typ & dtyp):
continue
if dtyp & 0x1:
fnd, ln, mx = self.rtf4.check(ofs)
elif dtyp & 0x2:
fnd, ln, mx = self.rtf2.check(ofs)
else:
fnd = True
if not fnd:
continue
if dtyp & 0x3:
r = self._chk_tab(ofs, cls[i])
else:
r = self._chk_item(ofs, cls[i])
if r[0] and r[2] >= self._thrs[i]:
return r
return False, None, None, None
def _chk_atab(self, mn, mx, cls):
sz = mx - mn
ln = sz // 4
subrngs = []
try:
sct = self.sect.subsect(mn, cls, self.sect, ln)
for sub in sct:
if sub is None:
continue
if isinstance(sub, c_ffta_sect_tab_ref):
for i in sub.iter_item(refresh = True):
pass
subrngs.append((sub.real_offset, sub.sect_top))
except:
return False, None, None, None, None
return True, subrngs, sct, ln, sz
def check_atab(self, mn, mx, typ):
cls = ( | c_ffta_sect_fixed_text, c_ffta_sect_words_text) | 6 | 2023-11-12 18:43:53+00:00 | 12k |
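The `c_range_holder` class in the record above tracks already-claimed offset ranges as a sorted list of half-open `(min, max)` intervals, and `hold()`/`peek()` report whether a new range was already covered by what is held. As a rough, self-contained sketch of that interval bookkeeping (a deliberately simplified illustration; `merge_interval`, its signature, and its return values are invented here and are not the repo's API):

def merge_interval(ranges, new_rng):
    # ranges:  sorted, pairwise-disjoint list of half-open (min, max) tuples
    # new_rng: half-open (min, max) tuple to claim
    # returns (updated ranges, True if new_rng was already fully covered)
    mn, mx = new_rng
    merged, covered = [], False
    for lo, hi in ranges:
        if hi < mn or mx < lo:            # disjoint and not adjacent: keep as-is
            merged.append((lo, hi))
        else:                             # overlapping or adjacent: fold into the new range
            covered = covered or (lo <= mn and mx <= hi)
            mn, mx = min(mn, lo), max(mx, hi)
    merged.append((mn, mx))
    merged.sort()
    return merged, covered

if __name__ == '__main__':
    rngs = []
    for rng in [(0, 4), (8, 12), (4, 8), (2, 6)]:
        rngs, covered = merge_interval(rngs, rng)
        print(rng, '->', rngs, 'already covered:', covered)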
civrealm/civrealm | src/civrealm/envs/freeciv_wrapper/tensor_wrapper.py | [
{
"identifier": "TensorAction",
"path": "src/civrealm/envs/freeciv_wrapper/action_wrapper.py",
"snippet": "class TensorAction(Wrapper):\n \"\"\"\n A wrapper that defines tensor action spaces, transforms tensor actions into\n actions that could be handeled by FreecivBaseEnv instance, and adds masks to\n observations.\n\n TensorAction wrapper is composed of five wrappers, including `TruncateDiplCity`,\n `DiplomacyLoop`, `CombineTechResearchGoal`, `PersistentCityProduction`, and `EmbarkWrapper`.\n\n\n\n Parameters\n ----------\n env: TensorBase\n A FreecivBaseEnv instance that has been wrapped by TensorBase.\n\n Attributes\n ----------\n aciton_config: dict\n a dict that configs that specify sizes of mutable entities and action layout.\n mask: dict\n a dict of masks of type numpy ndarray indicating available actions and entities. 0-> unavilalbe, 1->availble.\n available_actions: dict\n cached info['available_actions'], a dict that indicates available actions.\n action_space: gymnasium.spaces.Dict\n a gymnasium.spaces.Dict with keys `['actor_type','city_id','unit_id',\n 'dipl_id','city_action_type','unit_action_type','dipl_action_type',\n 'gov_action_type','tech_action_type']`\n \"\"\"\n\n def __init__(self, env: TensorBase):\n self.action_config = env.get_wrapper_attr(\"config\")\n self.action_config[\"resize\"][\"dipl\"] = self.action_config[\"resize\"][\n \"others_player\"\n ]\n self.actor_type_list = self.action_config[\"actor_type_list\"]\n self.available_actions = {}\n self.mask = {}\n self.__turn = -1\n self.__dealing_with_incoming = False\n\n super().__init__(\n TruncateDiplCity(\n DiplomacyLoop(\n CombineTechResearchGoal(\n PersistentCityProduction(EmbarkWrapper(env))\n )\n )\n )\n )\n\n self.action_space = spaces.Dict(\n {\n \"actor_type\": spaces.Discrete(len(self.actor_type_list)),\n \"city_id\": spaces.Discrete(self.action_config[\"resize\"][\"city\"]),\n \"city_action_type\": spaces.Discrete(\n sum(self.action_config[\"action_layout\"][\"city\"].values())\n ),\n \"unit_id\": spaces.Discrete(self.action_config[\"resize\"][\"unit\"]),\n \"unit_action_type\": spaces.Discrete(\n sum(self.action_config[\"action_layout\"][\"unit\"].values())\n ),\n \"dipl_id\": spaces.Discrete(self.action_config[\"resize\"][\"dipl\"]),\n \"dipl_action_type\": spaces.Discrete(\n sum(self.action_config[\"action_layout\"][\"dipl\"].values())\n ),\n \"gov_action_type\": spaces.Discrete(\n sum(self.action_config[\"action_layout\"][\"gov\"].values())\n ),\n \"tech_action_type\": spaces.Discrete(\n sum(self.action_config[\"action_layout\"][\"tech\"].values())\n ),\n }\n )\n\n def step(self, action):\n # Get {k:value.item()} if value is array\n action = {\n k: (v.item() if isinstance(v, np.ndarray) else v) for k, v in action.items()\n }\n\n base_action = self.action(action)\n if tensor_debug:\n print(base_action)\n obs, reward, terminated, truncated, info = self.env.step(base_action)\n if tensor_debug:\n print(f\"reward:{reward},done:{terminated or truncated}\")\n\n obs = self.update_obs_with_mask(obs, info, action)\n return obs, reward, terminated, truncated, info\n\n def reset(\n self,\n *,\n seed: Optional[int] = None,\n options: Optional[Dict[str, Any]] = None,\n **kwargs,\n ):\n obs, info = self.env.reset(seed=seed, options=options, **kwargs)\n obs = self.update_obs_with_mask(obs, info)\n return obs, info\n\n def action(self, action):\n \"\"\"\n Translate tensor action, a dict of keys `['actor_type','city_id','unit_id',\n 'dipl_id','city_action_type','unit_action_type','dipl_action_type',\n 'gov_action_type','tech_action_type']` to `FreecivBaseEnv` action,\n a tuple `(actor_type, entity_id, 
action_name)`.\n\n \"\"\"\n if tensor_debug:\n self._check_action_layout()\n\n actor_type = action[\"actor_type\"]\n actor_name = self.actor_type_list[actor_type]\n\n if actor_name == \"turn done\":\n return None\n if actor_name in [\"gov\", \"tech\"]:\n entity_pos = None\n entity_id = self.get_wrapper_attr(\"my_player_id\")\n action_index = action[actor_name + \"_action_type\"]\n else:\n entity_pos, action_index = (\n action[actor_name + \"_id\"],\n action[actor_name + \"_action_type\"],\n )\n entity_id = self.get_wrapper_attr(actor_name + \"_ids\")[\n action[actor_name + \"_id\"]\n ]\n\n if tensor_debug:\n assert (\n self.mask[actor_name + \"_action_type_mask\"][entity_pos, action_index]\n == 1\n ), f\"{actor_name} action of id pos {entity_pos}, \\\n action type index {action_index} is masked\"\n\n action_name = sorted(\n list(self.available_actions[actor_name][entity_id].keys())\n )[action_index]\n\n return (actor_name, entity_id, action_name)\n\n def update_obs_with_mask(self, observation, info, action=None):\n \"\"\"\n Update self.mask using observation, info and action from the unwrapped env,\n and add self.mask to the observation of the wrapped env.\n \"\"\"\n if info[\n \"turn\"\n ] != self.__turn or self.__dealing_with_incoming != self.get_wrapper_attr(\n \"dealing_with_incoming\"\n ):\n self.reset_mask()\n self.available_actions = deepcopy(info[\"available_actions\"])\n self.__turn = info[\"turn\"]\n self.__dealing_with_incoming = self.get_wrapper_attr(\"dealing_with_incoming\")\n self._update_mask(observation, info, action)\n\n return update(observation, deepcopy(self.mask))\n\n def reset_mask(self):\n \"\"\"\n Reset self.mask\n\n This is usually called at the start of a new turn to reset masks.\n \"\"\"\n # Reset mask\n sizes = self.action_config[\"resize\"]\n self.mask[\"actor_type_mask\"] = np.ones(\n len(self.actor_type_list), dtype=np.int32\n )\n\n # Units/Cities/Players and others Masks\n for field in [\"unit\", \"city\", \"others_unit\", \"others_city\", \"others_player\"]:\n self.mask[field + \"_mask\"] = np.ones(sizes[field], dtype=np.int32)[\n ..., np.newaxis\n ]\n\n # Units/Cities Id Masks same as their Masks\n self.mask[\"unit_id_mask\"] = self.mask[\"unit_mask\"]\n self.mask[\"city_id_mask\"] = self.mask[\"city_mask\"]\n\n # Dipl id mask\n self.mask[\"dipl_id_mask\"] = np.ones(sizes[\"dipl\"], dtype=np.int32)[\n ..., np.newaxis\n ]\n\n # Action type mask\n for field in [\"city\", \"unit\", \"dipl\"]:\n self.mask[field + \"_action_type_mask\"] = np.ones(\n (\n sizes[field],\n sum(self.action_config[\"action_layout\"][field].values()),\n ),\n dtype=np.int32,\n )\n for field in [\"gov\", \"tech\"]:\n self.mask[field + \"_action_type_mask\"] = np.ones(\n (sum(self.action_config[\"action_layout\"][field].values()),),\n dtype=np.int32,\n )\n\n def _update_mask(self, observation, info, action):\n # update self.mask using action, observation and info\n if action:\n self._mask_from_action(action)\n self._mask_from_obs(observation)\n self._mask_from_info(info)\n\n def _mask_from_action(self, action):\n # Mask out actions that have been performed in this turn.\n actor_type = action[\"actor_type\"]\n actor_name = self.actor_type_list[actor_type]\n if actor_name == \"unit\":\n # self.mask[\"unit_action_type_mask\"][\n # action[\"unit_id\"], action[\"unit_action_type\"]\n # ] = 0\n pass\n elif actor_name == \"city\":\n # self.mask[\"city_action_type_mask\"][action[\"city_id\"], :] = 0\n pass\n elif actor_name == \"gov\":\n self.mask[\"gov_action_type_mask\"][:] &= 0\n elif 
actor_name == \"tech\":\n self.mask[\"tech_action_type_mask\"][:] &= 0\n\n def _mask_from_obs(self, observation):\n # Mask mutable entities using observation\n\n # Mask out trailing spaces for unit and city\n self.mask[\"unit_id_mask\"][len(self.get_wrapper_attr(\"unit_ids\")) : :, :] = 0\n self.mask[\"city_id_mask\"][len(self.get_wrapper_attr(\"city_ids\")) : :, :] = 0\n self.mask[\"dipl_id_mask\"][len(self.get_wrapper_attr(\"dipl_ids\")) : :, :] = 0\n self.mask[\"unit_mask\"] = self.mask[\"unit_id_mask\"].copy()\n self.mask[\"city_mask\"] = self.mask[\"city_id_mask\"].copy()\n\n self.mask[\"unit_action_type_mask\"][\n len(self.get_wrapper_attr(\"unit_ids\")) : :, :\n ] = 0\n self.mask[\"city_action_type_mask\"][\n len(self.get_wrapper_attr(\"city_ids\")) : :, :\n ] = 0\n\n # Mask Unit\n for pos, unit_id in enumerate(\n self.get_wrapper_attr(\"unit_ids\")[: self.action_config[\"resize\"][\"unit\"]]\n ):\n unit = observation[\"unit\"][unit_id]\n if unit[\"moves_left\"] == 0 or self.unwrapped.civ_controller.unit_ctrl.units[\n unit_id\n ][\"activity\"] not in [\n ACTIVITY_IDLE,\n ACTIVITY_FORTIFIED,\n ACTIVITY_SENTRY,\n ACTIVITY_FORTIFYING,\n ]: # agent busy or fortified\n self.mask[\"unit_id_mask\"][pos] &= 0\n self.mask[\"unit_action_type_mask\"][pos, :] &= 0\n\n self.mask[\"others_unit_mask\"][\n len(self.get_wrapper_attr(\"others_unit_ids\")) : :, :\n ] &= 0\n self.mask[\"others_city_mask\"][\n len(self.get_wrapper_attr(\"others_city_ids\")) : :, :\n ] &= 0\n\n if self.get_wrapper_attr(\"researching\"):\n self.mask[\"tech_action_type_mask\"][:] &= 0\n if not self.get_wrapper_attr(\"researching\") and tensor_debug:\n print(f\"techs_researched: {self.get_wrapper_attr('techs_researched')}\")\n\n def _mask_from_info(self, info):\n others_player_num = len(info[\"available_actions\"].get(\"player\", {}).keys())\n self.mask[\"others_player_mask\"][others_player_num::, :] &= 0\n\n # Mask City and Unit\n for mutable in [\"city\", \"unit\", \"dipl\"]:\n entities = info[\"available_actions\"].get(mutable, {})\n if len(entities) == 0:\n self.mask[mutable + \"_action_type_mask\"][:, :] &= 0\n self.mask[mutable + \"_id_mask\"][:] &= 0\n continue\n for i, entity_id in enumerate(\n self.env.get_wrapper_attr(mutable + \"_ids\")[\n : self.action_config[\"resize\"][mutable]\n ]\n ):\n actions = entities.get(entity_id, {})\n if len(actions) == 0:\n self.mask[mutable + \"_action_type_mask\"][i, :] &= 0\n self.mask[mutable + \"_id_mask\"][i] &= 0\n continue\n for action_id, act_name in enumerate(sorted(list(actions.keys()))):\n self.mask[mutable + \"_action_type_mask\"][i, action_id] &= int(\n actions[act_name]\n )\n self.mask[mutable + \"_id_mask\"][i] &= int(\n any(self.mask[mutable + \"_action_type_mask\"][i])\n )\n for mutable in [\"city\", \"unit\", \"dipl\"]:\n actor_type_index = self.actor_type_list.index(mutable)\n self.mask[\"actor_type_mask\"][actor_type_index] &= int(\n any(self.mask[mutable + \"_id_mask\"])\n )\n\n # Mask Gov and Tech\n for immutable in [\"gov\", \"tech\"]:\n options = info[\"available_actions\"].get(immutable, {})\n if len(options) == 0:\n self.mask[immutable + \"_action_type_mask\"][:] &= 0\n continue\n my_player_id = self.get_wrapper_attr(\"my_player_id\")\n for action_id, act_name in enumerate(\n sorted(list(options[my_player_id].keys()))\n ):\n self.mask[immutable + \"_action_type_mask\"][action_id] &= int(\n options[my_player_id][act_name]\n )\n for immutable in [\"gov\", \"tech\"]:\n actor_type_index = self.actor_type_list.index(immutable)\n 
self.mask[\"actor_type_mask\"][actor_type_index] &= int(\n any(self.mask[immutable + \"_action_type_mask\"])\n )\n\n def _check_action_layout(self):\n action_layout = self.action_config[\"action_layout\"]\n for field in [\"city\", \"unit\"]:\n for id, entity in self.available_actions.get(field, {}).items():\n assert len(entity) == sum(action_layout[field].values())\n assert len(\n self.available_actions[\"gov\"][self.get_wrapper_attr(\"my_player_id\")]\n ) == sum(action_layout[\"gov\"].values())"
},
{
"identifier": "Wrapper",
"path": "src/civrealm/envs/freeciv_wrapper/core.py",
"snippet": "class Wrapper(gymnasium.Wrapper):\n def reset(self, *, seed=None, options=None, **kwargs):\n return self.env.reset(seed=seed, options=options, **kwargs)"
},
{
"identifier": "CacheLastObs",
"path": "src/civrealm/envs/freeciv_wrapper/observation_wrapper.py",
"snippet": "class CacheLastObs(Wrapper):\n \"\"\"\n Cache last observation, and override observation with cached observation\n if terminated or truncated.\n\n Attributes\n -------------\n cached_last_obs: dict\n observation cached from the last call of step() or reset()\n \"\"\"\n\n def __init__(self, env):\n self.cached_last_obs = None\n super().__init__(env)\n\n def step(self, action):\n obs, reward, terminated, truncated, info = self.env.step(action)\n\n if terminated or truncated:\n obs = self.cached_last_obs\n info = {} if info is None else info\n return obs, reward, terminated, truncated, info\n\n self.cached_last_obs = deepcopy(obs)\n return obs, reward, terminated, truncated, info"
},
{
"identifier": "TensorObservation",
"path": "src/civrealm/envs/freeciv_wrapper/observation_wrapper.py",
"snippet": "class TensorObservation(Wrapper):\n \"\"\"\n A wrapper that defines tensor observation space, transforms observations got from\n FreecivBaseEnv into tensor observations.\n\n Parameters\n ----------\n env:\n A FreecivBaseEnv wrapped by TensorBase wrapper\n\n Attributes\n ---------\n observation_config: dict\n tensor observation configuration\n observation_space: gymnasium.spaces.Dict\n a gymnasium.spaces.Dict with keys speficified in configuration;\n observation with keywords `mask` would not be removed.\n obs_initialized: bool\n whether observation spaces has been initialized\n obs_layout: dict\n a dict that specify shapes of flattened numpy arrays in observation\n \"\"\"\n\n mutable_fields = [\n \"city\",\n \"unit\",\n \"others_city\",\n \"others_unit\",\n \"others_player\",\n \"dipl\",\n ]\n immutable_fields = [\"map\", \"rules\", \"player\", \"gov\"]\n\n def __init__(self, env: TensorBase):\n self.obs_initialized = False\n self.observation_config = env.get_wrapper_attr(\"config\")\n self.observation_config[\"resize\"][\"dipl\"] = self.observation_config[\"resize\"][\n \"others_player\"\n ]\n self.obs_layout = {}\n self.others_player_ids = []\n super().__init__(env)\n\n def observation(self, observation):\n \"\"\"\n convert observations obtained from `FreecivBaseEnv` into a dict of flattend numpy arrays.\n \"\"\"\n # in case of gameover, return None as observation\n if len(observation.get(\"player\", {})) == 0:\n return None\n\n observation = deepcopy(observation)\n observation = self._merge_player_techs(observation)\n obs_dict = self._handle_dict(observation)\n obs = self._embed_immutable(deepcopy(obs_dict))\n obs = self._embed_mutable(obs)\n\n if not self.obs_initialized:\n self.observation_space = self._infer_obs_space(obs)\n self.obs_initialized = True\n if tensor_debug:\n self._check_obs_layout(obs)\n return obs\n\n def _handle_dict(self, obs):\n obs[\"city\"] = obs.get(\"city\", {})\n obs[\"unit\"] = obs.get(\"unit\", {})\n\n # TODO: This should be the base env's reponsibility\n # Add info to city and unit from civcontroller\n update(obs[\"city\"], self.unwrapped.civ_controller.city_ctrl.cities)\n update(obs[\"unit\"], self.unwrapped.civ_controller.unit_ctrl.units)\n # update player info with dipl_state\n update(obs[\"player\"], obs.get(\"dipl\", {}))\n\n my_player_id = self.get_wrapper_attr(\"my_player_id\")\n\n obs[\"dipl\"] = {\n player: state[\"diplomacy_clause_map\"]\n for player, state in obs.get(\"dipl\", {}).items()\n if player != my_player_id\n }\n for player, treaty in obs[\"dipl\"].items():\n obs[\"dipl\"][player] = self._encode_treaty(treaty, player)\n\n # remove unused fields and keep mask if given\n obs = {\n k: v\n for k, v in obs.items()\n if k in self.observation_config[\"filter_observation\"] or k.endswith(\"mask\")\n }\n\n # Add others fields and initialize\n\n obs[\"others_unit\"] = {}\n obs[\"others_city\"] = {}\n\n for field in [\"unit\", \"city\"]:\n for key, val in list(obs[field].items()):\n if val[\"owner\"] != my_player_id:\n # delete others' entity from unit and city\n obs[\"others_\" + field][key] = obs[field].pop(key)\n\n obs[\"others_player\"] = {\n key: obs[\"player\"].pop(key)\n for key in list(obs[\"player\"].keys())\n if key != my_player_id\n }\n obs[\"player\"] = obs[\"player\"][my_player_id]\n\n # Initialize build_cost with 0 for now\n obs[\"rules\"][\"build_cost\"] = 0\n\n mutable_fields = [field for field in obs.keys() if field in self.mutable_fields]\n immutable_fields = [\n field for field in obs.keys() if field in 
self.immutable_fields\n ]\n\n ops = self.observation_config[\"obs_ops\"]\n\n # Handle immutable\n # delete unused keywords and transform useful keywords\n def apply_ops(field):\n for k, val in list(obs[field].items()):\n if k in list(ops[field].keys()):\n obs[field][k] = ops[field][k](val)\n else:\n obs[field].pop(k)\n\n for field in immutable_fields:\n apply_ops(field)\n\n # Handle mutable\n # delete unused keywords and transform useful keywords\n def apply_ops_mutable(field):\n for entity_id, entity in list(obs[field].items()):\n for k, val in list(entity.items()):\n if k in list(ops[field].keys()):\n entity[k] = ops[field][k](val)\n else:\n entity.pop(k)\n\n for field in mutable_fields:\n apply_ops_mutable(field)\n\n self.others_player_ids = sorted(obs[\"others_player\"].keys())\n\n return obs\n\n def _embed_immutable(self, obs):\n immutable = {\n field: obs[field] for field in obs if field in self.immutable_fields\n }\n\n if not self.obs_initialized:\n for field, field_dict in immutable.items():\n self.obs_layout[field] = OrderedDict(\n [(k, field_dict[k].shape) for k in sorted(list(field_dict.keys()))]\n )\n\n for field, field_dict in immutable.items():\n # check field layout is correct\n if tensor_debug:\n assert self.obs_layout[field] == {\n k: v.shape for k, v in field_dict.items()\n }\n\n obs[field] = np.concatenate(\n [field_dict[k] for k in sorted(list(field_dict.keys()))], axis=-1\n ).astype(np.int32)\n return obs\n\n def _embed_mutable(self, obs):\n mutable = {field: obs[field] for field in obs if field in self.mutable_fields}\n mutable_layout = self.observation_config[\"obs_mutable_layout\"]\n\n if not self.obs_initialized:\n for field, entity_dict in mutable.items():\n layout = mutable_layout[field]\n self.obs_layout[field] = OrderedDict(\n [(key, layout[key]) for key in sorted(layout)]\n )\n\n for field, entity_dict in mutable.items():\n # for empty field, fill with zero\n if len(entity_dict) == 0:\n mutable[field] = np.zeros(\n [\n self.observation_config[\"resize\"][field],\n *reduce(add_shape, self.obs_layout[field].values()),\n ],\n dtype=np.int32,\n )\n continue\n if tensor_debug:\n # check entity layout is correct\n assert all(\n self.obs_layout[field] == {k: v.shape for k, v in entity.items()}\n for entity in entity_dict.values()\n )\n # combine every entity's properties into an array along the last axis\n entity_dict = {\n id: np.concatenate([entity[k] for k in sorted(entity.keys())], axis=-1)\n for id, entity in entity_dict.items()\n }\n # combine all entities in a field into an array along the first axis\n mutable[field] = np.stack(\n [entity_dict[id] for id in self.get_wrapper_attr(field + \"_ids\")],\n axis=0,\n ).astype(np.int32)\n\n # resize to maximum entity shape\n for field in mutable:\n size = self.observation_config[\"resize\"][field]\n mutable[field] = resize_data(mutable[field], size).astype(np.int32)\n\n update(obs, mutable)\n return obs\n\n def _infer_obs_space(self, observation) -> spaces.Dict:\n return spaces.Dict(\n [\n (key, spaces.Box(low=0, high=1000, shape=space.shape, dtype=np.int32))\n for key, space in observation.items()\n ]\n )\n\n def _check_obs_layout(self, obs):\n for field, val in self.obs_layout.items():\n shape = reduce(add_shape, val.values())\n assert shape[-1] == obs[field].shape[-1]\n\n def _merge_player_techs(self, obs):\n for player in obs[\"player\"].values():\n player[\"techs\"] = []\n for tech in sorted(obs[\"tech\"]):\n player_tech = player.pop(f\"tech_{tech}\")\n player[\"techs\"].append(player_tech if player_tech is not 
None else 255)\n return obs\n\n def _encode_treaty(self, treaty, player):\n encoded = {\n \"type\": np.zeros(10 * 2, dtype=np.int32),\n \"give_city\": np.zeros(\n self.observation_config[\"resize\"][\"city\"], dtype=np.int32\n ),\n \"ask_city\": np.zeros(\n self.observation_config[\"resize\"][\"others_city\"], dtype=np.int32\n ),\n \"give_gold\": 255,\n \"ask_gold\": 255,\n }\n\n for clause in treaty:\n value = clause[\"value\"]\n\n if clause[\"type\"] == player_const.CLAUSE_GOLD:\n gold = sum(int(value >= level) for level in GOLD_SET)\n if clause[\"giver\"] == player:\n encoded[\"ask_gold\"] = gold\n else:\n encoded[\"give_gold\"] = gold\n elif clause[\"type\"] == player_const.CLAUSE_CITY:\n if clause[\"giver\"] == player:\n city_list = self.get_wrapper_attr(\"others_city_ids\")\n field = \"ask_city\"\n else:\n city_list = self.get_wrapper_attr(\"city_ids\")\n field = \"give_city\"\n if value in city_list:\n city_idx = city_list.index(value)\n encoded[field][city_idx] = 1\n\n if clause[\"giver\"] == player:\n encoded[\"type\"][clause[\"type\"]] = 1\n else:\n encoded[\"type\"][clause[\"type\"] + 10] = 1\n\n return encoded"
},
{
"identifier": "TensorBase",
"path": "src/civrealm/envs/freeciv_wrapper/tensor_base_wrapper.py",
"snippet": "class TensorBase(Wrapper):\n \"\"\"\n A basic wrapper that deals with config loading and entity id recording, \n required by all tensor-related wrappers.\n\n\n Parameters\n ----------\n env: FreecivBaseEnv\n config: dict\n tensor env configuration\n\n Attributes\n ---------\n config: dict\n A dict that specifies all configurations related to tensor wrapper.\n my_player_id: int\n My player id.\n unit_ids: list\n A sorted list of my unit ids.\n city_ids: list\n A sorted list of my city ids.\n others_unit_ids: list\n A sorted list of others unit ids.\n others_city_ids: list\n A sorted list of others city ids.\n dipl_ids : list\n A list of others player ids.\n units : dict\n ruleset information about units.\n unit_types :list\n A list of all unit types.\n unit_costs : list\n A list of int indicating unit costs.\n improvements : dict\n Ruleset information about city improvements.\n impr_costs :list\n A list of int indicating city improvements costs.\n\n \"\"\"\n\n def __init__(self, env: FreecivBaseEnv, config: dict = default_tensor_config):\n self.config = config\n self.my_player_id = -1\n\n # mutable ids\n self.unit_ids = []\n self.city_ids = []\n self.others_unit_ids = []\n self.others_city_ids = []\n self.dipl_ids = []\n\n # ruleset\n self.units = {}\n self.unit_types = []\n self.unit_costs = []\n self.improvements = {}\n self.impr_costs = []\n\n super().__init__(env)\n\n def update_sequence_ids(self, observation):\n \"\"\"\n Use city, unit and dipl information in observation to update ids.\n \"\"\"\n self.unit_ids = sorted(\n list(\n k\n for k in observation.get(\"unit\", {}).keys()\n if observation[\"unit\"][k][\"owner\"] == self.my_player_id\n )\n )\n self.others_unit_ids = sorted(\n list(\n k\n for k in observation.get(\"unit\", {}).keys()\n if observation[\"unit\"][k][\"owner\"] != self.my_player_id\n )\n )\n self.city_ids = sorted(\n list(\n k\n for k in observation.get(\"city\", {}).keys()\n if observation[\"city\"][k][\"owner\"] == self.my_player_id\n )\n )\n self.others_city_ids = sorted(\n list(\n k\n for k in observation.get(\"city\", {}).keys()\n if observation[\"city\"][k][\"owner\"] != self.my_player_id\n )\n )\n self.dipl_ids = [\n player\n for player in sorted(observation.get(\"dipl\", {}).keys())\n if player != self.my_player_id\n ]\n\n def update_config(self):\n \"\"\"\n Update config using ruleset information at the start of the turn.\n \"\"\"\n self.units = self.unwrapped.civ_controller.rule_ctrl.unit_types\n self.unit_types = [self.units[i][\"name\"] for i in range(len(self.units))]\n self.unit_costs = [self.units[i][\"build_cost\"] for i in range(len(self.units))]\n self.improvements = self.unwrapped.civ_controller.rule_ctrl.improvements\n self.impr_costs = [\n self.improvements[i][\"build_cost\"] for i in range(len(self.improvements))\n ]\n self.config[\"obs_ops\"][\"unit\"][\"type_rule_name\"] = onehotifier_maker(\n self.unit_types\n )\n self.config[\"obs_ops\"][\"rules\"][\"build_cost\"] = lambda _: np.array(\n self.unit_costs + self.impr_costs\n )\n\n def reset(self, *args, **kwargs):\n obs, info = self.env.reset(*args, **kwargs)\n self.my_player_id = self.unwrapped.civ_controller.player_ctrl.my_player_id\n\n self.update_config()\n self.update_sequence_ids(obs)\n return obs, info\n\n def step(self, *args, **kwargs):\n obs, reward, terminated, truncated, info = self.env.step(*args, **kwargs)\n self.update_sequence_ids(obs)\n return obs, reward, terminated, truncated, info"
}
] | import numpy as np
from civrealm.envs import FreecivBaseEnv
from civrealm.envs.freeciv_wrapper.config import default_tensor_config
from .action_wrapper import TensorAction
from .core import Wrapper
from .observation_wrapper import CacheLastObs, TensorObservation
from .tensor_base_wrapper import TensorBase | 7,333 |
class TensorWrapper(Wrapper):
"""
TensorWrapper is used to make Civrealm environment tensorized by converting
observations from FreecivBaseEnv into tensors and tensor actions back to actions compatible with
FreecivBaseEnv.
    TensorWrapper is composed of `TensorBase`, `TensorAction`, `TensorObservation`
and `CacheLastObs`.
Parameters
----------
env
config:
tensor env configuration
Attributes
----------
config: dict
tensor wrapper configuration
"""
def __init__(self, env: FreecivBaseEnv, config: dict = default_tensor_config):
self.config = config
super().__init__(
CacheLastObs(
|
class TensorWrapper(Wrapper):
"""
TensorWrapper is used to make Civrealm environment tensorized by converting
observations from FreecivBaseEnv into tensors and tensor actions back to actions compatible with
FreecivBaseEnv.
    TensorWrapper is composed of `TensorBase`, `TensorAction`, `TensorObservation`
and `CacheLastObs`.
Parameters
----------
env
config:
tensor env configuration
Attributes
----------
config: dict
tensor wrapper configuration
"""
def __init__(self, env: FreecivBaseEnv, config: dict = default_tensor_config):
self.config = config
super().__init__(
CacheLastObs( | TensorObservation(TensorAction(TensorBase(env, config=config))) | 0 | 2023-11-18 19:35:50+00:00 | 12k |
RAIVNLab/MatFormer-OLMo | olmo/model.py | [
{
"identifier": "PathOrStr",
"path": "olmo/aliases.py",
"snippet": ""
},
{
"identifier": "BeamSearch",
"path": "olmo/beam_search.py",
"snippet": "class BeamSearch:\n \"\"\"\n Implements the beam search algorithm for decoding the most likely sequences.\n\n :param end_index: The index of the \"stop\" or \"end\" token in the vocabulary. Usually the EOS token ID.\n\n :param max_steps: The maximum number of decoding steps to take, i.e. the maximum length\n of the predicted sequences.\n\n :param beam_size: The width of the beam used.\n\n :param per_node_beam_size: The maximum number of candidates to consider per node, at each step in the search.\n If not given, this just defaults to `beam_size`. Setting this parameter\n to a number smaller than `beam_size` may give better results, as it can introduce\n more diversity into the search. See\n [*Beam Search Strategies for Neural Machine Translation*, Freitag and Al-Onaizan, 2017]\n (https://api.semanticscholar.org/CorpusID:2229477).\n\n :param sampler: An optional `Sampler` which is used to pick next candidate nodes and beams.\n If not specified, `DeterministicSampler` will be used, which just takes the\n `per_node_beam_size` most likely nodes and the `beam_size` most likely beams.\n\n Using the [`GumbelSampler`](#gumbelsampler), on the other hand, will give you\n [Stochastic Beam Search](https://api.semanticscholar.org/CorpusID:76662039).\n\n :param min_steps: The minimum number of decoding steps to take, i.e. the minimum length of\n the predicted sequences. This does not include the start or end tokens. If `None`,\n no minimum is enforced.\n\n :param final_sequence_scorer: An optional `FinalSequenceScorer` which is used to score the final generated sequences.\n The output from this module is what is returned by the `search` method. If not\n specified, `SequenceLogProbabilityScorer` will be used, which scores the sequences\n by the sum of the token log probabilities.\n\n :param constraints: An optional list of `Constraint`s which should be applied during beam search. 
If not\n provided, no constraints will be enforced.\n\n \"\"\"\n\n def __init__(\n self,\n end_index: int,\n *,\n max_steps: int = 50,\n beam_size: int = 10,\n per_node_beam_size: Optional[int] = None,\n sampler: Optional[Sampler] = None,\n min_steps: Optional[int] = None,\n final_sequence_scorer: Optional[FinalSequenceScorer] = None,\n constraints: Optional[List[Constraint]] = None,\n ) -> None:\n if not max_steps > 0:\n raise ValueError(\"max_steps must be positive\")\n if not beam_size > 0:\n raise ValueError(\"beam_size must be positive\")\n if per_node_beam_size is not None and not per_node_beam_size > 0:\n raise ValueError(\"per_node_beam_size must be positive\")\n if min_steps is not None:\n if not min_steps >= 0:\n raise ValueError(\"min_steps must be non-negative\")\n if not min_steps <= max_steps:\n raise ValueError(\"min_steps must be less than or equal to max_steps\")\n\n self._end_index = end_index\n self.max_steps = max_steps\n self.beam_size = beam_size\n self.per_node_beam_size = per_node_beam_size or beam_size\n self.sampler = sampler or DeterministicSampler()\n self.min_steps = min_steps or 0\n self.final_sequence_scorer = final_sequence_scorer or SequenceLogProbabilityScorer()\n self.constraints = constraints or []\n\n @staticmethod\n def _reconstruct_sequences(predictions, backpointers):\n # Reconstruct the sequences.\n # shape: [(batch_size, beam_size, 1)]\n reconstructed_predictions = [predictions[-1].unsqueeze(2)]\n\n if not backpointers:\n return reconstructed_predictions\n\n # shape: (batch_size, beam_size)\n cur_backpointers = backpointers[-1]\n\n for timestep in range(len(predictions) - 2, 0, -1):\n # shape: (batch_size, beam_size, 1)\n cur_preds = predictions[timestep].gather(1, cur_backpointers).unsqueeze(2)\n\n reconstructed_predictions.append(cur_preds)\n\n # shape: (batch_size, beam_size)\n cur_backpointers = backpointers[timestep - 1].gather(1, cur_backpointers)\n\n # shape: (batch_size, beam_size, 1)\n final_preds = predictions[0].gather(1, cur_backpointers).unsqueeze(2)\n\n reconstructed_predictions.append(final_preds)\n\n return reconstructed_predictions\n\n def search(\n self,\n start_predictions: torch.Tensor,\n start_state: StateType,\n step: StepFunctionType,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Given a starting state and a step function, apply beam search to find the\n most likely target sequences.\n\n Returns a tuple of `(predictions, final_scores)`, where `predictions`\n has shape `(batch_size, beam_size, max_steps)` and `final_scores`\n has shape `(batch_size, beam_size)`.\n\n .. note::\n If your step function returns `-inf` for some log probabilities\n (like if you're using a masked log-softmax) then some of the \"best\"\n sequences returned may also have `-inf` log probability. Specifically\n this happens when the beam size is smaller than the number of actions\n with finite log probability (non-zero probability) returned by the step function.\n Therefore if you're using a mask you may want to check the results from `search`\n and potentially discard sequences with non-finite log probability.\n\n :param start_predictions: A tensor containing the initial predictions with shape `(batch_size,)`.\n Usually the initial predictions are just the index of the \"start\" token\n in the target vocabulary.\n\n :param start_state: The initial state passed to the `step` function. 
Each value of the state dict\n should be a tensor of shape `(batch_size, *)`, where `*` means any other\n number of dimensions.\n\n :param step: A function that is responsible for computing the next most likely tokens,\n given the current state and the predictions from the last time step.\n The function should accept two or three arguments:\n\n - a tensor of shape `(group_size,)` or representing the index of the predicted\n tokens from the last time step,\n - the current state, a `StateType`, and\n - optionally, the timestep, an `int`.\n\n The `group_size` will be `batch_size * beam_size`, except in the initial\n step, for which it will just be `batch_size`.\n\n The function is expected to return a tuple, where the first element\n is a tensor of shape `(group_size, vocab_size)` containing\n the log probabilities of the tokens for the next step, and the second\n element is the updated state. The tensor in the state should have shape\n `(group_size, *)`, where `*` means any other number of dimensions.\n\n \"\"\"\n step_signature = signature(step)\n if len(step_signature.parameters) < 3:\n # If the step function we're given does not take the time step argument, wrap it\n # in one that does.\n old_step = cast(StepFunctionTypeNoTimestep, step)\n\n def new_step(last_predictions: torch.Tensor, state: Dict[str, torch.Tensor], time_step: int):\n del time_step\n return old_step(last_predictions, state)\n\n return self._search(start_predictions, start_state, new_step)\n else:\n return self._search(start_predictions, start_state, cast(StepFunctionTypeWithTimestep, step))\n\n def _search(\n self,\n start_predictions: torch.Tensor,\n start_state: StateType,\n step: StepFunctionTypeWithTimestep,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n batch_size = start_predictions.size()[0]\n\n # List of (batch_size, beam_size) tensors. One for each time step. Does not\n # include the start symbols, which are implicit.\n predictions: List[torch.Tensor] = []\n\n # List of (batch_size, beam_size) tensors. One for each time step. None for\n # the first. Stores the index n for the parent prediction, i.e.\n # predictions[t-1][i][n], that it came from.\n backpointers: List[torch.Tensor] = []\n\n constraint_states = [constraint.init_state(batch_size) for constraint in self.constraints]\n\n # Calculate the first timestep. This is done outside the main loop\n # because we are going from a single decoder input (the output from the\n # encoder) to the top `beam_size` decoder outputs. 
On the other hand,\n # within the main loop we are going from the `beam_size` elements of the\n # beam to `beam_size`^2 candidates from which we will select the top\n # `beam_size` elements for the next iteration.\n # shape: (batch_size, num_classes)\n start_class_log_probabilities, state = step(start_predictions, start_state, 0)\n\n num_classes = start_class_log_probabilities.size()[1]\n\n # Make sure `per_node_beam_size` is not larger than `num_classes`.\n if self.per_node_beam_size > num_classes:\n raise ValueError(\n f\"Vocab size ({num_classes:d}) too small \"\n f\"relative to per_node_beam_size ({self.per_node_beam_size:d}).\\n\"\n f\"Please decrease beam_size or per_node_beam_size.\"\n )\n\n sampler_state = self.sampler.init_state(start_class_log_probabilities, batch_size, num_classes)\n\n # Apply all constraints.\n if self.constraints:\n # shape: (batch_size, 1, num_classes)\n expanded_start_class_log_probabilities = start_class_log_probabilities.unsqueeze(1)\n for constraint, constraint_state in zip(self.constraints, constraint_states):\n expanded_start_class_log_probabilities = constraint.apply(\n constraint_state, expanded_start_class_log_probabilities\n )\n start_class_log_probabilities = expanded_start_class_log_probabilities.squeeze(1)\n\n # Prevent selecting the end symbol if there is any min_steps constraint\n if self.min_steps >= 1:\n start_class_log_probabilities[:, self._end_index] = torch.finfo(\n start_class_log_probabilities.dtype\n ).min\n\n # Get the initial predicted classed and their log probabilities.\n # shape: (batch_size, beam_size), (batch_size, beam_size)\n (\n start_top_log_probabilities,\n start_predicted_classes,\n sampler_state,\n ) = self.sampler.sample_beams(start_class_log_probabilities, self.beam_size, sampler_state)\n\n if self.beam_size == 1 and (start_predicted_classes == self._end_index).all():\n warnings.warn(\n \"Empty sequences predicted. You may want to increase the beam size or ensure \"\n \"your step function is working properly.\",\n RuntimeWarning,\n )\n return start_predicted_classes.unsqueeze(-1), start_top_log_probabilities\n\n # The log probabilities for the last time step.\n # shape: (batch_size, beam_size)\n last_log_probabilities = start_top_log_probabilities\n\n # shape: [(batch_size, beam_size)]\n predictions.append(start_predicted_classes)\n\n # Log probability tensor that mandates that the end token is selected.\n # shape: (batch_size * beam_size, num_classes)\n log_probs_after_end = start_class_log_probabilities.new_full(\n (batch_size * self.beam_size, num_classes),\n torch.finfo(start_class_log_probabilities.dtype).min,\n )\n log_probs_after_end[:, self._end_index] = 0.0\n\n # Set the same state for each element in the beam.\n self._update_initial_state(state, batch_size)\n\n for i, constraint in enumerate(self.constraints):\n constraint_states[i] = constraint.update_state(constraint_states[i], start_predicted_classes)\n\n for timestep in range(self.max_steps - 1):\n # shape: (batch_size * beam_size,)\n last_predictions = predictions[-1].reshape(batch_size * self.beam_size)\n\n # If every predicted token from the last step is `self._end_index`,\n # then we can stop early.\n if (last_predictions == self._end_index).all():\n break\n # Take a step. 
This get the predicted log probs of the next classes\n # and updates the state.\n # shape: (batch_size * beam_size, num_classes)\n class_log_probabilities, state = step(last_predictions, state, timestep + 1)\n\n # Apply all constraints.\n if self.constraints:\n # shape: (batch_size, beam_size, num_classes)\n reshaped_class_log_probabilities = class_log_probabilities.view(batch_size, self.beam_size, -1)\n for constraint, constraint_state in zip(self.constraints, constraint_states):\n reshaped_class_log_probabilities = constraint.apply(\n constraint_state, reshaped_class_log_probabilities\n )\n # shape: (batch_size * beam_size, num_classes)\n class_log_probabilities = reshaped_class_log_probabilities.view(batch_size * self.beam_size, -1)\n\n # The `timestep`-th iteration of the for loop is generating the `timestep + 2`-th token\n # of the sequence (because `timestep` is 0-indexed and we generated the first token\n # before the for loop). Here we block the end index if the search is not allowed to\n # terminate on this iteration.\n if timestep + 2 <= self.min_steps:\n class_log_probabilities[:, self._end_index] = torch.finfo(class_log_probabilities.dtype).min\n\n # shape: (batch_size * beam_size, num_classes)\n last_predictions_expanded = last_predictions.unsqueeze(-1).expand(\n batch_size * self.beam_size, num_classes\n )\n\n # Here we are finding any beams where we predicted the end token in\n # the previous timestep and replacing the distribution with a\n # one-hot distribution, forcing the beam to predict the end token\n # this timestep as well.\n # shape: (batch_size * beam_size, num_classes)\n cleaned_log_probabilities = torch.where(\n last_predictions_expanded == self._end_index,\n log_probs_after_end,\n class_log_probabilities,\n )\n\n # shape (both): (batch_size * beam_size, per_node_beam_size)\n top_log_probabilities, predicted_classes, sampler_state = self.sampler.sample_nodes(\n cleaned_log_probabilities, self.per_node_beam_size, sampler_state\n )\n\n # Here we expand the last log probabilities to (batch_size * beam_size, per_node_beam_size)\n # so that we can add them to the current log probs for this timestep.\n # This lets us maintain the log probability of each element on the beam.\n # shape: (batch_size * beam_size, per_node_beam_size)\n expanded_last_log_probabilities = (\n last_log_probabilities.unsqueeze(2)\n .expand(batch_size, self.beam_size, self.per_node_beam_size)\n .reshape(batch_size * self.beam_size, self.per_node_beam_size)\n )\n\n # shape: (batch_size * beam_size, per_node_beam_size)\n summed_top_log_probabilities = top_log_probabilities + expanded_last_log_probabilities\n\n # shape: (batch_size, beam_size * per_node_beam_size)\n reshaped_summed = summed_top_log_probabilities.reshape(\n batch_size, self.beam_size * self.per_node_beam_size\n )\n\n # shape: (batch_size, beam_size * per_node_beam_size)\n reshaped_predicted_classes = predicted_classes.reshape(\n batch_size, self.beam_size * self.per_node_beam_size\n )\n\n # Keep only the top `beam_size` beam indices.\n # shape (both): (batch_size, beam_size)\n (\n restricted_beam_log_probs,\n restricted_beam_indices,\n sampler_state,\n ) = self.sampler.sample_beams(reshaped_summed, self.beam_size, sampler_state)\n\n # Use the beam indices to extract the corresponding classes.\n # shape: (batch_size, beam_size)\n restricted_predicted_classes = reshaped_predicted_classes.gather(1, restricted_beam_indices)\n\n predictions.append(restricted_predicted_classes)\n\n # shape: (batch_size, beam_size)\n last_log_probabilities 
= restricted_beam_log_probs\n\n # The beam indices come from a `beam_size * per_node_beam_size` dimension where the\n # indices with a common ancestor are grouped together. Hence\n # dividing by per_node_beam_size gives the ancestor. (Note that this is integer\n # division as the tensor is a LongTensor.)\n # shape: (batch_size, beam_size)\n backpointer = torch.divide(restricted_beam_indices, self.per_node_beam_size, rounding_mode=\"trunc\")\n backpointers.append(backpointer)\n\n # Keep only the pieces of the state tensors corresponding to the\n # ancestors created this iteration.\n self._update_state(state, backpointer)\n\n for i, constraint in enumerate(self.constraints):\n constraint_states[i] = constraint.update_state(\n constraint_states[i], restricted_predicted_classes, last_backpointer=backpointer\n )\n\n # Warn about \"-inf\" log probabilities if not using any constraints (negligible\n # log probabilities are expected when using constraints).\n if not self.constraints and (\n not torch.isfinite(last_log_probabilities).all()\n or (last_log_probabilities == torch.finfo(last_log_probabilities.dtype).min).any()\n ):\n warnings.warn(\n \"Negligible log probabilities encountered ('-inf' or equivalent). \"\n \"Some final sequences may not make sense. \"\n \"This can happen when the beam size is larger than the number of valid (non-zero \"\n \"probability) transitions that the step function produces.\",\n RuntimeWarning,\n )\n\n reconstructed_predictions = self._reconstruct_sequences(predictions, backpointers)\n\n # shape: (batch_size, beam_size, max_steps)\n all_predictions = torch.cat(list(reversed(reconstructed_predictions)), 2)\n\n # Calculate the final sequence scores\n # shape: (batch_size, beam_size)\n final_scores = self.final_sequence_scorer.score(all_predictions, last_log_probabilities, self._end_index)\n\n # Sort the sequences based on the final scores so the best scoring\n # sequence is at index 0\n sorted_final_scores, sorted_indices = torch.sort(final_scores, dim=1, descending=True)\n sorted_all_predictions = torch.gather(\n all_predictions, 1, sorted_indices.unsqueeze(-1).expand_as(all_predictions)\n )\n\n return sorted_all_predictions, sorted_final_scores\n\n def _update_initial_state(self, state: StateType, batch_size: int):\n \"\"\"\n Expand tensors in a state dictionary from `(batch_size, *)` to `(batch_size * beam_size, *)`.\n \"\"\"\n for key, state_tensor in state.items():\n if state_tensor is None:\n continue\n # shape: (batch_size * beam_size, *)\n _, *last_dims = state_tensor.size()\n state[key] = (\n state_tensor.unsqueeze(1)\n .expand(batch_size, self.beam_size, *last_dims)\n .reshape(batch_size * self.beam_size, *last_dims)\n )\n\n def _update_state(self, state: StateType, backpointer: torch.Tensor):\n batch_size = backpointer.size()[0]\n\n for key, state_tensor in state.items():\n if state_tensor is None:\n continue\n _, *last_dims = state_tensor.size()\n # shape: (batch_size, beam_size, *)\n expanded_backpointer = backpointer.view(batch_size, self.beam_size, *([1] * len(last_dims))).expand(\n batch_size, self.beam_size, *last_dims\n )\n # shape: (batch_size * beam_size, *)\n state[key] = (\n state_tensor.reshape(batch_size, self.beam_size, *last_dims)\n .gather(1, expanded_backpointer)\n .reshape(batch_size * self.beam_size, *last_dims)\n )"
},
{
"identifier": "Constraint",
"path": "olmo/beam_search.py",
"snippet": "class Constraint:\n \"\"\"\n An abstract class that can be used to enforce constraints on the output predictions\n by manipulating the class log probabilities during beam search.\n\n A `Constraint` just has three methods that need to be implemented by subclasses:\n `init_state()`, `apply()` and `_update_state()`.\n\n `init_state()` takes one argument:\n\n - the batch size, an int\n\n It returns a constraint state, which is a nested list of dictionaries, with any state needed for subsequent\n calls to `apply()` and `update_state()`. The length of the outer list should be equal to `batch_size`.\n Each inner list should be of length 1.\n\n `apply()` takes two arguments:\n\n - the constraint state, which is a nested list of dictionaries. The length of the outer list is `batch_size`\n and the length of each inner list is `beam_size` except on the first time `apply()` is called when it is 1.\n - `class_log_probabilities`, a tensor of shape `(batch_size, beam_size, num_classes)` that contains the\n log probabilities for the classes during search. The first time `apply()` is called, `beam_size = 1`.\n\n The `apply()` method should return new `class_log_probabilities` that enforce the constraint\n for this step of beam search. For instance, it may prevent a specific class from being selected by setting\n the corresponding log probability to a negligible value such as `float(\"-inf\")` or\n `torch.finfo(class_log_probabilities.dtype).min`.\n\n `_update_state()` takes two arguments:\n\n - the copied parent constraint state, which is a nested list of dictionaries. `state[i][j]` contains the\n copied state for the parent of `last_prediction[i, j]`. It is unique to that batch and beam, so it can be\n directly edited in-place without affecting the others.\n - last_prediction, a tensor of shape `(batch_size, beam_size)` containing the predictions from the last\n step of beam search.\n\n The `_update_state()` function should return a new constraint state, a nested list of dictionaries of\n length `batch_size` and inner list of length `beam_size`, one for each of the predictions in `last_prediction`.\n\n \"\"\"\n\n @abstractmethod\n def init_state(\n self,\n batch_size: int,\n ) -> ConstraintStateType:\n raise NotImplementedError\n\n @abstractmethod\n def apply(\n self,\n state: ConstraintStateType,\n class_log_probabilities: torch.Tensor,\n ) -> torch.Tensor:\n raise NotImplementedError\n\n @staticmethod\n def _copy_state(\n state: ConstraintStateType,\n batch_size: int,\n beam_size: int,\n last_backpointer: Optional[torch.Tensor] = None,\n ) -> ConstraintStateType:\n \"\"\"\n Copies the `state` . This method copies the data in `state` using `copy.deepcopy()`. 
If this\n is not appropriate for your constraint, you will need to implement the copying yourself.\n \"\"\"\n new_state = []\n for i in range(batch_size):\n batch_state = []\n for j in range(beam_size):\n if last_backpointer is None:\n # This is the first prediction, so the backpointer is 0\n backpointer = 0\n else:\n backpointer = last_backpointer[i, j].item()\n batch_state.append(copy.deepcopy(state[i][backpointer])) # type: ignore\n new_state.append(batch_state)\n return new_state\n\n def update_state(\n self,\n state: ConstraintStateType,\n last_prediction: torch.Tensor,\n last_backpointer: Optional[torch.Tensor] = None,\n ) -> ConstraintStateType:\n batch_size, beam_size = last_prediction.size()\n new_state = self._copy_state(state, batch_size, beam_size, last_backpointer)\n return self._update_state(new_state, last_prediction)\n\n @abstractmethod\n def _update_state(\n self,\n state: ConstraintStateType,\n last_prediction: torch.Tensor,\n ) -> ConstraintStateType:\n raise NotImplementedError"
},
{
"identifier": "FinalSequenceScorer",
"path": "olmo/beam_search.py",
"snippet": "class FinalSequenceScorer:\n \"\"\"\n An abstract class that can be used to score the final generated sequences found\n by beam search. Given the predicted sequences and the corresponding log probabilities of\n those sequences, the class calculates and returns the final score of the sequences.\n\n The default implementation scores the sequences using the sum of the log probabilities of\n the sequence, which is passed as input.\n \"\"\"\n\n @abstractmethod\n def score(self, predictions: torch.Tensor, log_probabilities: torch.Tensor, end_index: int) -> torch.Tensor:\n \"\"\"\n Score the final predictions found by beam search.\n Returns a tensor of the final sequence scores of shape `(batch_size, beam_size)`.\n\n :param predictions: A tensor containing the initial predictions with shape `(batch_size, beam_size, max_steps)`.\n :param log_probabilities: A tensor containing the log probabilities of the sequence, defined as the sum\n of the log probabilities per token, with shape `(batch_size, beam_size)`.\n :param end_index: The index of the end symbol.\n\n \"\"\"\n raise NotImplementedError"
},
{
"identifier": "Sampler",
"path": "olmo/beam_search.py",
"snippet": "class Sampler:\n \"\"\"\n An abstract class that can be used to sample candidates (either nodes or beams)\n within `BeamSearch`.\n\n A `Sampler` just has three methods, `init_state()`, `sample_nodes()` and `sample_beams()`.\n\n `init_state()` takes three arguments:\n\n - a tensor of starting log probs with shape `(batch_size,, num_classes)`,\n - the batch size, an int,\n - and the number of classes, also an int.\n\n It returns a state dictionary with any state tensors needed for subsequent\n calls to `sample_nodes()` and `sample_beams()`.\n\n By default this method just returns an empty dictionary.\n\n Both `sample_nodes()` and `sample_beams()` should take three arguments:\n\n - tensor of normalized log probabilities with shape `(batch_size, num_examples)`,\n - an integer representing the number of samples to take for each example in the batch,\n - and a state dictionary which could contain any tensors needed for the `Sampler` to keep\n track of state.\n\n For `sample_nodes()`, `num_examples = num_classes`, but for `sample_beams`,\n `num_examples = beam_size * per_node_beam_size`.\n\n The return value should be a tuple containing:\n\n - a tensor of log probabilities of the sampled examples with shape `(batch_size, num_samples)`,\n - a tensor of indices of the sampled examples with shape `(batch_size, num_samples)`,\n - and the updated state dictionary.\n\n A default implementation of `sample_beams` is provided, which just deterministically\n picks the `k` examples with highest log probability.\n \"\"\"\n\n def init_state(\n self, start_class_log_probabilities: torch.Tensor, batch_size: int, num_classes: int\n ) -> StateType:\n del start_class_log_probabilities, batch_size, num_classes\n return {}\n\n @abstractmethod\n def sample_nodes(\n self, log_probs: torch.Tensor, per_node_beam_size: int, state: StateType\n ) -> Tuple[torch.Tensor, torch.Tensor, StateType]:\n raise NotImplementedError\n\n def sample_beams(\n self, log_probs: torch.Tensor, beam_size: int, state: StateType\n ) -> Tuple[torch.Tensor, torch.Tensor, StateType]:\n del state\n selected_log_probs, selected_indices = torch.topk(log_probs, beam_size, dim=-1)\n return selected_log_probs, selected_indices, {}"
},
{
"identifier": "ActivationType",
"path": "olmo/config.py",
"snippet": "class ActivationType(StrEnum):\n gelu = \"gelu\"\n relu = \"relu\"\n swiglu = \"swiglu\""
},
{
"identifier": "BlockType",
"path": "olmo/config.py",
"snippet": "class BlockType(StrEnum):\n sequential = \"sequential\"\n parallel = \"parallel\""
},
{
"identifier": "LayerNormType",
"path": "olmo/config.py",
"snippet": "class LayerNormType(StrEnum):\n default = \"default\"\n \"\"\"\n The default LayerNorm implementation, equivalent to PyTorch's built-in version.\n \"\"\"\n\n low_precision = \"low_precision\"\n \"\"\"\n A low-precision version of the default LayerNorm.\n \"\"\"\n\n rms = \"rms\"\n \"\"\"\n An RMSNorm implementation. When using ``torch.compile`` this is\n probably the fastest implementation.\n \"\"\"\n\n low_precision_rms = \"low_precision_rms\"\n \"\"\"\n A low-precision version of RMSNorm.\n \"\"\""
},
{
"identifier": "ModelConfig",
"path": "olmo/config.py",
"snippet": "class ModelConfig(BaseConfig):\n \"\"\"\n OLMo (model) configuration.\n \"\"\"\n\n # Note that the defaults for these attributes are equivalent to the base GPT2 model.\n\n d_model: int = 768\n \"\"\"\n The hidden size of the model.\n \"\"\"\n\n n_heads: int = 12\n \"\"\"\n The number of self-attention heads.\n \"\"\"\n\n n_layers: int = 12\n \"\"\"\n The number of layers/blocks.\n \"\"\"\n\n mlp_ratio: int = 4\n \"\"\"\n The ratio of the inner MLP dimensionality to ``d_model``.\n \"\"\"\n\n activation_type: ActivationType = ActivationType.swiglu\n \"\"\"\n The activation function to use within the MLP layers.\n \"\"\"\n\n block_type: BlockType = BlockType.sequential\n \"\"\"\n The transformer block implementation.\n \"\"\"\n\n alibi: bool = False\n \"\"\"\n If ``True``, use ALiBi embeddings. Mutually exclusive with ``rope``.\n \"\"\"\n\n alibi_bias_max: float = 8.0\n \"\"\"\n Maximum absolute value of ALiBi bias.\n \"\"\"\n\n rope: bool = False\n \"\"\"\n Use rotary positional embeddings (RoPE). Mutually exclusive with ``alibi``.\n \"\"\"\n\n flash_attention: bool = False\n \"\"\"\n If ``True``, use ``FlashAttention``.\n \"\"\"\n\n attention_dropout: float = 0.1\n \"\"\"\n The dropout probability within the attention modules.\n \"\"\"\n\n multi_query_attention: bool = False\n \"\"\"\n Use the Multi-Query formulation of attention used in PaLM. This reduces the number of parameters\n and is more efficient during inference.\n \"\"\"\n\n attention_layer_norm: bool = False\n \"\"\"\n Apply layer norm to the keys and queries within the attention mechanism.\n This can help stabilize training.\n \"\"\"\n\n residual_dropout: float = 0.1\n \"\"\"\n The dropout probability for the MLP and attention output within each block.\n \"\"\"\n\n embedding_dropout: float = 0.1\n \"\"\"\n The dropout probability for embeddings.\n \"\"\"\n\n layer_norm_type: LayerNormType = LayerNormType.default\n \"\"\"\n The layernorm implementation to use.\n \"\"\"\n\n max_sequence_length: int = 1024\n \"\"\"\n The maximum input sequence length supported by the model.\n \"\"\"\n\n include_bias: bool = True\n \"\"\"\n Whether or not to include bias parameters in linear layers.\n In PaLM, they got rid of all bias terms because they found that large\n models tend to have near 0 bias terms anyway.\n \"\"\"\n\n vocab_size: int = 50257\n \"\"\"\n Vocabulary size of the model.\n \"\"\"\n\n embedding_size: Optional[int] = 50304\n \"\"\"\n The number of embeddings, i.e. the number of tokens. If set to ``None`` it will default\n to ``vocab_size``. If ``vocab_size`` is not a multiple of 128, setting this to the\n next multiple of 128 that's greater than ``vocab_size`` can improve throughput\n substantially.\n \"\"\"\n\n eos_token_id: int = 50256\n \"\"\"\n The ID of the end-of-sentence special token.\n \"\"\"\n\n pad_token_id: int = 50256\n \"\"\"\n The ID of the token to use for padding. Defaults to the ID of the EOS token.\n \"\"\"\n\n init_device: Optional[str] = None\n \"\"\"\n The torch device to use when initializing the model parameters, e.g. \"cpu\", \"cuda:0\", \"meta\".\n \"\"\"\n\n init_std: float = 0.02\n \"\"\"\n Standard deviation used when initializing parameters.\n \"\"\"\n\n precision: Optional[str] = None\n \"\"\"\n Precision used to train/evaluate with. You shouldn't set this directly.\n See :data:`TrainConfig.precision` instead.\n \"\"\""
},
{
"identifier": "OlmoConfigurationError",
"path": "olmo/exceptions.py",
"snippet": "class OlmoConfigurationError(OlmoError):\n \"\"\"\n An error with a configuration file.\n \"\"\""
}
] | import math
import os
import torch
import torch.backends.cuda
import torch.nn as nn
import torch.nn.functional as F
import warnings
from abc import abstractmethod
from typing import Dict, List, NamedTuple, Optional, Sequence, Tuple, cast
from torch import einsum
from .aliases import PathOrStr
from .beam_search import BeamSearch, Constraint, FinalSequenceScorer, Sampler
from .config import ActivationType, BlockType, LayerNormType, ModelConfig
from .exceptions import OlmoConfigurationError
from functools import partial
from cached_path import cached_path | 8,310 | """
Adapted from
[MosaicML](https://github.com/mosaicml/examples.git) and
[minGPT](https://github.com/karpathy/minGPT.git)
"""
from __future__ import annotations
__all__ = [
"LayerNormBase",
"LayerNorm",
"RMSLayerNorm",
"RotaryEmbedding",
"Activation",
"GELU",
"ReLU",
"SwiGLU",
"OlmoBlock",
"OlmoSequentialBlock",
"OlmoParallelBlock",
"Olmo",
"OlmoOutput",
"OlmoGenerateOutput",
]
class MatformerManager:
    _instance = None
    def __init__(self):
        raise RuntimeError("Call get_instance() instead")
    def initialize(self):
        self.current_factor = 1
    @classmethod
    def get_instance(cls):
        if cls._instance is None:
            cls._instance = cls.__new__(cls)
            cls._instance.initialize()
        return cls._instance
class LayerNormBase(nn.Module):
| """
Adapted from
[MosaicML](https://github.com/mosaicml/examples.git) and
[minGPT](https://github.com/karpathy/minGPT.git)
"""
from __future__ import annotations
__all__ = [
"LayerNormBase",
"LayerNorm",
"RMSLayerNorm",
"RotaryEmbedding",
"Activation",
"GELU",
"ReLU",
"SwiGLU",
"OlmoBlock",
"OlmoSequentialBlock",
"OlmoParallelBlock",
"Olmo",
"OlmoOutput",
"OlmoGenerateOutput",
]
class MatformerManager:
    _instance = None
    def __init__(self):
        raise RuntimeError("Call get_instance() instead")
    def initialize(self):
        self.current_factor = 1
    @classmethod
    def get_instance(cls):
        if cls._instance is None:
            cls._instance = cls.__new__(cls)
            cls._instance.initialize()
        return cls._instance
class LayerNormBase(nn.Module): | def __init__(self, config: ModelConfig): | 8 | 2023-11-14 02:24:07+00:00 | 12k |
1in-oos/ccplus | caringcaribou/tests/test_module_uds.py | [
{
"identifier": "Constants",
"path": "caringcaribou/utils/iso14229_1.py",
"snippet": "class Constants(object):\n # NR_SI (Negative Response Service Identifier) is a bit special, since\n # it is not a service per se.\n # From ISO-14229-1 specification: \"The NR_SI value is co-ordinated with\n # the SI values. The NR_SI value is not used as a SI value in order to\n # make A_Data coding and decoding easier.\"\n NR_SI = 0x7F"
},
{
"identifier": "Iso14229_1",
"path": "caringcaribou/utils/iso14229_1.py",
"snippet": "class Iso14229_1(object):\n P3_CLIENT = 5\n\n def __init__(self, tp):\n self.tp = tp\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass\n\n @staticmethod\n def get_service_response_id(request_id):\n \"\"\"\n Returns the service response ID for the given request ID\n\n :param request_id: Request service ID\n :return: Corresponding response service ID\n \"\"\"\n return request_id + 0x40\n\n @staticmethod\n def get_service_request_id(response_id):\n \"\"\"\n Returns the service request ID for the given response ID\n\n :param response_id: Response service ID\n :return: Corresponding request service ID\n \"\"\"\n return response_id - 0x40\n\n def send_request(self, data):\n \"\"\"\n Sends a request message containing 'data' through the underlying\n TP layer\n\n :param data: The data to send\n :return: None\n \"\"\"\n return self.tp.send_request(data)\n\n def send_response(self, data):\n \"\"\"\n Sends a response message containing 'data' through the underlying\n TP layer\n\n :param data: The data to send\n :return: None\n \"\"\"\n return self.tp.send_response(data)\n\n def receive_response(self, wait_window):\n \"\"\"\n Attempts to receive a response through the underlying TP layer\n\n :param wait_window: Minimum time (in seconds) to wait before timeout\n :return: The received response if successful,\n None otherwise\n \"\"\"\n start_time = time.process_time()\n while True:\n current_time = time.process_time()\n if (current_time - start_time) > wait_window:\n return None\n\n response = self.tp.indication(wait_window)\n NRC = NegativeResponseCodes\n NRC_RCRRP = NRC.REQUEST_CORRECTLY_RECEIVED_RESPONSE_PENDING\n if response is not None and len(response) >= 3:\n if (response[0] == Constants.NR_SI and\n response[2] == NRC_RCRRP):\n continue\n break\n return response\n\n @staticmethod\n def is_positive_response(response):\n \"\"\"\n Returns a bool indicating whether 'response' is positive\n\n :param response: ISO-14229-1 response data\n :return: False if response is a NEGATIVE_RESPONSE,\n True otherwise\n \"\"\"\n if (response is not None and\n len(response) > 0 and\n response[0] != Constants.NR_SI):\n return True\n return False\n\n def read_data_by_identifier(self, identifier):\n \"\"\"\n Sends a \"read data by identifier\" request for 'identifier'\n\n :param identifier: Data identifier\n :return: Response data if successful,\n None otherwise\n \"\"\"\n response = []\n num_dids = len(identifier)\n if num_dids > 0:\n request = [0] * ((num_dids * 2) + 1)\n request[0] = ServiceID.READ_DATA_BY_IDENTIFIER\n for i in range(0, num_dids):\n request[i * 2 + 1] = (identifier[i] >> 8) & 0xFF\n request[i * 2 + 2] = identifier[i] & 0xFF\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n return response\n\n def read_memory_by_address(self, address_and_length_format,\n memory_address, memory_size):\n \"\"\"\n Sends a \"read memory by address\" request for 'memory_address'\n\n :param address_and_length_format: Address and length format\n :param memory_address: Memory address\n :param memory_size: Memory size\n :return: Response data if successful,\n None otherwise\n \"\"\"\n addr_sz_fmt = (address_and_length_format >> 4) & 0xF\n data_sz_fmt = (address_and_length_format & 0xF)\n\n request = [0] * (1 + 1 + addr_sz_fmt + data_sz_fmt)\n request[0] = ServiceID.READ_MEMORY_BY_ADDRESS\n request[1] = address_and_length_format\n offset = 2\n for i in (range(0, addr_sz_fmt)):\n request[addr_sz_fmt + offset - i - 1] = (memory_address & 
0xFF)\n memory_address = (memory_address >> 8)\n\n offset += addr_sz_fmt\n\n for i in (range(0, data_sz_fmt)):\n request[data_sz_fmt + offset - i - 1] = (memory_size & 0xFF)\n memory_size = (memory_size >> 8)\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def write_memory_by_address(self, address_and_length_format,\n memory_address, memory_size, data):\n \"\"\"\n Sends a \"write memory by address\" request to write 'data' to\n 'memory_address'\n\n :param address_and_length_format: Address and length format\n :param memory_address: Memory address\n :param memory_size: Memory size\n :param data: The data to write to 'memory_address'\n :return: Response data if successful,\n None otherwise\n \"\"\"\n addr_sz_fmt = (address_and_length_format >> 4) & 0xF\n data_sz_fmt = (address_and_length_format & 0xF)\n\n request = [0] * (1 + 1 + addr_sz_fmt + data_sz_fmt)\n request[0] = ServiceID.WRITE_MEMORY_BY_ADDRESS\n request[1] = address_and_length_format\n offset = 2\n for i in (range(0, addr_sz_fmt)):\n request[addr_sz_fmt + offset - i - 1] = (memory_address & 0xFF)\n memory_address = (memory_address >> 8)\n\n offset += addr_sz_fmt\n\n for i in (range(0, data_sz_fmt)):\n request[data_sz_fmt + offset - i - 1] = (memory_size & 0xFF)\n memory_size = (memory_size >> 8)\n\n request += data\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def write_data_by_identifier(self, identifier, data):\n \"\"\"\n Sends a \"write data by identifier\" request to write 'data' to\n 'identifier'\n\n :param identifier: Data identifier\n :param data: Data to write to 'identifier'\n :return: Response data if successful,\n None otherwise\n \"\"\"\n request = [0] * (1 + 2)\n\n request[0] = ServiceID.WRITE_DATA_BY_IDENTIFIER\n request[1] = (identifier >> 8) & 0xFF\n request[2] = identifier & 0xFF\n request += data\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def input_output_control_by_identifier(self, identifier, data):\n \"\"\"\n Sends a \"input output control by identifier\" request for 'data' to\n 'identifier'\n\n :param identifier: Data identifier\n :param data: Data\n :return: Response data if successful,\n None otherwise\n \"\"\"\n request = [0] * (1 + 2)\n\n request[0] = ServiceID.INPUT_OUTPUT_CONTROL_BY_IDENTIFIER\n request[1] = (identifier >> 8) & 0xFF\n request[2] = identifier & 0xFF\n request += data\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def dynamically_define_data_identifier(self, identifier,\n sub_function, sub_function_arg):\n \"\"\"\n Sends a \"dynamically define data identifier\" request for\n 'identifier'\n\n :param identifier: DDDID to set\n :param sub_function: Sub function\n :param sub_function_arg: Sub function arguments\n :return: Response data if successful,\n None otherwise\n \"\"\"\n if (identifier is None or\n sub_function is None or\n sub_function_arg is None):\n return None\n\n request = [0] * (1 + 1 + 2 + len(sub_function_arg) * 4)\n request[0] = ServiceID.DYNAMICALLY_DEFINE_DATA_IDENTIFIER\n request[1] = sub_function\n request[2] = (identifier >> 8) & 0xFF\n request[3] = identifier & 0xFF\n\n offset = 4\n for did in sub_function_arg:\n request[offset + 0] = (did.sourceDataIdentifier >> 8) & 0xFF\n request[offset + 1] = did.sourceDataIdentifier & 0xFF\n request[offset + 2] = did.positionInSourceDataRecord\n request[offset + 3] = did.memorySize\n 
offset += 4\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def diagnostic_session_control(self, session_type):\n \"\"\"\n Sends a \"DiagnosticSessionControl\" request for specified session\n type\n\n :param session_type: Indicates which kind of session should be\n requested\n :return: Response data if successful,\n None otherwise\n \"\"\"\n request = [0] * 2\n request[0] = ServiceID.DIAGNOSTIC_SESSION_CONTROL\n request[1] = session_type\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def ecu_reset(self, reset_type):\n \"\"\"\n Sends an \"ECU reset\" request for specified reset type\n\n :param reset_type: Indicates which kind of reset should be requested\n :return: Response data if successful,\n None otherwise\n \"\"\"\n request = [0] * 2\n request[0] = ServiceID.ECU_RESET\n request[1] = reset_type\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def security_access_request_seed(self, level, data_record=None):\n \"\"\"\n Sends a Security Access \"Request seed\" message for 'level'\n\n :param level: Security Access Type level to send request seed for\n :param data_record: Optional data to transmit when requesting seed,\n e.g. client identification\n :return: Response data (containing seed) if successful,\n None otherwise\n \"\"\"\n service_id = ServiceID.SECURITY_ACCESS\n request = [service_id, level]\n if data_record:\n for data_record in data_record:\n request.append(data_record)\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def security_access_send_key(self, level, key):\n \"\"\"\n Sends a Security Access \"Send key\" message with 'key' for 'level'\n\n :param level: Security Access Type level to send key for\n :param key: Key to transmit\n :return: Response data if successful,\n None otherwise\n \"\"\"\n service_id = ServiceID.SECURITY_ACCESS\n request = [service_id, level]\n for key_byte in key:\n request.append(key_byte)\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def read_data_by_periodic_identifier(self, transmission_mode,\n identifier):\n \"\"\"\n Sends a \"read data by periodic identifier\" request for 'identifier'\n\n :param transmission_mode: Transmission mode\n :param identifier: Identifier\n :return: Response data if successful,\n None otherwise\n \"\"\"\n if (transmission_mode is None or\n identifier is None or\n len(identifier) == 0):\n return None\n\n request = [0] * (2 + len(identifier))\n request[0] = ServiceID.READ_DATA_BY_PERIODIC_IDENTIFIER\n request[1] = transmission_mode\n\n for i in range(0, len(identifier)):\n request[2 + i] = identifier[i]\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response"
},
{
"identifier": "NegativeResponseCodes",
"path": "caringcaribou/utils/iso14229_1.py",
"snippet": "class NegativeResponseCodes(object):\n \"\"\"\n ISO-14229-1 negative response codes\n \"\"\"\n POSITIVE_RESPONSE = 0x00\n # 0x01-0x0F ISO SAE Reserved\n GENERAL_REJECT = 0x10\n SERVICE_NOT_SUPPORTED = 0x11\n SUB_FUNCTION_NOT_SUPPORTED = 0x12\n INCORRECT_MESSAGE_LENGTH_OR_INVALID_FORMAT = 0x13\n RESPONSE_TOO_LONG = 0x14\n # 0x15-0x20 ISO SAE Reserved\n BUSY_REPEAT_REQUEST = 0x21\n CONDITIONS_NOT_CORRECT = 0x22\n # 0x23 ISO SAE Reserved\n REQUEST_SEQUENCE_ERROR = 0x24\n NO_RESPONSE_FROM_SUBNET_COMPONENT = 0x25\n FAILURE_PREVENTS_EXECUTION_OF_REQUESTED_ACTION = 0x26\n # 0x27-0x30 ISO SAE Reserved\n REQUEST_OUT_OF_RANGE = 0x31\n # 0x32 ISO SAE Reserved\n SECURITY_ACCESS_DENIED = 0x33\n # 0x34 ISO SAE Reserved\n INVALID_KEY = 0x35\n EXCEEDED_NUMBER_OF_ATTEMPTS = 0x36\n REQUIRED_TIME_DELAY_NOT_EXPIRED = 0x37\n # 0x38-0x4F Reserved by extended data link security document\n # 0x50-0x6F ISO SAE Reserved\n UPLOAD_DOWNLOAD_NOT_ACCEPTED = 0x70\n TRANSFER_DATA_SUSPENDED = 0x71\n GENERAL_PROGRAMMING_FAILURE = 0x72\n WRONG_BLOCK_SEQUENCE_COUNTER = 0x73\n # 0x74-0x77 ISO SAE Reserved\n REQUEST_CORRECTLY_RECEIVED_RESPONSE_PENDING = 0x78\n # 0x79-0x7D ISO SAE Reserved\n SUB_FUNCTION_NOT_SUPPORTED_IN_ACTIVE_SESSION = 0x7E\n SERVICE_NOT_SUPPORTED_IN_ACTIVE_SESSION = 0x7F\n # 0x80 ISO SAE Reserved\n RPM_TOO_HIGH = 0x81\n RPM_TOO_LOW = 0x82\n ENGINE_IS_RUNNING = 0x83\n ENGINE_IS_NOT_RUNNING = 0x84\n ENGINE_RUN_TIME_TOO_LOW = 0x85\n TEMPERATURE_TOO_HIGH = 0x86\n TEMPERATURE_TOO_LOW = 0x87\n VEHICLE_SPEED_TOO_HIGH = 0x88\n VEHICLE_SPEED_TOO_LOW = 0x89\n THROTTLE_PEDAL_TOO_HIGH = 0x8A\n THROTTLE_PEDAL_TOO_LOW = 0x8B\n TRANSMISSION_RANGE_NOT_IN_NEUTRAL = 0x8C\n TRANSMISSION_RANGE_NOT_IN_GEAR = 0x8D\n # 0x8E ISO SAE Reserved\n BRAKE_SWITCHES_NOT_CLOSED = 0x8F\n SHIFT_LEVER_NOT_IN_PARK = 0x90\n TORQUE_CONVERTER_CLUTCH_LOCKED = 0x91\n VOLTAGE_TOO_HIGH = 0x92\n VOLTAGE_TOO_LOW = 0x93\n # 0x94-0xEF Reserved for specific conditions not correct\n # 0xF0-0xFE Vehicle manufacturer specific conditions not correct\n # 0xFF ISO SAE Reserved"
},
{
"identifier": "ServiceID",
"path": "caringcaribou/utils/iso14229_1.py",
"snippet": "class ServiceID(object):\n \"\"\"\n ISO-14229-1 service ID definitions\n \"\"\"\n DIAGNOSTIC_SESSION_CONTROL = 0x10\n ECU_RESET = 0x11\n CLEAR_DIAGNOSTIC_INFORMATION = 0x14\n READ_DTC_INFORMATION = 0x19\n READ_DATA_BY_IDENTIFIER = 0x22\n READ_MEMORY_BY_ADDRESS = 0x23\n READ_SCALING_DATA_BY_IDENTIFIER = 0x24\n SECURITY_ACCESS = 0x27\n COMMUNICATION_CONTROL = 0x28\n READ_DATA_BY_PERIODIC_IDENTIFIER = 0x2A\n DYNAMICALLY_DEFINE_DATA_IDENTIFIER = 0x2C\n WRITE_DATA_BY_IDENTIFIER = 0x2E\n INPUT_OUTPUT_CONTROL_BY_IDENTIFIER = 0x2F\n ROUTINE_CONTROL = 0x31\n REQUEST_DOWNLOAD = 0x34\n REQUEST_UPLOAD = 0x35\n TRANSFER_DATA = 0x36\n REQUEST_TRANSFER_EXIT = 0x37\n REQUEST_FILE_TRANSFER = 0x38\n WRITE_MEMORY_BY_ADDRESS = 0x3D\n TESTER_PRESENT = 0x3E\n ACCESS_TIMING_PARAMETER = 0x83\n SECURED_DATA_TRANSMISSION = 0x84\n CONTROL_DTC_SETTING = 0x85\n RESPONSE_ON_EVENT = 0x86\n LINK_CONTROL = 0x87"
},
{
"identifier": "Services",
"path": "caringcaribou/utils/iso14229_1.py",
"snippet": "class Services(object):\n \"\"\"Class structure containing service specific constants, sub-function\n parameters and functions\"\"\"\n\n class DiagnosticSessionControl(BaseService):\n\n service_id = ServiceID.DIAGNOSTIC_SESSION_CONTROL\n\n class DiagnosticSessionType(object):\n # 0x00 ISO SAE Reserved\n DEFAULT_SESSION = 0x01\n PROGRAMMING_SESSION = 0x02\n EXTENDED_DIAGNOSTIC_SESSION = 0x03\n SAFETY_SYSTEM_DIAGNOSTIC_SESSION = 0x04\n # 0x05-0x3F ISO SAE Reserved\n # 0x40-0x5F Vehicle manufacturer specific\n VEHICLE_MANUFACTURER_SESSION_MIN = 0x40\n VEHICLE_MANUFACTURER_SESSION_MAX = 0x5F\n # 0x60-0x7E System supplier specific\n SYSTEM_SUPPLIER_SESSION_MIN = 0x60\n SYSTEM_SUPPLIER_SESSION_MAX = 0x7E\n # 0x7F ISO SAE Reserved\n\n class EcuReset(BaseService):\n\n service_id = ServiceID.ECU_RESET\n\n class ResetType(object):\n # 0x00 ISO SAE Reserved\n HARD_RESET = 0x01\n KEY_OFF_ON_RESET = 0x02\n SOFT_RESET = 0x03\n ENABLE_RAPID_POWER_SHUTDOWN = 0x04\n DISABLE_RAPID_POWER_SHUTDOWN = 0x05\n # 0x06-0x3F ISO SAE Reserved\n # 0x40-0x5F Vehicle manufacturer specific\n # 0x60-0x7E System supplier specific\n # 0x7F ISO SAE Reserved\n\n class SecurityAccess(BaseService):\n\n service_id = ServiceID.SECURITY_ACCESS\n\n class RequestSeedOrSendKey(object):\n \"\"\"\n These are lined up so that value X \"request seed level N\" has\n a matching \"send key level N\" at value X+1.\n\n 0x01 is Request seed level 0x01\n 0x02 is Send key level 0x01\n 0x03 is Request seed level 0x02\n 0x04 is Send key level 0x02\n (...)\n 0x41 is Request seed level 0x21\n 0x42 is Send key level 0x21\n\n The security levels numbering is arbitrary and does not imply\n any relationship between the levels.\n \"\"\"\n\n # 0x00 ISO SAE Reserved\n # 0x01-0x42 Vehicle manufacturer specific request\n # seed/send key pairs\n # 0x43-0X5E ISO SAE Reserved\n ISO_26021_2_VALUES = 0x5F\n ISO_26021_2_SEND_KEY = 0x60\n # 0x61-0x7E System supplier specific\n # 0x7F ISO SAE Reserved\n\n __REQUEST_SEED_MIN = 0x01\n __REQUEST_SEED_MAX = 0x41\n __SEND_KEY_MIN = 0x02\n __SEND_KEY_MAX = 0x42\n\n def is_valid_request_seed_level(self, sub_function):\n \"\"\"Returns True if 'sub_function' is a valid request seed\n value and False otherwise\"\"\"\n value = sub_function & 0x7F\n valid_interval = (self.__REQUEST_SEED_MIN\n <= value <= self.__REQUEST_SEED_MAX)\n is_odd = value % 2 == 1\n return valid_interval and is_odd\n\n def is_valid_send_key_level(self, sub_function):\n \"\"\"Returns True if 'sub_function' is a valid send key value\n and False otherwise\"\"\"\n value = sub_function & 0x7F\n valid_interval = (self.__SEND_KEY_MIN\n <= value <= self.__SEND_KEY_MAX)\n is_even = value % 2 == 0\n return valid_interval and is_even\n\n @staticmethod\n def get_send_key_for_request_seed(seed):\n return seed + 1\n\n class TesterPresent(BaseService):\n\n service_id = ServiceID.TESTER_PRESENT"
},
{
"identifier": "MockEcuIso14229",
"path": "caringcaribou/tests/mock/mock_ecu_uds.py",
"snippet": "class MockEcuIso14229(MockEcuIsoTp, MockEcu):\n \"\"\"ISO-14229-1 (Unified Diagnostic Services) mock ECU handler\"\"\"\n\n IDENTIFIER_REQUEST_POSITIVE = 0x01\n IDENTIFIER_REQUEST_POSITIVE_RESPONSE = 0x72\n IDENTIFIER_REQUEST_NEGATIVE = 0x02\n\n REQUEST_IDENTIFIER_VALID = 0xA001\n REQUEST_IDENTIFIER_INVALID = 0xA002\n REQUEST_VALUE = [0xC0, 0xFF, 0xEE]\n\n REQUEST_ADDRESS_LENGTH_AND_FORMAT = 0x22\n REQUEST_ADDRESS = 0x0001\n REQUEST_DATA_SIZE = 0x10\n DATA = list(range(0x14))\n\n # TODO Use dynamic seed value and verify keys using a simple algorithm\n SECURITY_ACCESS_SEED = [0x36, 0x57]\n SECURITY_ACCESS_KEY = [0xC9, 0xA9]\n\n def __init__(self, arb_id_request, arb_id_response, bus=None):\n MockEcu.__init__(self, bus)\n self.ARBITRATION_ID_ISO_14229_REQUEST = arb_id_request\n self.ARBITRATION_ID_ISO_14229_RESPONSE = arb_id_response\n # Set CAN filter to only listen to incoming requests on the correct arbitration ID\n arbitration_id_filter = [{\"can_id\": arb_id_request, \"can_mask\": 0x1fffffff}]\n self.bus.set_filters(arbitration_id_filter)\n # Setup ISO-TP using the filtered bus\n self.iso_tp = IsoTp(arb_id_request=self.ARBITRATION_ID_ISO_14229_REQUEST,\n arb_id_response=self.ARBITRATION_ID_ISO_14229_RESPONSE,\n bus=self.bus)\n # Setup diagnostics on top of ISO-TP\n self.diagnostics = Iso14229_1(tp=self.iso_tp)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n MockEcuIsoTp.__exit__(self, None, None, None)\n\n @staticmethod\n def create_positive_response(request_service_id, response_data=None):\n \"\"\"\n Returns data for a positive response of 'request_service_id' with an optional 'response_data' payload\n\n :param request_service_id: Service ID (SIDRQ) of the incoming request\n :param response_data: List of data bytes to transmit in the response\n :return: List of bytes to be sent as data payload in the response\n \"\"\"\n # Positive response uses a response service ID (SIDPR) based on the request service ID (SIDRQ)\n service_response_id = Iso14229_1.get_service_response_id(request_service_id)\n response = [service_response_id]\n # Append payload\n if response_data is not None:\n response += response_data\n return response\n\n @staticmethod\n def create_negative_response(request_service_id, nrc):\n \"\"\"\n Returns data for a negative response of 'request_service_id' with negative response code 'nrc'\n\n :param request_service_id: Service ID (SIDRQ) of the incoming request\n :param nrc: Negative response code (NRC_)\n :return: List of bytes to be sent as data payload in the response\n \"\"\"\n response = [Constants.NR_SI,\n request_service_id,\n nrc]\n return response\n\n def message_handler(self, data):\n \"\"\"\n Logic for responding to incoming messages\n\n :param data: list of data bytes in incoming message\n :return: None\n \"\"\"\n assert isinstance(data, list)\n try:\n service_id = data[0]\n # Handle different services\n if service_id == ServiceID.DIAGNOSTIC_SESSION_CONTROL:\n # 0x10 Diagnostic session control\n response_data = self.handle_diagnostic_session_control(data)\n elif service_id == ServiceID.ECU_RESET:\n # 0x11 ECU reset\n response_data = self.handle_ecu_reset(data)\n elif service_id == ServiceID.READ_DATA_BY_IDENTIFIER:\n # 0x22 Read data by identifier\n response_data = self.handle_read_data_by_identifier(data)\n elif service_id == ServiceID.READ_MEMORY_BY_ADDRESS:\n # 0x23 Read memory by address\n response_data = self.handle_read_memory_by_address(data)\n elif service_id == ServiceID.SECURITY_ACCESS:\n # 0x27 Security access\n response_data = 
self.handle_security_access(data)\n elif service_id == ServiceID.WRITE_DATA_BY_IDENTIFIER:\n # 0x2E Write data by identifier\n response_data = self.handle_write_data_by_identifier(data)\n else:\n # Unsupported service\n response_data = self.handle_unsupported_service(data)\n except IndexError:\n # Parsing failed due to invalid message structure\n response_data = self.handle_service_error(data)\n\n # This check makes it possible to support services where a response should not be sent\n if response_data is not None:\n # Simulate a small delay before responding\n time.sleep(self.DELAY_BEFORE_RESPONSE)\n self.diagnostics.send_response(response_data)\n\n def handle_unsupported_service(self, data):\n \"\"\"Provides a standard response for unmapped services, by responding with NRC Service Not Supported\"\"\"\n service_id = data[0]\n nrc = NegativeResponseCodes.SERVICE_NOT_SUPPORTED\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_service_error(self, data):\n \"\"\"Provides a standard response for failed service requests\"\"\"\n service_id = data[0]\n nrc = NegativeResponseCodes.INCORRECT_MESSAGE_LENGTH_OR_INVALID_FORMAT\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_diagnostic_session_control(self, data):\n \"\"\"Evaluates a diagnostic session control request and returns a response\"\"\"\n service_id = data[0]\n # TODO Handle different values?\n session_type = data[1]\n response_data = self.create_positive_response(service_id)\n return response_data\n\n def handle_read_data_by_identifier(self, data):\n \"\"\"\n Evaluates a read data by identifier request and returns the appropriate response\n\n :param data: Data from incoming request\n :return: Response to be sent\n \"\"\"\n service_id = data[0]\n request = data[2]\n\n if request == self.IDENTIFIER_REQUEST_POSITIVE:\n # Request for positive response\n # TODO Actually read a parameter from memory\n payload = [self.IDENTIFIER_REQUEST_POSITIVE_RESPONSE]\n response_data = self.create_positive_response(service_id, payload)\n elif request == self.IDENTIFIER_REQUEST_NEGATIVE:\n # Request for negative response - use Conditions Not Correct\n nrc = NegativeResponseCodes.CONDITIONS_NOT_CORRECT\n response_data = self.create_negative_response(service_id, nrc)\n else:\n # Unmatched request - use a general reject response\n nrc = NegativeResponseCodes.GENERAL_REJECT\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_write_data_by_identifier(self, data):\n \"\"\"\n Evaluates a write data by identifier request and returns the appropriate response\n\n :param data: Data from incoming request\n :return: Response to be sent\n \"\"\"\n service_id = data[0]\n\n identifier_start_position = 1\n identifier_length = 2\n identifier = int_from_byte_list(data,\n identifier_start_position,\n identifier_length)\n request_data = data[3:]\n # TODO Actually write data to memory\n if identifier == self.REQUEST_IDENTIFIER_VALID:\n # Request for positive response\n # Standard specifies the response payload to be an echo of the data identifier from the request\n payload = data[identifier_start_position:identifier_start_position + identifier_length]\n response_data = self.create_positive_response(service_id, payload)\n elif identifier == self.REQUEST_IDENTIFIER_INVALID:\n # Request for negative response - use Conditions Not Correct\n nrc = NegativeResponseCodes.CONDITIONS_NOT_CORRECT\n response_data = 
self.create_negative_response(service_id, nrc)\n else:\n # Unmatched request - use a general reject response\n nrc = NegativeResponseCodes.GENERAL_REJECT\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_read_memory_by_address(self, data):\n \"\"\"\n Evaluates a read memory by address request and returns the appropriate response\n\n :param data: Data from incoming request\n :return: Response to be sent\n \"\"\"\n service_id = data[0]\n address_field_size = (data[1] >> 4) & 0xF\n data_length_field_size = (data[1] & 0xF)\n address_start_position = 2\n data_length_start_position = 4\n\n start_address = int_from_byte_list(data, address_start_position, address_field_size)\n data_length = int_from_byte_list(data, data_length_start_position, data_length_field_size)\n end_address = start_address + data_length\n if 0 <= start_address <= end_address <= len(self.DATA):\n memory_data = self.DATA[start_address:end_address]\n response_data = self.create_positive_response(service_id, memory_data)\n else:\n nrc = NegativeResponseCodes.REQUEST_OUT_OF_RANGE\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_ecu_reset(self, data):\n \"\"\"\n Evaluates an ECU reset request and returns the appropriate response\n\n :param data: Data from incoming request\n :return: Response to be sent\n \"\"\"\n service_id = data[0]\n subfunction = data[1]\n reset_type = subfunction & 0x7F\n suppress_positive_response = subfunction >> 7\n\n reset_types = Services.EcuReset.ResetType\n\n if reset_type in [reset_types.HARD_RESET, reset_types.KEY_OFF_ON_RESET, reset_types.SOFT_RESET]:\n if suppress_positive_response:\n response_data = None\n else:\n response_data = self.create_positive_response(service_id, [reset_type])\n else:\n nrc = NegativeResponseCodes.SUB_FUNCTION_NOT_SUPPORTED\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_security_access(self, data):\n \"\"\"\n Evaluates security access requests (both \"Request seed\" and \"Send key\") and returns the appropriate response\n\n :param data: Data from incoming request\n :return: Response to be sent\n \"\"\"\n service_id = data[0]\n subfunction = data[1]\n level = subfunction & 0x7F\n\n service_handler = Services.SecurityAccess.RequestSeedOrSendKey()\n if service_handler.is_valid_request_seed_level(level):\n # Request seed handling\n payload = [level]\n payload.extend(self.SECURITY_ACCESS_SEED)\n response_data = self.create_positive_response(service_id, payload)\n elif service_handler.is_valid_send_key_level(level):\n # Send key handling\n expected_key = self.SECURITY_ACCESS_KEY\n received_key = data[2:]\n if received_key == expected_key:\n # Correct key\n response_data = self.create_positive_response(service_id, [level])\n else:\n # Invalid key\n nrc = NegativeResponseCodes.INVALID_KEY\n response_data = self.create_negative_response(service_id, nrc)\n else:\n # Unsupported subfunction\n nrc = NegativeResponseCodes.SUB_FUNCTION_NOT_SUPPORTED\n response_data = self.create_negative_response(service_id, nrc)\n return response_data"
},
{
"identifier": "uds",
"path": "caringcaribou/modules/uds.py",
"snippet": "UDS_SERVICE_NAMES = {\n 0x10: \"DIAGNOSTIC_SESSION_CONTROL\",\n 0x11: \"ECU_RESET\",\n 0x14: \"CLEAR_DIAGNOSTIC_INFORMATION\",\n 0x19: \"READ_DTC_INFORMATION\",\n 0x20: \"RETURN_TO_NORMAL\",\n 0x22: \"READ_DATA_BY_IDENTIFIER\",\n 0x23: \"READ_MEMORY_BY_ADDRESS\",\n 0x24: \"READ_SCALING_DATA_BY_IDENTIFIER\",\n 0x27: \"SECURITY_ACCESS\",\n 0x28: \"COMMUNICATION_CONTROL\",\n 0x2A: \"READ_DATA_BY_PERIODIC_IDENTIFIER\",\n 0x2C: \"DYNAMICALLY_DEFINE_DATA_IDENTIFIER\",\n 0x2D: \"DEFINE_PID_BY_MEMORY_ADDRESS\",\n 0x2E: \"WRITE_DATA_BY_IDENTIFIER\",\n 0x2F: \"INPUT_OUTPUT_CONTROL_BY_IDENTIFIER\",\n 0x31: \"ROUTINE_CONTROL\",\n 0x34: \"REQUEST_DOWNLOAD\",\n 0x35: \"REQUEST_UPLOAD\",\n 0x36: \"TRANSFER_DATA\",\n 0x37: \"REQUEST_TRANSFER_EXIT\",\n 0x38: \"REQUEST_FILE_TRANSFER\",\n 0x3D: \"WRITE_MEMORY_BY_ADDRESS\",\n 0x3E: \"TESTER_PRESENT\",\n 0x7F: \"NEGATIVE_RESPONSE\",\n 0x83: \"ACCESS_TIMING_PARAMETER\",\n 0x84: \"SECURED_DATA_TRANSMISSION\",\n 0x85: \"CONTROL_DTC_SETTING\",\n 0x86: \"RESPONSE_ON_EVENT\",\n 0x87: \"LINK_CONTROL\"\n}\nNRC_NAMES = {\n 0x00: \"POSITIVE_RESPONSE\",\n 0x10: \"GENERAL_REJECT\",\n 0x11: \"SERVICE_NOT_SUPPORTED\",\n 0x12: \"SUB_FUNCTION_NOT_SUPPORTED\",\n 0x13: \"INCORRECT_MESSAGE_LENGTH_OR_INVALID_FORMAT\",\n 0x14: \"RESPONSE_TOO_LONG\",\n 0x21: \"BUSY_REPEAT_REQUEST\",\n 0x22: \"CONDITIONS_NOT_CORRECT\",\n 0x24: \"REQUEST_SEQUENCE_ERROR\",\n 0x25: \"NO_RESPONSE_FROM_SUBNET_COMPONENT\",\n 0x26: \"FAILURE_PREVENTS_EXECUTION_OF_REQUESTED_ACTION\",\n 0x31: \"REQUEST_OUT_OF_RANGE\",\n 0x33: \"SECURITY_ACCESS_DENIED\",\n 0x35: \"INVALID_KEY\",\n 0x36: \"EXCEEDED_NUMBER_OF_ATTEMPTS\",\n 0x37: \"REQUIRED_TIME_DELAY_NOT_EXPIRED\",\n 0x70: \"UPLOAD_DOWNLOAD_NOT_ACCEPTED\",\n 0x71: \"TRANSFER_DATA_SUSPENDED\",\n 0x72: \"GENERAL_PROGRAMMING_FAILURE\",\n 0x73: \"WRONG_BLOCK_SEQUENCE_COUNTER\",\n 0x78: \"REQUEST_CORRECTLY_RECEIVED_RESPONSE_PENDING\",\n 0x7E: \"SUB_FUNCTION_NOT_SUPPORTED_IN_ACTIVE_SESSION\",\n 0x7F: \"SERVICE_NOT_SUPPORTED_IN_ACTIVE_SESSION\",\n 0x81: \"RPM_TOO_HIGH\",\n 0x82: \"RPM_TOO_LOW\",\n 0x83: \"ENGINE_IS_RUNNING\",\n 0x84: \"ENGINE_IS_NOT_RUNNING\",\n 0x85: \"ENGINE_RUN_TIME_TOO_LOW\",\n 0x86: \"TEMPERATURE_TOO_HIGH\",\n 0x87: \"TEMPERATURE_TOO_LOW\",\n 0x88: \"VEHICLE_SPEED_TOO_HIGH\",\n 0x89: \"VEHICLE_SPEED_TOO_LOW\",\n 0x8A: \"THROTTLE_PEDAL_TOO_HIGH\",\n 0x8B: \"THROTTLE_PEDAL_TOO_LOW\",\n 0x8C: \"TRANSMISSION_RANGE_NOT_IN_NEUTRAL\",\n 0x8D: \"TRANSMISSION_RANGE_NOT_IN_GEAR\",\n 0x8F: \"BRAKE_SWITCHES_NOT_CLOSED\",\n 0x90: \"SHIFT_LEVER_NOT_IN_PARK\",\n 0x91: \"TORQUE_CONVERTER_CLUTCH_LOCKED\",\n 0x92: \"VOLTAGE_TOO_HIGH\",\n 0x93: \"VOLTAGE_TOO_LOW\"\n}\nDELAY_DISCOVERY = 0.01\nDELAY_TESTER_PRESENT = 0.5\nDELAY_SECSEED_RESET = 0.01\nTIMEOUT_SERVICES = 0.2\nTIMEOUT_SUBSERVICES = 0.02\nVERIFICATION_BACKTRACK = 5\nVERIFICATION_EXTRA_DELAY = 0.5\nBYTE_MIN = 0x00\nBYTE_MAX = 0xFF\nDUMP_DID_MIN = 0x0000\nDUMP_DID_MAX = 0xFFFF\nDUMP_DID_TIMEOUT = 0.2\n E=args.E\n E=args.E\ndef uds_discovery(E, min_id, max_id, blacklist_args, auto_blacklist_duration,\n delay, verify, print_results=True):\n def is_valid_response(message):\ndef __uds_discovery_wrapper(args):\ndef service_discovery(arb_id_request, arb_id_response, timeout,\n min_id=BYTE_MIN, max_id=BYTE_MAX, print_results=True):\ndef __service_discovery_wrapper(args):\ndef sub_discovery(arb_id_request, arb_id_response, diagnostic, service, timeout, print_results=True):\ndef __sub_discovery_wrapper(args):\ndef raw_send(arb_id_request, arb_id_response, service, session_type):\ndef 
tester_present(arb_id_request, delay, duration,\n suppress_positive_response):\ndef __tester_present_wrapper(args):\ndef ecu_reset(arb_id_request, arb_id_response, reset_type, timeout):\ndef __ecu_reset_wrapper(args):\ndef print_negative_response(response):\ndef __security_seed_wrapper(args):\ndef extended_session(arb_id_request, arb_id_response, session_type):\ndef request_seed(arb_id_request, arb_id_response, level,\n data_record, timeout):\ndef send_key(arb_id_request, arb_id_response, level, key, timeout):\ndef __dump_dids_wrapper(args):\ndef __auto_wrapper(args):\ndef dump_dids(arb_id_request, arb_id_response, timeout,\n min_did=DUMP_DID_MIN, max_did=DUMP_DID_MAX, print_results=True):\ndef __parse_args(args):\ndef module_main(arg_list):"
}
] | from caringcaribou.utils.iso14229_1 import Constants, Iso14229_1, NegativeResponseCodes, ServiceID, Services
from caringcaribou.tests.mock.mock_ecu_uds import MockEcuIso14229
from caringcaribou.modules import uds
import unittest | 9,807 | from __future__ import print_function
class UdsModuleTestCase(unittest.TestCase):
    ARB_ID_REQUEST = 0x300E
    ARB_ID_RESPONSE = 0x300F
    # Timeout (in seconds) when waiting for response during bruteforce
    BRUTEFORCE_TIMEOUT = 0.01
    def setUp(self):
        # Initialize mock ECU
| from __future__ import print_function
class UdsModuleTestCase(unittest.TestCase):
    ARB_ID_REQUEST = 0x300E
    ARB_ID_RESPONSE = 0x300F
    # Timeout (in seconds) when waiting for response during bruteforce
    BRUTEFORCE_TIMEOUT = 0.01
    def setUp(self):
        # Initialize mock ECU | self.ecu = MockEcuIso14229(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE) | 5 | 2023-11-13 05:05:46+00:00 | 12k
L1bra1/WeakMotion | gen_data/step2_waymo_generate_weak.py | [
{
"identifier": "Box",
"path": "gen_data/nuscenes/utils/data_classes.py",
"snippet": "class Box:\n \"\"\" Simple data class representing a 3d box including, label, score and velocity. \"\"\"\n\n def __init__(self,\n center: List[float],\n size: List[float],\n orientation: Quaternion,\n label: int = np.nan,\n score: float = np.nan,\n velocity: Tuple = (np.nan, np.nan, np.nan),\n name: str = None,\n token: str = None):\n \"\"\"\n :param center: Center of box given as x, y, z.\n :param size: Size of box in width, length, height.\n :param orientation: Box orientation.\n :param label: Integer label, optional.\n :param score: Classification score, optional.\n :param velocity: Box velocity in x, y, z direction.\n :param name: Box name, optional. Can be used e.g. for denote category name.\n :param token: Unique string identifier from DB.\n \"\"\"\n assert not np.any(np.isnan(center))\n assert not np.any(np.isnan(size))\n assert len(center) == 3\n assert len(size) == 3\n assert type(orientation) == Quaternion\n\n self.center = np.array(center)\n self.wlh = np.array(size)\n self.orientation = orientation\n self.label = int(label) if not np.isnan(label) else label\n self.score = float(score) if not np.isnan(score) else score\n self.velocity = np.array(velocity)\n self.name = name\n self.token = token\n\n def __eq__(self, other):\n center = np.allclose(self.center, other.center)\n wlh = np.allclose(self.wlh, other.wlh)\n orientation = np.allclose(self.orientation.elements, other.orientation.elements)\n label = (self.label == other.label) or (np.isnan(self.label) and np.isnan(other.label))\n score = (self.score == other.score) or (np.isnan(self.score) and np.isnan(other.score))\n vel = (np.allclose(self.velocity, other.velocity) or\n (np.all(np.isnan(self.velocity)) and np.all(np.isnan(other.velocity))))\n\n return center and wlh and orientation and label and score and vel\n\n def __repr__(self):\n repr_str = 'label: {}, score: {:.2f}, xyz: [{:.2f}, {:.2f}, {:.2f}], wlh: [{:.2f}, {:.2f}, {:.2f}], ' \\\n 'rot axis: [{:.2f}, {:.2f}, {:.2f}], ang(degrees): {:.2f}, ang(rad): {:.2f}, ' \\\n 'vel: {:.2f}, {:.2f}, {:.2f}, name: {}, token: {}'\n\n return repr_str.format(self.label, self.score, self.center[0], self.center[1], self.center[2], self.wlh[0],\n self.wlh[1], self.wlh[2], self.orientation.axis[0], self.orientation.axis[1],\n self.orientation.axis[2], self.orientation.degrees, self.orientation.radians,\n self.velocity[0], self.velocity[1], self.velocity[2], self.name, self.token)\n\n @property\n def rotation_matrix(self) -> np.ndarray:\n \"\"\"\n Return a rotation matrix.\n :return: <np.float: 3, 3>. The box's rotation matrix.\n \"\"\"\n return self.orientation.rotation_matrix\n\n def translate(self, x: np.ndarray) -> None:\n \"\"\"\n Applies a translation.\n :param x: <np.float: 3, 1>. Translation in x, y, z direction.\n \"\"\"\n self.center += x\n\n def rotate(self, quaternion: Quaternion) -> None:\n \"\"\"\n Rotates box.\n :param quaternion: Rotation to apply.\n \"\"\"\n self.center = np.dot(quaternion.rotation_matrix, self.center)\n self.orientation = quaternion * self.orientation\n self.velocity = np.dot(quaternion.rotation_matrix, self.velocity)\n\n def corners(self, wlh_factor: float = 1.0) -> np.ndarray:\n \"\"\"\n Returns the bounding box corners.\n :param wlh_factor: Multiply w, l, h by a factor to scale the box.\n :return: <np.float: 3, 8>. First four corners are the ones facing forward.\n The last four are the ones facing backwards.\n \"\"\"\n w, l, h = self.wlh * wlh_factor\n\n # 3D bounding box corners. 
(Convention: x points forward, y to the left, z up.)\n x_corners = l / 2 * np.array([1, 1, 1, 1, -1, -1, -1, -1])\n y_corners = w / 2 * np.array([1, -1, -1, 1, 1, -1, -1, 1])\n z_corners = h / 2 * np.array([1, 1, -1, -1, 1, 1, -1, -1])\n corners = np.vstack((x_corners, y_corners, z_corners))\n\n # Rotate\n corners = np.dot(self.orientation.rotation_matrix, corners)\n\n # Translate\n x, y, z = self.center\n corners[0, :] = corners[0, :] + x\n corners[1, :] = corners[1, :] + y\n corners[2, :] = corners[2, :] + z\n\n return corners\n\n def bottom_corners(self) -> np.ndarray:\n \"\"\"\n Returns the four bottom corners.\n :return: <np.float: 3, 4>. Bottom corners. First two face forward, last two face backwards.\n \"\"\"\n return self.corners()[:, [2, 3, 7, 6]]\n\n def render(self,\n axis: Axes,\n view: np.ndarray = np.eye(3),\n normalize: bool = False,\n colors: Tuple = ('b', 'r', 'k'),\n linewidth: float = 2) -> None:\n \"\"\"\n Renders the box in the provided Matplotlib axis.\n :param axis: Axis onto which the box should be drawn.\n :param view: <np.array: 3, 3>. Define a projection in needed (e.g. for drawing projection in an image).\n :param normalize: Whether to normalize the remaining coordinate.\n :param colors: (<Matplotlib.colors>: 3). Valid Matplotlib colors (<str> or normalized RGB tuple) for front,\n back and sides.\n :param linewidth: Width in pixel of the box sides.\n \"\"\"\n corners = view_points(self.corners(), view, normalize=normalize)[:2, :]\n\n def draw_rect(selected_corners, color):\n prev = selected_corners[-1]\n for corner in selected_corners:\n axis.plot([prev[0], corner[0]], [prev[1], corner[1]], color=color, linewidth=linewidth)\n prev = corner\n\n # Draw the sides\n for i in range(4):\n axis.plot([corners.T[i][0], corners.T[i + 4][0]],\n [corners.T[i][1], corners.T[i + 4][1]],\n color=colors[2], linewidth=linewidth)\n\n # Draw front (first 4 corners) and rear (last 4 corners) rectangles(3d)/lines(2d)\n draw_rect(corners.T[:4], colors[0])\n draw_rect(corners.T[4:], colors[1])\n\n # Draw line indicating the front\n center_bottom_forward = np.mean(corners.T[2:4], axis=0)\n center_bottom = np.mean(corners.T[[2, 3, 7, 6]], axis=0)\n axis.plot([center_bottom[0], center_bottom_forward[0]],\n [center_bottom[1], center_bottom_forward[1]],\n color=colors[0], linewidth=linewidth)\n\n def render_cv2(self,\n im: np.ndarray,\n view: np.ndarray = np.eye(3),\n normalize: bool = False,\n colors: Tuple = ((0, 0, 255), (255, 0, 0), (155, 155, 155)),\n linewidth: int = 2) -> None:\n \"\"\"\n Renders box using OpenCV2.\n :param im: <np.array: width, height, 3>. Image array. Channels are in BGR order.\n :param view: <np.array: 3, 3>. Define a projection if needed (e.g. for drawing projection in an image).\n :param normalize: Whether to normalize the remaining coordinate.\n :param colors: ((R, G, B), (R, G, B), (R, G, B)). 
Colors for front, side & rear.\n :param linewidth: Linewidth for plot.\n \"\"\"\n corners = view_points(self.corners(), view, normalize=normalize)[:2, :]\n\n def draw_rect(selected_corners, color):\n prev = selected_corners[-1]\n for corner in selected_corners:\n cv2.line(im,\n (int(prev[0]), int(prev[1])),\n (int(corner[0]), int(corner[1])),\n color, linewidth)\n prev = corner\n\n # Draw the sides\n for i in range(4):\n cv2.line(im,\n (int(corners.T[i][0]), int(corners.T[i][1])),\n (int(corners.T[i + 4][0]), int(corners.T[i + 4][1])),\n colors[2][::-1], linewidth)\n\n # Draw front (first 4 corners) and rear (last 4 corners) rectangles(3d)/lines(2d)\n draw_rect(corners.T[:4], colors[0][::-1])\n draw_rect(corners.T[4:], colors[1][::-1])\n\n # Draw line indicating the front\n center_bottom_forward = np.mean(corners.T[2:4], axis=0)\n center_bottom = np.mean(corners.T[[2, 3, 7, 6]], axis=0)\n cv2.line(im,\n (int(center_bottom[0]), int(center_bottom[1])),\n (int(center_bottom_forward[0]), int(center_bottom_forward[1])),\n colors[0][::-1], linewidth)\n\n def copy(self) -> 'Box':\n \"\"\"\n Create a copy of self.\n :return: A copy.\n \"\"\"\n return copy.deepcopy(self)"
},
{
"identifier": "process_past_pc_waymo",
"path": "gen_data/waymo_data_utils.py",
"snippet": "def process_past_pc_waymo(scene_name, lidar_path, ann_data, i, past_data_sample_index, ref_pose, ts):\n past_pc_list = dict()\n\n for j in past_data_sample_index:\n sweep_index = i - j\n sweep_ann = ann_data[sweep_index]\n sweep_lidar_pc_path = lidar_path / \"{:04d}.npy\".format(sweep_index)\n sweep_pose = sweep_ann[\"pose\"]\n past_pc = load_waymo_points(sweep_lidar_pc_path)\n\n sweep_token = \"{}_{:04d}\".format(scene_name, sweep_index)\n sweep_ts = sweep_ann[\"time_stamp\"]\n time_lag = ts - sweep_ts\n\n # ref_from_global * global_from_current = ref_from_current\n tm = reduce(np.dot, [np.linalg.inv(ref_pose), sweep_pose])\n past_pc = past_pc.T\n past_pc[:3, :] = tm.dot(np.vstack((past_pc[:3, :], np.ones(past_pc.shape[1]))))[:3, :]\n past_pc_list['synchronized_pc_' + str(j)] = past_pc\n past_pc_list['frame_id_' + str(j)] = sweep_token\n past_pc_list['ts_' + str(j)] = time_lag\n\n return past_pc_list"
},
{
"identifier": "build_BEV_input_waymo",
"path": "gen_data/waymo_data_utils.py",
"snippet": "def build_BEV_input_waymo(past_pc_list, past_data_sample_index, voxel_size, area_extents):\n\n voxel_indices_list = list()\n padded_voxel_points_list = list()\n\n for j in past_data_sample_index[::-1]:\n past_pc = past_pc_list['synchronized_pc_' + str(j)].T\n\n # remove close point\n past_pc, not_close = remove_close(past_pc, radius=1.0)\n # fixed size\n past_pc, filter_idx = filter_pc(past_pc, extents=area_extents)\n\n res, voxel_indices = voxelize_occupy(past_pc, voxel_size=voxel_size, extents=area_extents, return_indices=True)\n\n voxel_indices_list.append(voxel_indices)\n padded_voxel_points_list.append(res)\n\n padded_voxel_points = np.stack(padded_voxel_points_list, axis=0).astype(np.bool)\n return padded_voxel_points, voxel_indices_list"
},
{
"identifier": "build_BEV_gt_waymo",
"path": "gen_data/waymo_data_utils.py",
"snippet": "def build_BEV_gt_waymo(past_pc_list, grid_size, extents, ann_i,\n future_data_sample_index, ann_data, i, ref_pose, ts,\n category_num=5, one_hot_thresh=0.8, min_point_num_per_voxel=-1, proportion_thresh = 0.5):\n\n refer_pc = past_pc_list['synchronized_pc_0'].T\n refer_pc, not_close = remove_close(refer_pc, radius=1.0)\n refer_pc, filter_idx = filter_pc(refer_pc, extents=extents)\n\n # ----------------------------------------------------\n # Filter and sort the reference point cloud\n\n if extents is not None:\n if extents.shape != (3, 2):\n raise ValueError(\"Extents are the wrong shape {}\".format(extents.shape))\n\n filter_idx = np.where((extents[0, 0] < refer_pc[:, 0]) & (refer_pc[:, 0] < extents[0, 1]) &\n (extents[1, 0] < refer_pc[:, 1]) & (refer_pc[:, 1] < extents[1, 1]) &\n (extents[2, 0] < refer_pc[:, 2]) & (refer_pc[:, 2] < extents[2, 1]))[0]\n refer_pc = refer_pc[filter_idx]\n\n # -- Discretize pixel coordinates to given quantization size\n discrete_pts = np.floor(refer_pc[:, 0:2] / grid_size).astype(np.int32)\n\n # -- Use Lex Sort, sort by x, then y\n x_col = discrete_pts[:, 0]\n y_col = discrete_pts[:, 1]\n sorted_order = np.lexsort((y_col, x_col))\n\n refer_pc = refer_pc[sorted_order]\n discrete_pts = discrete_pts[sorted_order]\n\n contiguous_array = np.ascontiguousarray(discrete_pts).view(\n np.dtype((np.void, discrete_pts.dtype.itemsize * discrete_pts.shape[1])))\n\n # -- The new coordinates are the discretized array with its unique indexes\n _, unique_indices = np.unique(contiguous_array, return_index=True)\n\n # -- Sort unique indices to preserve order\n unique_indices.sort()\n pixel_coords = discrete_pts[unique_indices]\n\n # -- Number of points per voxel, last voxel calculated separately\n num_points_in_pixel = np.diff(unique_indices)\n num_points_in_pixel = np.append(num_points_in_pixel, discrete_pts.shape[0] - unique_indices[-1])\n\n # -- Compute the minimum and maximum voxel coordinates\n if extents is not None:\n min_pixel_coord = np.floor(extents.T[0, 0:2] / grid_size)\n max_pixel_coord = np.ceil(extents.T[1, 0:2] / grid_size) - 1\n else:\n min_pixel_coord = np.amin(pixel_coords, axis=0)\n max_pixel_coord = np.amax(pixel_coords, axis=0)\n\n # -- Get the voxel grid dimensions\n num_divisions = ((max_pixel_coord - min_pixel_coord) + 1).astype(np.int32)\n\n # -- Bring the min voxel to the origin\n pixel_indices = (pixel_coords - min_pixel_coord).astype(int)\n # ----------------------------------------------------\n\n # ----------------------------------------------------\n # Get the point cloud subsets, which are inside different instance bounding boxes\n refer_box_list = list()\n refer_pc_idx_per_bbox = list()\n points_category = np.zeros(refer_pc.shape[0], dtype=np.int) # store the point categories\n\n pixel_instance_id = np.zeros(pixel_indices.shape[0], dtype=np.uint8)\n points_instance_id = np.zeros(refer_pc.shape[0], dtype=np.int)\n\n # box in current frame,\n cur_nusc_box_dict = {} # t0 global -> t0\n for obj_idx, obj_id in enumerate(ann_i[\"annos\"]['obj_ids']):\n # vehicle system\n lwh = ann_i[\"annos\"][\"dimensions\"][obj_idx] # c_x, c_y, c_z\n ctr = ann_i[\"annos\"][\"location\"][obj_idx] # l, w, h\n yaw = ann_i[\"annos\"][\"heading_angles\"][obj_idx]\n name = ann_i[\"annos\"][\"name\"][obj_idx]\n\n nusc_box = Box(\n ctr, [lwh[1], lwh[0], lwh[2]],\n Quaternion(axis=[0, 0, 1], angle=yaw), name=name, token=obj_idx\n )\n cur_nusc_box_dict[obj_id] = nusc_box\n\n box_name = name\n\n if box_name in [\"Vehicle\", \"Pedestrian\", \"Cyclist\"]:\n 
instance_cat = obj_class_map[box_name]\n elif box_name == \"Sign\":\n instance_cat = obj_class_map[\"Others\"]\n else:\n raise Exception\n\n idx = point_in_hull_fast(refer_pc[:, 0:3], nusc_box)\n refer_pc_idx_per_bbox.append(idx)\n refer_box_list.append(nusc_box)\n\n points_category[idx] = instance_cat\n points_instance_id[idx] = obj_idx + 1 # object id starts from 1, background has id 0\n\n # remove the constraint\n # assert np.max(points_instance_id) <= 255, \"The instance id exceeds uint8 max.\"\n\n if len(refer_pc_idx_per_bbox) > 0:\n refer_pc_idx_inside_box = np.concatenate(refer_pc_idx_per_bbox).tolist()\n else:\n refer_pc_idx_inside_box = []\n refer_pc_idx_outside_box = set(range(refer_pc.shape[0])) - set(refer_pc_idx_inside_box)\n refer_pc_idx_outside_box = list(refer_pc_idx_outside_box)\n\n # Compute pixel (cell) categories\n pixel_cat = np.zeros([unique_indices.shape[0], category_num], dtype=np.float32)\n most_freq_info = []\n\n for h, v in enumerate(zip(unique_indices, num_points_in_pixel)):\n pixel_elements_categories = points_category[v[0]:v[0] + v[1]]\n elements_freq = np.bincount(pixel_elements_categories, minlength=category_num)\n assert np.sum(elements_freq) == v[1], \"The frequency count is incorrect.\"\n\n elements_freq = elements_freq / float(v[1])\n most_freq_cat, most_freq = np.argmax(elements_freq), np.max(elements_freq)\n most_freq_info.append([most_freq_cat, most_freq])\n\n most_freq_elements_idx = np.where(pixel_elements_categories == most_freq_cat)[0]\n pixel_elements_instance_ids = points_instance_id[v[0]:v[0] + v[1]]\n most_freq_instance_id = pixel_elements_instance_ids[most_freq_elements_idx[0]]\n\n if most_freq >= one_hot_thresh:\n one_hot_cat = np.zeros(category_num, dtype=np.float32)\n one_hot_cat[most_freq_cat] = 1.0\n pixel_cat[h] = one_hot_cat\n\n pixel_instance_id[h] = most_freq_instance_id\n else:\n pixel_cat[h] = elements_freq # use soft category probability vector.\n\n pixel_cat_map = np.zeros((num_divisions[0], num_divisions[1], category_num), dtype=np.float32)\n pixel_cat_map[pixel_indices[:, 0], pixel_indices[:, 1]] = pixel_cat[:]\n\n pixel_instance_map = np.zeros((num_divisions[0], num_divisions[1]), dtype=np.uint8)\n pixel_instance_map[pixel_indices[:, 0], pixel_indices[:, 1]] = pixel_instance_id[:]\n\n # Set the non-zero pixels to 1.0, which will be helpful for loss computation\n # Note that the non-zero pixels correspond to both the foreground and background objects\n non_empty_map = np.zeros((num_divisions[0], num_divisions[1]), dtype=np.float32)\n non_empty_map[pixel_indices[:, 0], pixel_indices[:, 1]] = 1.0\n\n # Ignore the voxel/pillar which contains number of points less than min_point_num_per_voxel; only for fg points\n cell_pts_num = np.zeros((num_divisions[0], num_divisions[1]), dtype=np.float32)\n cell_pts_num[pixel_indices[:, 0], pixel_indices[:, 1]] = num_points_in_pixel[:]\n tmp_pixel_cat_map = np.argmax(pixel_cat_map, axis=2)\n ignore_mask = np.logical_and(cell_pts_num <= min_point_num_per_voxel, tmp_pixel_cat_map != 0)\n ignore_mask = np.logical_not(ignore_mask)\n ignore_mask = np.expand_dims(ignore_mask, axis=2)\n\n # Compute the displacement vectors w.r.t. 
the other sweeps\n all_disp_field_gt_list = list()\n all_valid_pixel_maps_list = list() # valid pixel map will be used for masking the computation of loss\n \n for j in future_data_sample_index:\n curr_disp_vectors = np.zeros_like(refer_pc, dtype=np.float32)\n curr_disp_vectors.fill(np.nan)\n curr_disp_vectors[refer_pc_idx_outside_box,] = 0.0\n\n # compute flow between t and t + 0,1\n transformed_box_dict = {} # t1 global -> t0\n\n next_ann = ann_data[i + j]\n next_pose = next_ann[\"pose\"]\n next_T_cur_tm = reduce(np.dot, [np.linalg.inv(ref_pose), next_pose]) # ref_from_next\n next_ts_lag = next_ann[\"time_stamp\"] - ts\n for obj_idx, obj_id in enumerate(next_ann[\"annos\"]['obj_ids']):\n # vehicle system\n ctr = next_ann[\"annos\"][\"location\"][obj_idx]\n lwh = next_ann[\"annos\"][\"dimensions\"][obj_idx] # l, w, h\n yaw = next_ann[\"annos\"][\"heading_angles\"][obj_idx]\n name = next_ann[\"annos\"][\"name\"][obj_idx]\n\n # transform to t0\n # https://github.com/waymo-research/waymo-open-dataset/blob/master/waymo_open_dataset/utils/box_utils.py#L196\n yaw_offset = np.arctan2(next_T_cur_tm[1, 0], next_T_cur_tm[0, 0]) # return radian\n yaw = yaw + yaw_offset\n\n new_ctr = np.einsum('ij,nj->ni', next_T_cur_tm[:3, :3], ctr[None]) + next_T_cur_tm[:3, -1][None]\n new_ctr = new_ctr[0]\n\n # nuscenes devkit requires width, length, height as inputs\n nusc_box = Box(\n new_ctr, [lwh[1], lwh[0], lwh[2]], Quaternion(axis=[0, 0, 1], angle=yaw), name=name, token=obj_idx\n )\n transformed_box_dict[obj_id] = nusc_box\n\n cur_xyz = refer_pc\n # # ----------init-------------------\n cls_mask = np.zeros([len(cur_xyz), 1], dtype=np.int64)\n\n\n for box_token, cur_box in cur_nusc_box_dict.items():\n inbox_idx = point_in_hull_fast(cur_xyz, cur_box)\n\n box_name = cur_box.name\n\n if box_name in [\"Vehicle\", \"Pedestrian\", \"Cyclist\"]:\n cls_mask[inbox_idx] = obj_class_map[box_name]\n elif box_name == \"Sign\":\n cls_mask[inbox_idx] = obj_class_map[\"Others\"]\n else:\n raise Exception\n\n\n cur_xyz_in_box = cur_xyz[inbox_idx]\n\n if box_name != \"Sign\" and box_token in transformed_box_dict: # compute_flow\n transformed_box = transformed_box_dict[box_token]\n in_box_flow, _ = calc_displace_vector(cur_xyz_in_box, cur_box, transformed_box)\n curr_disp_vectors[inbox_idx] = in_box_flow\n else: # ignore\n curr_disp_vectors[inbox_idx] = 0.\n\n # Second, compute the mean displacement vector and category for each non-empty pixel\n disp_field = np.zeros([unique_indices.shape[0], 2], dtype=np.float32) # we only consider the 2D field\n\n # We only compute loss for valid pixels where there are corresponding box annotations between two frames\n valid_pixels = np.zeros(unique_indices.shape[0], dtype=np.bool)\n\n for h, v in enumerate(zip(unique_indices, num_points_in_pixel)):\n\n # Only when the number of majority points exceeds predefined proportion, we compute\n # the displacement vector for this pixel. 
Otherwise, We consider it is background (possibly ground plane)\n # and has zero displacement.\n pixel_elements_categories = points_category[v[0]:v[0] + v[1]]\n most_freq_cat, most_freq = most_freq_info[h]\n\n if most_freq >= proportion_thresh:\n most_freq_cat_idx = np.where(pixel_elements_categories == most_freq_cat)[0]\n most_freq_cat_disp_vectors = curr_disp_vectors[v[0]:v[0] + v[1], :3]\n most_freq_cat_disp_vectors = most_freq_cat_disp_vectors[most_freq_cat_idx]\n\n if np.isnan(most_freq_cat_disp_vectors).any(): # contains invalid disp vectors\n valid_pixels[h] = 0.0\n else:\n mean_disp_vector = np.mean(most_freq_cat_disp_vectors, axis=0)\n disp_field[h] = mean_disp_vector[0:2] # ignore the z direction\n\n valid_pixels[h] = 1.0\n\n # Finally, assemble to a 2D image\n disp_field_sparse = np.zeros((num_divisions[0], num_divisions[1], 2), dtype=np.float32)\n disp_field_sparse[pixel_indices[:, 0], pixel_indices[:, 1]] = disp_field[:]\n disp_field_sparse = disp_field_sparse * ignore_mask\n\n valid_pixel_map = np.zeros((num_divisions[0], num_divisions[1]), dtype=np.float32)\n valid_pixel_map[pixel_indices[:, 0], pixel_indices[:, 1]] = valid_pixels[:]\n\n all_disp_field_gt_list.append(disp_field_sparse)\n all_valid_pixel_maps_list.append(valid_pixel_map)\n\n all_disp_field_gt_list = np.stack(all_disp_field_gt_list, axis=0)\n all_valid_pixel_maps_list = np.stack(all_valid_pixel_maps_list, axis=0)\n\n\n return all_disp_field_gt_list, all_valid_pixel_maps_list, non_empty_map, pixel_cat_map, pixel_indices, pixel_instance_map"
},
{
"identifier": "convert_to_sparse_bev_waymo",
"path": "gen_data/waymo_data_utils.py",
"snippet": "def convert_to_sparse_bev_waymo(dense_bev_data):\n save_voxel_indices_list, save_voxel_points, save_pixel_indices, save_pixel_instance_maps, \\\n save_disp_field_gt, save_valid_pixel_maps, save_non_empty_maps, save_pixel_cat_maps = dense_bev_data\n\n save_num_past_pcs = 5\n save_num_future_pcs = 10\n\n save_valid_pixel_maps = save_valid_pixel_maps.astype(np.bool)\n save_voxel_dims = save_voxel_points.shape[1:]\n num_categories = save_pixel_cat_maps.shape[-1]\n\n sparse_disp_field_gt = save_disp_field_gt[:, save_pixel_indices[:, 0], save_pixel_indices[:, 1], :]\n sparse_valid_pixel_maps = save_valid_pixel_maps[:, save_pixel_indices[:, 0], save_pixel_indices[:, 1]]\n sparse_pixel_cat_maps = save_pixel_cat_maps[save_pixel_indices[:, 0], save_pixel_indices[:, 1]]\n sparse_pixel_instance_maps = save_pixel_instance_maps[save_pixel_indices[:, 0], save_pixel_indices[:, 1]]\n\n save_data_dict = dict()\n for i in range(len(save_voxel_indices_list)):\n save_data_dict['voxel_indices_' + str(i)] = save_voxel_indices_list[i].astype(np.int32)\n\n save_data_dict['disp_field'] = sparse_disp_field_gt\n save_data_dict['valid_pixel_map'] = sparse_valid_pixel_maps\n save_data_dict['pixel_cat_map'] = sparse_pixel_cat_maps\n save_data_dict['num_past_pcs'] = save_num_past_pcs\n save_data_dict['num_future_pcs'] = save_num_future_pcs\n # save_data_dict['trans_matrices'] = save_trans_matrices\n save_data_dict['3d_dimension'] = save_voxel_dims\n save_data_dict['pixel_indices'] = save_pixel_indices\n save_data_dict['pixel_instance_ids'] = sparse_pixel_instance_maps\n\n # -------------------------------- Sanity Check --------------------------------\n dims = save_non_empty_maps.shape\n\n test_disp_field_gt = np.zeros((save_num_future_pcs, dims[0], dims[1], 2), dtype=np.float32)\n test_disp_field_gt[:, save_pixel_indices[:, 0], save_pixel_indices[:, 1], :] = sparse_disp_field_gt[:]\n assert np.all(test_disp_field_gt == save_disp_field_gt), \"Error: Mismatch\"\n\n test_valid_pixel_maps = np.zeros((save_num_future_pcs, dims[0], dims[1]), dtype=np.bool)\n test_valid_pixel_maps[:, save_pixel_indices[:, 0], save_pixel_indices[:, 1]] = sparse_valid_pixel_maps[:]\n assert np.all(test_valid_pixel_maps == save_valid_pixel_maps), \"Error: Mismatch\"\n\n test_pixel_cat_maps = np.zeros((dims[0], dims[1], num_categories), dtype=np.float32)\n test_pixel_cat_maps[save_pixel_indices[:, 0], save_pixel_indices[:, 1], :] = sparse_pixel_cat_maps[:]\n assert np.all(test_pixel_cat_maps == save_pixel_cat_maps), \"Error: Mismatch\"\n\n test_non_empty_map = np.zeros((dims[0], dims[1]), dtype=np.float32)\n test_non_empty_map[save_pixel_indices[:, 0], save_pixel_indices[:, 1]] = 1.0\n assert np.all(test_non_empty_map == save_non_empty_maps), \"Error: Mismatch\"\n\n test_pixel_instance_map = np.zeros((dims[0], dims[1]), dtype=np.uint8)\n test_pixel_instance_map[save_pixel_indices[:, 0], save_pixel_indices[:, 1]] = sparse_pixel_instance_maps[:]\n assert np.all(test_pixel_instance_map == save_pixel_instance_maps), \"Error: Mismatch\"\n\n for i in range(len(save_voxel_indices_list)):\n indices = save_data_dict['voxel_indices_' + str(i)]\n curr_voxels = np.zeros(save_voxel_dims, dtype=np.bool)\n curr_voxels[indices[:, 0], indices[:, 1], indices[:, 2]] = 1\n assert np.all(curr_voxels == save_voxel_points[i]), \"Error: Mismatch\"\n\n return save_data_dict"
},
{
"identifier": "gen_weak_supervision",
"path": "gen_data/gen_weak_waymo_utils.py",
"snippet": "def gen_weak_supervision(scene_name, lidar_path, ann_data, i, pc_random_index_dict, pc_down_sample_dict, num_down_sample = 50000):\n ''' get current info'''\n ann_i = ann_data[i]\n\n # extract info about reference key\n lidar_pc_path = lidar_path / \"{:04d}.npy\".format(i)\n cur_xyz = load_waymo_points(lidar_pc_path)\n\n ref_pose = ann_i[\"pose\"]\n ref_token = \"{}_{:04d}\".format(scene_name, i)\n ref_ts = ann_i[\"time_stamp\"]\n\n save_weak_dict = dict()\n\n id_list = [-5, 0, 5]\n for j in range(3):\n sweep_index = i + id_list[j]\n sweep_ann = ann_data[sweep_index]\n sweep_lidar_pc_path = lidar_path / \"{:04d}.npy\".format(sweep_index)\n sweep_pose = sweep_ann[\"pose\"]\n sweep_pc = load_waymo_points(sweep_lidar_pc_path)\n\n sweep_token = \"{}_{:04d}\".format(scene_name, sweep_index)\n sweep_ts = sweep_ann[\"time_stamp\"]\n time_lag = sweep_ts - ref_ts\n\n # ref_from_global * global_from_current = ref_from_current\n tm = reduce(np.dot, [np.linalg.inv(ref_pose), sweep_pose])\n sweep_pc = sweep_pc.T\n sweep_pc[:3, :] = tm.dot(np.vstack((sweep_pc[:3, :], np.ones(sweep_pc.shape[1]))))[:3, :]\n points_label = get_label_info(sweep_ann, lidar_path, sweep_index)\n\n # down-sample\n down_sample_idx, pc_down_sample_dict = gen_random_index_for_pc(sweep_pc, sweep_token, pc_down_sample_dict)\n sweep_pc_t = sweep_pc.transpose((1, 0))\n\n # We only preserve a fixed number of points for each point cloud\n if down_sample_idx.shape[0] > num_down_sample:\n sampled_sweep_pc_t = sweep_pc_t[down_sample_idx[:num_down_sample]]\n sampled_points_label = points_label[down_sample_idx[:num_down_sample]].astype(np.int32)\n else:\n sampled_sweep_pc_t = sweep_pc_t[down_sample_idx]\n sampled_points_label = points_label[down_sample_idx].astype(np.int32)\n sampled_sweep_pc = sampled_sweep_pc_t.transpose((1, 0))\n\n save_weak_dict['synchronized_pc_' + str(j)] = sampled_sweep_pc\n save_weak_dict['frame_id_' + str(j)] = sweep_token\n save_weak_dict['ts_' + str(j)] = time_lag\n\n save_weak_dict['points_label_' + str(j)] = sampled_points_label\n\n sample_idx, pc_random_index_dict = gen_random_index_for_pc(sampled_sweep_pc, sweep_token, pc_random_index_dict)\n save_weak_dict['sample_idx_' + str(j)] = sample_idx.astype(np.int32)\n\n return save_weak_dict, pc_random_index_dict, pc_down_sample_dict"
}
] | import numpy as np
import os
import os.path as osp
import copy
import tqdm
import pickle
import argparse
from pathlib import Path
from functools import reduce
from gen_data.nuscenes.utils.data_classes import Box
from pyquaternion import Quaternion
from gen_data.waymo_data_utils import process_past_pc_waymo, build_BEV_input_waymo, build_BEV_gt_waymo, convert_to_sparse_bev_waymo
from gen_data.gen_weak_waymo_utils import gen_weak_supervision
| 9,016 | """
Prepare the input data, motion ground truth, and Foreground/Background information for Waymo data.
"""
obj_class_map = {
"Vehicle": 1, "Pedestrian":2, "Cyclist": 3, "Others": 4
} # take sign as others
voxel_size = (0.25, 0.25, 0.4)
area_extents = np.array([[-32., 32.], [-32., 32.], [-1., 4.]])
def check_folder(folder_name):
if not os.path.exists(folder_name):
os.mkdir(folder_name)
return folder_name
def create_waymo_infos(root_path, save_root_path, mode):
flow_time_gap = 1
past_time_gap = 1
check_folder(os.path.join(save_root_path, 'input-data'))
check_folder(os.path.join(save_root_path, 'input-data', mode))
if mode == "train":
check_folder(os.path.join(save_root_path, 'weak-data'))
check_folder(os.path.join(save_root_path, 'weak-data', mode))
sample_dir = check_folder(os.path.join(save_root_path, 'weak-data', 'train_sample_info'))
if mode == "train":
scene_list_file = "ImageSets/train.txt"
sample_time_gap = 1.0
type = 'training'
elif mode == "val":
scene_list_file = "ImageSets/val.txt"
sample_time_gap = 1.0
type = 'validation'
else:
raise Exception(f"unsupported mode: {mode}")
with open(scene_list_file, 'r') as f:
scene_list = f.readlines()
scene_list = [s.strip().split(".")[0] for s in scene_list]
print("finish loading scene list")
sample_data_step = int(sample_time_gap * 10)
flow_data_step = int(flow_time_gap * 10)
past_data_step = int(past_time_gap * 10)
past_data_sample_index = np.arange(0, past_data_step, 2)
future_data_sample_index = np.arange(1, 1 + flow_data_step, 1)
for scene_name in tqdm.tqdm(scene_list):
lidar_path = root_path / type / scene_name
assert osp.exists(lidar_path)
ann_path = lidar_path / f"{scene_name}.pkl"
pc_random_index_dict = dict()
pc_down_sample_dict = dict()
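# Per-scene caches of sampling indices keyed by frame token, so the same random
# down-sampling is reused whenever a frame is revisited (see gen_weak_supervision)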
with open(ann_path, 'rb') as f:
ann_data = pickle.load(f)
num_lidar = len(ann_data)
for i in range(0, num_lidar, sample_data_step):
# skip frames that do not have enough previous and future sweeps
if i < past_data_step or i > (num_lidar - 1 - flow_data_step):
continue
''' get current info'''
ann_i = ann_data[i]
# extract info about reference key
pose = ann_i["pose"] # global_from_car, convert pc in car system to global system
ts = ann_i["time_stamp"]
token = "{}_{:04d}".format(scene_name, i)
''' get past pc '''
past_pc_list = process_past_pc_waymo(scene_name, lidar_path, ann_data, i, past_data_sample_index, pose, ts)
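# past_pc_list holds the sampled past sweeps, already synchronized into the reference (current) vehicle frame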
''' build BEV input & gt '''
| """
Prepare the input data, motion ground truth, and Foreground/Background information for Waymo data.
"""
obj_class_map = {
"Vehicle": 1, "Pedestrian":2, "Cyclist": 3, "Others": 4
} # take sign as others
voxel_size = (0.25, 0.25, 0.4)
area_extents = np.array([[-32., 32.], [-32., 32.], [-1., 4.]])
def check_folder(folder_name):
if not os.path.exists(folder_name):
os.mkdir(folder_name)
return folder_name
def create_waymo_infos(root_path, save_root_path, mode):
flow_time_gap = 1
past_time_gap = 1
check_folder(os.path.join(save_root_path, 'input-data'))
check_folder(os.path.join(save_root_path, 'input-data', mode))
if mode == "train":
check_folder(os.path.join(save_root_path, 'weak-data'))
check_folder(os.path.join(save_root_path, 'weak-data', mode))
sample_dir = check_folder(os.path.join(save_root_path, 'weak-data', 'train_sample_info'))
if mode == "train":
scene_list_file = "ImageSets/train.txt"
sample_time_gap = 1.0
type = 'training'
elif mode == "val":
scene_list_file = "ImageSets/val.txt"
sample_time_gap = 1.0
type = 'validation'
else:
raise Exception(f"unsupported mode: {mode}")
with open(scene_list_file, 'r') as f:
scene_list = f.readlines()
scene_list = [s.strip().split(".")[0] for s in scene_list]
print("finish loading scene list")
sample_data_step = int(sample_time_gap * 10)
flow_data_step = int(flow_time_gap * 10)
past_data_step = int(past_time_gap * 10)
past_data_sample_index = np.arange(0, past_data_step, 2)
future_data_sample_index = np.arange(1, 1 + flow_data_step, 1)
for scene_name in tqdm.tqdm(scene_list):
lidar_path = root_path / type / scene_name
assert osp.exists(lidar_path)
ann_path = lidar_path / f"{scene_name}.pkl"
pc_random_index_dict = dict()
pc_down_sample_dict = dict()
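# Per-scene caches of sampling indices keyed by frame token, so the same random
# down-sampling is reused whenever a frame is revisited (see gen_weak_supervision)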
with open(ann_path, 'rb') as f:
ann_data = pickle.load(f)
num_lidar = len(ann_data)
for i in range(0, num_lidar, sample_data_step):
# skip frames that do not have enough previous and future sweeps
if i < past_data_step or i > (num_lidar - 1 - flow_data_step):
continue
''' get current info'''
ann_i = ann_data[i]
# extract info about reference key
pose = ann_i["pose"] # global_from_car, convert pc in car system to global system
ts = ann_i["time_stamp"]
token = "{}_{:04d}".format(scene_name, i)
''' get past pc '''
past_pc_list = process_past_pc_waymo(scene_name, lidar_path, ann_data, i, past_data_sample_index, pose, ts)
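# past_pc_list holds the sampled past sweeps, already synchronized into the reference (current) vehicle frame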
''' build BEV input & gt '''
| padded_voxel_points, voxel_indices_list = build_BEV_input_waymo(past_pc_list, past_data_sample_index, voxel_size, area_extents)
| 2 | 2023-11-12 07:03:29+00:00 | 12k |
c3exchange/c3-smartcontracts-v1 | contracts_unified/core/methods/settle.py | [
{
"identifier": "ARG_INDEX_ACCOUNT",
"path": "contracts_unified/core/c3call.py",
"snippet": "ARG_INDEX_ACCOUNT = Int(1)"
},
{
"identifier": "ARG_INDEX_OP",
"path": "contracts_unified/core/c3call.py",
"snippet": "ARG_INDEX_OP = Int(2)"
},
{
"identifier": "ARG_INDEX_SELECTOR",
"path": "contracts_unified/core/c3call.py",
"snippet": "ARG_INDEX_SELECTOR = Int(0)"
},
{
"identifier": "health_check",
"path": "contracts_unified/core/internal/health_check.py",
"snippet": "@ABIReturnSubroutine\ndef health_check(\n account: AccountAddress,\n use_maint: abi.Bool,\n *,\n output: ExcessMargin,\n) -> Expr:\n \"\"\"Calculates the user's health\"\"\"\n\n count = abi.Uint64()\n\n user_data = UserInstrumentData()\n cash = Amount()\n principal = SignedAmount()\n index = abi.Uint64()\n\n instrument_id = InstrumentId()\n instrument = InstrumentListElement()\n loaned_balance = SignedAmount()\n balance_sum = SignedAmount()\n has_lend = abi.Uint64()\n\n haircut = Ratio()\n margin = Ratio()\n\n optimal_utilization = Ratio()\n\n price = Price()\n\n return Seq(\n # Clear output\n output.set(Int(0)),\n\n # Loop over instruments\n count.set(cast(abi.ReturnedValue, LocalStateHandler.get_user_instrument_count(account))),\n For(\n instrument_id.set(Int(0)),\n instrument_id.get() < count.get(),\n instrument_id.set(instrument_id.get() + Int(1)),\n ).Do(\n Seq(\n # Extract user position\n user_data.set(cast(abi.ReturnedValue, LocalStateHandler.get_position(account, instrument_id))),\n cash.set(user_data.cash),\n principal.set(user_data.principal),\n index.set(user_data.index),\n\n If(cash.get() | principal.get()).Then(\n # Get price\n price.set(cast(abi.ReturnedValue, get_normalized_price(instrument_id))),\n\n # Get instrument\n instrument.set(cast(abi.ReturnedValue, GlobalStateHandler.get_instrument(instrument_id))),\n\n # Get loan balance(netting)\n If(principal.get() != Int(0))\n .Then(\n has_lend.set(Not(signed_ltz(principal.get()))),\n If(has_lend.get())\n .Then(\n instrument.lend_index.use(lambda lend_index:\n loaned_balance.set(calculate_accrued_lend(principal, index, lend_index))\n )\n )\n .Else(\n instrument.borrow_index.use(lambda borrow_index:\n loaned_balance.set(calculate_accrued_borrow(principal, index, borrow_index)),\n )\n ),\n )\n .Else(\n has_lend.set(Int(0)),\n loaned_balance.set(Int(0))\n ),\n\n # Calculate balance sum\n balance_sum.set(signed_add(cash.get(), loaned_balance.get())),\n\n # Get risk factors\n # Load risk factors\n If(use_maint.get())\n .Then(\n instrument.maintenance_haircut.store_into(haircut),\n instrument.maintenance_margin.store_into(margin),\n )\n .Else(\n instrument.initial_haircut.store_into(haircut),\n instrument.initial_margin.store_into(margin),\n ),\n\n # Load optimal utilization\n instrument.optimal_utilization.store_into(optimal_utilization),\n\n # Calculate health for this asset and add to output\n # Add first term, health += price * sum * ratio\n If(signed_ltz(balance_sum.get()))\n .Then(\n output.set(signed_sub(output.get(), WideRatio([price.get(), signed_neg(balance_sum.get()), Int(RATIO_ONE) + margin.get()], [Int(PRICECASTER_RESCALE_FACTOR * RATIO_ONE)])))\n )\n .Else(\n output.set(signed_add(output.get(), WideRatio([price.get(), balance_sum.get(), Int(RATIO_ONE) - haircut.get()], [Int(PRICECASTER_RESCALE_FACTOR * RATIO_ONE)])))\n ),\n\n # Lend positions should be further multiplied by (1 - optimal_utilization)\n # We already included the 1 term, so we need to subtract the optimal utilization\n If(has_lend.get())\n .Then(\n output.set(\n signed_sub(\n output.get(),\n WideRatio(\n [price.get(), loaned_balance.get(), Int(RATIO_ONE) - haircut.get(), optimal_utilization.get()],\n # Normalize haircut and utilization\n [Int(PRICECASTER_RESCALE_FACTOR * RATIO_ONE * RATIO_ONE)],\n ),\n )\n )\n )\n ),\n )\n ),\n Log(Concat(account.get(), Itob(output.get())))\n )"
},
{
"identifier": "collect_fees",
"path": "contracts_unified/core/internal/move.py",
"snippet": "@ABIReturnSubroutine\ndef collect_fees(\n instrument_id: InstrumentId,\n amount: Amount,\n) -> Expr:\n \"\"\"Adds amount to the fees target balance\"\"\"\n\n fee_target_account = AccountAddress()\n\n return Seq(\n fee_target_account.set(GlobalStateHandler.get_fee_target()),\n cast(Expr, signed_add_to_cash(fee_target_account, instrument_id, amount))\n )"
},
{
"identifier": "signed_add_to_cash",
"path": "contracts_unified/core/internal/move.py",
"snippet": "@ABIReturnSubroutine\ndef signed_add_to_cash(\n account: AccountAddress,\n instrument_id: InstrumentId,\n amount: Amount,\n) -> Expr:\n \"\"\"Adds amount to the user's asset balance\"\"\"\n\n data = UserInstrumentData()\n new_cash = SignedAmount()\n\n return Seq(\n # Load user data\n data.set(cast(abi.ReturnedValue, LocalStateHandler.get_position(account, instrument_id))),\n\n # Update user data\n data.cash.use(lambda cash:\n new_cash.set(signed_add(amount.get(), cash.get())),\n ),\n\n # Validate the result is positive\n Assert(Not(signed_ltz(new_cash.get()))),\n\n # Update data\n data.principal.use(lambda principal:\n data.index.use(lambda index:\n data.set(\n new_cash,\n principal,\n index,\n )\n )\n ),\n\n cast(Expr, LocalStateHandler.set_position(account, instrument_id, data)),\n )"
},
{
"identifier": "perform_pool_move",
"path": "contracts_unified/core/internal/perform_pool_move.py",
"snippet": "@ABIReturnSubroutine\ndef perform_pool_move(\n account: AccountAddress,\n instrument_id: InstrumentId,\n transfer_amount: SignedAmount,\n) -> Expr:\n \"\"\"\n Transfers from the user to the pool `transfer_amount`.\n The function takes the following steps:\n 1. Calculates global accrued interest\n 2. Capitalizes the users balance by updating the\n user's principal with the user's accrued interest\n 3. Transfer between the user and the pool\n\n Parameters\n ----------\n user_position: current pool position of the user on `instrument_id`\n instrument_id: instrument index\n transfer_amount: amount to be transfered from the user to the pool.\n a positive value indicates that the user is sending to the pool (repaying and/or subscribing)\n a negative value indicates that the user is receiving from the pool (borrowing and/or redeeming)\n output: the user's position on the pool after the transfer\n \"\"\"\n\n # Instrument's attributes that change as part of this operation\n new_pool_last_update_time = RelativeTimestamp()\n old_pool_last_update_time = RelativeTimestamp()\n\n new_pool_borrowed = Amount()\n old_pool_borrowed = Amount()\n\n new_pool_liquidity = Amount()\n old_pool_liquidity = Amount()\n\n new_pool_borrow_index = InterestRate()\n old_pool_borrow_index = InterestRate()\n\n new_pool_lend_index = InterestRate()\n old_pool_lend_index = InterestRate()\n\n # Instrument attributes that are not affected by this operation\n asset_id = AssetId()\n initial_haircut = Ratio()\n initial_margin = Ratio()\n maintenance_haircut = Ratio()\n maintenance_margin = Ratio()\n\n optimal_utilization_ratio = Ratio()\n optimal_utilization_rate = InterestRate()\n min_rate = InterestRate()\n opt_rate = InterestRate()\n max_rate = InterestRate()\n\n # User's attributes\n user_position = UserInstrumentData()\n new_user_principal = SignedAmount()\n old_user_principal = SignedAmount()\n new_user_index = InterestRate()\n old_user_index = InterestRate()\n new_user_cash = Amount()\n old_user_cash = Amount()\n\n # Variables for intermediate calculations\n old_utilization_rate = InterestRate()\n old_interest_rate = InterestRate()\n delta_time = Timestamp()\n compounding_per_second_rate = InterestRate()\n compounding_per_period_rate = InterestRate()\n\n pool_accrued_interest = Amount()\n\n liquidity_transfer = SignedAmount()\n borrowed_transfer = SignedAmount()\n\n remainder = SignedAmount()\n\n instrument_state = InstrumentListElement()\n new_instrument_state = InstrumentListElement()\n\n return Seq(\n # Loads current instrument state\n instrument_state.set(cast(abi.ReturnedValue, GlobalStateHandler.get_instrument(instrument_id))),\n\n instrument_state.asset_id.store_into(asset_id),\n # Loads pool data\n instrument_state.last_update_time.store_into(old_pool_last_update_time),\n instrument_state.borrowed.store_into(old_pool_borrowed),\n instrument_state.liquidity.store_into(old_pool_liquidity),\n instrument_state.borrow_index.store_into(old_pool_borrow_index),\n instrument_state.lend_index.store_into(old_pool_lend_index),\n\n # Loads interest curve data\n instrument_state.optimal_utilization.store_into(optimal_utilization_ratio),\n optimal_utilization_rate.set(WideRatio([optimal_utilization_ratio.get(), Int(RATE_ONE)], [Int(RATIO_ONE)])),\n instrument_state.min_rate.store_into(min_rate),\n instrument_state.opt_rate.store_into(opt_rate),\n instrument_state.max_rate.store_into(max_rate),\n\n # Loads haircuts and margins\n instrument_state.initial_haircut.store_into(initial_haircut),\n 
instrument_state.initial_margin.store_into(initial_margin),\n instrument_state.maintenance_haircut.store_into(maintenance_haircut),\n instrument_state.maintenance_margin.store_into(maintenance_margin),\n\n # Calculates the new timestamp\n # NOTE: Updates to this can be controlled via the algosdk function setBlockOffsetTimestamp\n new_pool_last_update_time.set(GlobalStateHandler.get_relative_timestamp()),\n\n ###############################################################################################################\n # 1.\n # Calculates the accrued interest in the pool since the last update\n # and reflects that on the total liquidity and borrow amount.\n\n # 1.1\n # AI_t = ((BI_t / BI_{t-1})-1) * B_{t-1} = ((1+R_{t_1})^dT - 1) * B_{t-1}\n\n # 1.1.1\n # Calculates the pool's utilization\n # U_{t-1} = B_{t-1} / L_{t-1} = B_{t-1} * 1 / L_{t-1}\n old_utilization_rate.set(\n If(old_pool_liquidity.get() == Int(0))\n .Then(Int(0))\n .Else(WideRatio([old_pool_borrowed.get(), Int(RATE_ONE)], [old_pool_liquidity.get()]))\n ),\n\n # 1.1.2\n # Calculates interest rate per second for the period since the last update\n # R_{t-1} = R_min + U_{t-1} / U_opt * R_slope1 if U_{t-1} < U_opt\n # R_{t-1} = R_opt + (U_{t-1}-U_opt) / (1 - U_opt) * R_slope2 if U_{t-1} >= U_opt\n old_interest_rate.set(\n If(old_utilization_rate.get() < optimal_utilization_rate.get())\n .Then(\n min_rate.get()\n + WideRatio(\n [old_utilization_rate.get(), opt_rate.get() - min_rate.get()],\n [optimal_utilization_rate.get()]\n )\n )\n .Else(\n opt_rate.get()\n + WideRatio(\n [old_utilization_rate.get() - optimal_utilization_rate.get(), max_rate.get() - opt_rate.get()],\n [Int(RATE_ONE) - optimal_utilization_rate.get()]\n )\n )\n ),\n\n # 1.1.3\n # Calculates time since previous update\n delta_time.set(new_pool_last_update_time.get() - old_pool_last_update_time.get()),\n\n # 1.1.4\n # AI_t = ((BI_t / BI_{t-1})-1) * B_{t-1} = ((1+R_{t_1})^dT - 1) * B_{t-1}\n compounding_per_second_rate.set(Int(RATE_ONE) + old_interest_rate.get()),\n compounding_per_period_rate.set(teal_expt(compounding_per_second_rate, delta_time)),\n pool_accrued_interest.set(\n WideRatio(\n [compounding_per_period_rate.get() - Int(RATE_ONE), old_pool_borrowed.get()],\n [Int(RATE_ONE)],\n )\n ),\n\n # 1.2\n # Capitalize pool accrued interest into liquidity and borrowed amounts\n new_pool_borrowed.set(old_pool_borrowed.get() + pool_accrued_interest.get()),\n new_pool_liquidity.set(old_pool_liquidity.get() + pool_accrued_interest.get()),\n\n # 1.3\n # Updates pool indexes\n new_pool_borrow_index.set(\n If(old_pool_borrowed.get() == Int(0))\n .Then(\n Int(RATE_ONE)\n )\n .Else(\n WideRatio([old_pool_borrow_index.get(), new_pool_borrowed.get()], [old_pool_borrowed.get()])\n )\n ),\n new_pool_lend_index.set(\n If(old_pool_liquidity.get() == Int(0))\n .Then(\n Int(RATE_ONE)\n )\n .Else(\n WideRatio([old_pool_lend_index.get(), new_pool_liquidity.get()], [old_pool_liquidity.get()])\n )\n ),\n\n # We only perform the pool move if a user was given, otherwise we just update the global instrument data\n If(account.get() != Global.zero_address()).Then(\n ###############################################################################################################\n # 2\n # Get user data\n user_position.set(cast(abi.ReturnedValue, LocalStateHandler.get_position(account, instrument_id))),\n user_position.cash.store_into(old_user_cash),\n user_position.principal.store_into(old_user_principal),\n user_position.index.store_into(old_user_index),\n\n # Capitalize user's accrued 
interest into user's principal\n new_user_principal.set(\n If(old_user_index.get() == Int(0))\n .Then(\n Int(0)\n )\n .ElseIf(signed_ltz(old_user_principal.get()))\n .Then(\n # The user has a borrow position\n calculate_accrued_borrow(old_user_principal, old_user_index, new_pool_borrow_index)\n )\n .Else(\n # The user has a lend position\n calculate_accrued_lend(old_user_principal, old_user_index, new_pool_lend_index)\n )\n ),\n\n ###############################################################################################################\n # 3\n # Transfer between the user and the pool\n\n # 3.0 Validate user's position against pool size\n Assert(new_pool_liquidity.get() >= signed_max(Int(0), new_user_principal.get())),\n # NOTE: The case for the user repaying more than the pool has borrowed is handled below\n # in order to handle zero-interest borrow case\n\n # 3.1 Updates the pool borrowed and liquitiy amounts\n\n # 3.1.1 Decompose the transfer_amount into borrowed_transfer and liquidity_transfer\n # such that:\n # a. transfer_amount == borrowed_transfer + liquidity_transfer\n # b. sign(transfer_amount) == sign(borrowed_transfer) == sign(liquidity_transfer)\n # c. if transfer_amount <=0:\n # # User cannot redeem more than its long position\n # liquidity_transfer = max(transfer_amount, min(0, -new_user_principal))\n # else:\n # # User must repay before subscribing\n # liquidity_transfer = max(transfer_amount + min(0, new_user_principal), 0)\n #\n # In other words:\n # - If transfer_amount is negative, then liquidity_transfer represents the\n # amount that the user is redeeming from the pool, and borrowed_transfer the amount that is\n # borrowing from the pool.\n # - If transfer_amount is positive, then liquidity_transfer represents the\n # amount that the user is subscribing to the pool, and borrowed_transfer the amount that is\n # repaying to the pool.\n liquidity_transfer.set(\n signed_max(\n signed_add(\n transfer_amount.get(),\n signed_min(Int(0), new_user_principal.get())\n ),\n signed_min(Int(0), signed_neg(new_user_principal.get()))\n )\n ),\n borrowed_transfer.set(\n signed_sub(\n transfer_amount.get(),\n liquidity_transfer.get()\n )\n ),\n\n # 3.1.2 Applies the liquidity_transfer and borrowed_transfer to the pool\n new_pool_borrowed.set(signed_sub(new_pool_borrowed.get(), borrowed_transfer.get())),\n\n # Handles the case where the user repays more than the pool has borrowed\n # This will happen when there are accumulated microunits of interest\n If(signed_ltz(new_pool_borrowed.get())).Then(\n # Remainder is whatever is left in the transfer after repaying all pool borrows\n remainder.set(signed_neg(new_pool_borrowed.get())),\n\n # New liquidity index is updated to reflect the remainder\n # liquidity_index' = liquidity_index + liquidity_index * remainder / pool_liquidity\n new_pool_lend_index.set(new_pool_lend_index.get() + WideRatio([new_pool_lend_index.get(), remainder.get()], [new_pool_liquidity.get()])),\n\n # New liquidity includes the remainder\n new_pool_liquidity.set(signed_add(new_pool_liquidity.get(), remainder.get())),\n\n # Borrowed is cleared to remain always positive\n new_pool_borrowed.set(Int(0))\n ),\n\n new_pool_liquidity.set(signed_add(new_pool_liquidity.get(), liquidity_transfer.get())),\n\n # 3.1.3 Validate the pool has sufficient liquidity to perform the operation\n Assert(new_pool_liquidity.get() >= new_pool_borrowed.get()),\n\n # 3.2 Update user's principal and cash\n new_user_principal.set(signed_add(new_user_principal.get(), transfer_amount.get())),\n 
new_user_cash.set(signed_sub(old_user_cash.get(), transfer_amount.get())),\n Assert(Not(signed_ltz(new_user_cash.get()))),\n\n # 3.3 Update user's index\n new_user_index.set(\n If(signed_ltz(new_user_principal.get()))\n .Then(new_pool_borrow_index.get())\n .Else(new_pool_lend_index.get())\n ),\n\n # Update user\n user_position.set(new_user_cash, new_user_principal, new_user_index),\n cast(Expr, LocalStateHandler.set_position(account, instrument_id, user_position)),\n ),\n\n # Update liquidity pool\n new_instrument_state.set(\n asset_id,\n initial_haircut,\n initial_margin,\n maintenance_haircut,\n maintenance_margin,\n new_pool_last_update_time,\n new_pool_borrow_index,\n new_pool_lend_index,\n optimal_utilization_ratio,\n min_rate,\n opt_rate,\n max_rate,\n new_pool_borrowed,\n new_pool_liquidity,\n ),\n\n # Update instrument\n cast(Expr, GlobalStateHandler.set_instrument(instrument_id, new_instrument_state)),\n )"
},
{
"identifier": "setup",
"path": "contracts_unified/core/internal/setup.py",
"snippet": "@Subroutine(TealType.none)\ndef setup(opup_amount: Expr) -> Expr:\n \"\"\"Setup the required pre-method OpUp and state handlers\"\"\"\n\n target = AppId()\n i = abi.Uint64()\n\n return Seq(\n # Get target\n # FIXME: Use the price caster when we can\n target.set(Txn.applications[1]),\n\n # Loop over the opup request\n # NOTE: We can't use the PyTEAL op-up because of ABI issues\n i.set(opup_amount),\n While(i.get() >= Global.min_txn_fee()).Do(\n InnerTxnBuilder.ExecuteMethodCall(\n app_id=target.get(),\n method_signature=\"nop()void\",\n args=[],\n extra_fields={TxnField.fee: Int(0)}\n ),\n i.set(i.get() - Global.min_txn_fee()),\n ),\n )"
},
{
"identifier": "sender_is_sig_validator",
"path": "contracts_unified/core/internal/validate_sender.py",
"snippet": "@ABIReturnSubroutine\ndef sender_is_sig_validator() -> Expr:\n \"\"\"Validates the sender is the signature validator \"\"\"\n\n return Assert(GlobalStateHandler.get_signature_validator() == Txn.sender())"
},
{
"identifier": "OrderStateHandler",
"path": "contracts_unified/core/state_handler/order_handler.py",
"snippet": "class OrderStateHandler:\n \"\"\"Implements the order book state handler for the core contract\"\"\"\n\n @staticmethod\n @ABIReturnSubroutine\n def get_order_id(\n order: OrderData,\n *,\n output: OrderId,\n ) -> Expr:\n \"\"\"Get the order ID for an order\"\"\"\n return output.set(Concat(ORDER_PREFIX, Sha512_256(order.encode())))\n\n @staticmethod\n @ABIReturnSubroutine\n def add_order(\n order: OrderData,\n ) -> Expr:\n \"\"\"Adds an order to the order book\"\"\"\n\n order_id = abi.make(OrderId)\n on_chain_data = OnChainOrderData()\n\n return Seq(\n # Create order ID\n order_id.set(OrderStateHandler.get_order_id(order)),\n\n # Check order does not exist\n (length := App.box_length(order_id.get())),\n If(length.hasValue(), Return()),\n\n # Create on-chain data\n order.sell_amount.use(lambda sell_amount:\n order.max_borrow_amount.use(lambda borrow_amount:\n order.max_repay_amount.use(lambda repay_amount:\n on_chain_data.set(sell_amount, borrow_amount, repay_amount)\n )\n )\n ),\n\n # Update box\n App.box_put(order_id.get(), on_chain_data.encode()),\n # Ensure we have enough funds for mbr\n cast(Expr, GlobalStateHandler.ensure_mbr_fund()),\n )\n\n @staticmethod\n @ABIReturnSubroutine\n def get_order_onchain(\n order_id: OrderId,\n *,\n output: OnChainOrderData,\n ) -> Expr:\n \"\"\"Gets an order from the order book\"\"\"\n\n return Seq(\n (result := App.box_get(order_id.get())),\n Assert(result.hasValue()),\n output.decode(result.value()),\n )\n\n @staticmethod\n # NOTE: Not a subroutine for performance reasons\n # NOTE: We don't use this to create a box so there is no need to ensure the fund\n def set_order_onchain(\n order_id: OrderId,\n data: OnChainOrderData,\n ) -> Expr:\n \"\"\"Sets an order in the order book\"\"\"\n return App.box_put(order_id.get(), data.encode())\n\n @staticmethod\n # NOTE: Not a subroutine for performance reasons\n def delete_order_onchain(\n order_id: OrderId,\n ) -> Expr:\n \"\"\"Deletes an order from the order book\"\"\"\n return Pop(App.box_delete(order_id.get()))"
},
{
"identifier": "AccountAddress",
"path": "contracts_unified/library/c3types.py",
"snippet": "class SignedInstrumentAmount(abi.NamedTuple):\nclass LiquidationFactors(abi.NamedTuple):\nclass InstrumentListElement(abi.NamedTuple):\nclass UserInstrumentData(abi.NamedTuple):\nclass OnChainOrderData(abi.NamedTuple):\nclass WormholeAddress(abi.NamedTuple):\nclass DecodedWormholePayload(abi.NamedTuple):"
},
{
"identifier": "SettleExtraData",
"path": "contracts_unified/library/c3types_server.py",
"snippet": "class SettleExtraData(abi.NamedTuple):\n \"\"\"Holds server data for the settle function\"\"\"\n # (uint64,uint64,uint64,uint64,uint64,uint64,uint64,uint64,uint64,uint64)\n\n buyer_fees: abi.Field[Amount]\n buyer_to_send: abi.Field[Amount]\n buyer_to_borrow: abi.Field[Amount]\n buyer_to_repay: abi.Field[Amount]\n buyer_negative_margin: abi.Field[Boolean]\n\n seller_fees: abi.Field[Amount]\n seller_to_send: abi.Field[Amount]\n seller_to_borrow: abi.Field[Amount]\n seller_to_repay: abi.Field[Amount]\n seller_negative_margin: abi.Field[Boolean]"
},
{
"identifier": "DelegationChain",
"path": "contracts_unified/library/c3types_user.py",
"snippet": "class OperationId:\nclass SigningMethod:\nclass SignedHeader(abi.NamedTuple):\nclass OperationMetaData(abi.NamedTuple):\nclass WithdrawData(abi.NamedTuple):\nclass PoolMoveData(abi.NamedTuple):\nclass DelegationData(abi.NamedTuple):\nclass LiquidationData(abi.NamedTuple):\nclass AccountMoveData(abi.NamedTuple):\nclass OrderData(abi.NamedTuple):"
},
{
"identifier": "signed_gte",
"path": "contracts_unified/library/signed_math.py",
"snippet": "@Subroutine(TealType.uint64)\ndef signed_gte(lhs: Expr, rhs: Expr) -> Expr:\n \"\"\"Signed greater than or equal to\"\"\"\n return Seq(\n If(signed_ltz(lhs))\n .Then(If(signed_ltz(rhs), lhs >= rhs, Int(0)))\n .Else(If(signed_ltz(rhs), Int(1), lhs >= rhs))\n )"
},
{
"identifier": "signed_ltz",
"path": "contracts_unified/library/signed_math.py",
"snippet": "def signed_ltz(value: Expr) -> Expr:\n \"\"\"Signed less than zero\"\"\"\n return value & Int(0x8000000000000000)"
},
{
"identifier": "signed_neg",
"path": "contracts_unified/library/signed_math.py",
"snippet": "@Subroutine(TealType.uint64)\ndef signed_neg(value: Expr) -> Expr:\n \"\"\"Signed negation\"\"\"\n # Special case for zero because of wrap around\n return If(Not(value), value, ~value + Int(1))"
}
] | from typing import cast
from pyteal import (
ABIReturnSubroutine,
And,
Assert,
BytesGe,
BytesMul,
Expr,
Global,
If,
Int,
Itob,
MethodSignature,
Not,
OnComplete,
Or,
Seq,
abi,
)
from contracts_unified.core.c3call import (
ARG_INDEX_ACCOUNT,
ARG_INDEX_OP,
ARG_INDEX_SELECTOR,
)
from contracts_unified.core.internal.health_check import health_check
from contracts_unified.core.internal.move import collect_fees, signed_add_to_cash
from contracts_unified.core.internal.perform_pool_move import perform_pool_move
from contracts_unified.core.internal.setup import setup
from contracts_unified.core.internal.validate_sender import sender_is_sig_validator
from contracts_unified.core.state_handler.order_handler import OrderStateHandler
from contracts_unified.library.c3types import (
AccountAddress,
Amount,
Boolean,
ExcessMargin,
InstrumentId,
OnChainOrderData,
OrderId,
SignedAmount,
)
from contracts_unified.library.c3types_server import SettleExtraData
from contracts_unified.library.c3types_user import (
DelegationChain,
OperationId,
OperationMetaData,
OrderData,
)
from contracts_unified.library.signed_math import signed_gte, signed_ltz, signed_neg | 7,760 | user_op: OperationMetaData,
_delegation_chain: DelegationChain,
server_args: SettleExtraData,
opup_budget: Amount,
) -> Expr:
"""Settles two orders
Arguments:
add_order_txn (ApplicationCallTransaction): The previous add_order transaction in this group that added the sell order to the order book.
buy_account (AccountAddress): The buyer user's account address.
user_op (OperationMetaData): Operation metadata containing buyer order data.
_delegation_chain (DelegationChain): Delegation chain. Unused.
server_args (SettleExtraData): Extra data for the settle operation.
opup_budget (Amount): Additional computation budget to allocate to this transaction.
"""
abi_false = abi.Bool()
add_order_op = OperationMetaData()
add_order_data = abi.make(abi.DynamicBytes)
buy_order = OrderData()
sell_order = OrderData()
sell_account = AccountAddress()
buy_order_id = abi.make(OrderId)
sell_order_id = abi.make(OrderId)
buy_order_onchain = OnChainOrderData()
sell_order_onchain = OnChainOrderData()
# Amounts for each order's buy/sell side
buyer_sell_amount = Amount()
buyer_buy_amount = Amount()
seller_sell_amount = Amount()
seller_buy_amount = Amount()
# Remaining amounts for each order's buy/sell side
buyer_sell_remaining = Amount()
buyer_borrow_remaining = Amount()
buyer_repay_remaining = Amount()
seller_sell_remaining = Amount()
seller_borrow_remaining = Amount()
seller_repay_remaining = Amount()
# New remaining amounts for each order's buy/sell side
buyer_new_sell_remaining = Amount()
buyer_new_borrow_remaining = Amount()
buyer_new_repay_remaining = Amount()
seller_new_sell_remaining = Amount()
seller_new_borrow_remaining = Amount()
seller_new_repay_remaining = Amount()
buyer_new_order_onchain = OnChainOrderData()
seller_new_order_onchain = OnChainOrderData()
buyer_buy_instrument = InstrumentId()
buyer_sell_instrument = InstrumentId()
seller_buy_instrument = InstrumentId()
seller_sell_instrument = InstrumentId()
buyer_to_send = Amount()
seller_to_send = Amount()
buyer_to_borrow = Amount()
seller_to_borrow = Amount()
buyer_to_repay = Amount()
seller_to_repay = Amount()
buyer_buy_delta = Amount()
seller_buy_delta = Amount()
buyer_sell_delta = Amount()
seller_sell_delta = Amount()
neg_borrow = SignedAmount()
buyer_fees = Amount()
seller_fees = Amount()
buyer_old_health = ExcessMargin()
buyer_health = ExcessMargin()
seller_old_health = ExcessMargin()
seller_health = ExcessMargin()
buyer_negative_margin = Boolean()
seller_negative_margin = Boolean()
return Seq(
setup(opup_budget.get()),
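# setup() buys extra opcode budget by issuing no-op inner app calls (see internal/setup.py)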
# Set constants
abi_false.set(Int(0)),
# Validate the sender is the signature validator
cast(Expr, sender_is_sig_validator()),
# Extract the buy order
user_op.operation.use(lambda op_data:
Seq(
buy_order.decode(op_data.get()),
buy_order.operation.use(lambda op: Assert(op.get() == OperationId.Settle)),
buy_order.account.use(lambda acc: Assert(acc.get() == buy_account.get())),
)
),
# Add the order to the order book
cast(Expr, OrderStateHandler.add_order(buy_order)),
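# add_order returns early if an identical order box already exists, so re-submitting the same buy order is a no-op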
# Validate the sell order
Assert(add_order_txn.get().application_id() == Global.current_application_id()),
Assert(add_order_txn.get().on_completion() == OnComplete.NoOp),
Assert(add_order_txn.get().application_args.length() == ADD_ORDER_ARG_COUNT),
Assert(add_order_txn.get().application_args[ARG_INDEX_SELECTOR] == ADD_ORDER_SIG),
# Get the sell order
sell_account.decode(add_order_txn.get().application_args[ARG_INDEX_ACCOUNT]),
| """
Implements Core contract method for settling a pair of orders.
"""
ADD_ORDER_SIG = MethodSignature("add_order(address,((address,byte[32],uint64),byte[],byte[],uint8,byte[],address,byte[]),((address,byte[32],uint64),byte[],byte[],uint8,byte[],address,byte[])[],uint64)void")
ADD_ORDER_ARG_COUNT = Int(5)
MAX_FEES_DIVISOR = Int(40)
@ABIReturnSubroutine
def add_order(
# NOTE: Any update on this function must update ADD_ORDER_SIG and ADD_ORDER_ARG_COUNT above
account: AccountAddress,
user_op: OperationMetaData,
_delegation_chain: DelegationChain,
opup_budget: Amount,
) -> Expr:
"""Adds an order to the order book
Arguments:
account (AccountAddress): User's account address.
user_op (OperationMetaData): Operation metadata containing order data.
_delegation_chain (DelegationChain): Delegation chain. Unused.
opup_budget (Amount): Additional computation budget to allocate to this transaction.
"""
order = OrderData()
return Seq(
setup(opup_budget.get()),
# Validate the call comes from the signature validator
cast(Expr, sender_is_sig_validator()),
# Get order from user_op.data
user_op.operation.use(lambda op_data:
Seq(
order.decode(op_data.get()),
order.operation.use(lambda op: Assert(op.get() == OperationId.Settle)),
order.account.use(lambda acc: Assert(acc.get() == account.get()))
)
),
# Add order to the order book
cast(Expr, OrderStateHandler.add_order(order))
)
@ABIReturnSubroutine
def settle(
add_order_txn: abi.ApplicationCallTransaction,
buy_account: AccountAddress,
user_op: OperationMetaData,
_delegation_chain: DelegationChain,
server_args: SettleExtraData,
opup_budget: Amount,
) -> Expr:
"""Settles two orders
Arguments:
add_order_txn (ApplicationCallTransaction): The previous add_order transaction in this group that added the sell order to the order book.
buy_account (AccountAddress): The buyer user's account address.
user_op (OperationMetaData): Operation metadata containing buyer order data.
_delegation_chain (DelegationChain): Delegation chain. Unused.
server_args (SettleExtraData): Extra data for the settle operation.
opup_budget (Amount): Additional computation budget to allocate to this transaction.
"""
abi_false = abi.Bool()
add_order_op = OperationMetaData()
add_order_data = abi.make(abi.DynamicBytes)
buy_order = OrderData()
sell_order = OrderData()
sell_account = AccountAddress()
buy_order_id = abi.make(OrderId)
sell_order_id = abi.make(OrderId)
buy_order_onchain = OnChainOrderData()
sell_order_onchain = OnChainOrderData()
# Amounts for each order's buy/sell side
buyer_sell_amount = Amount()
buyer_buy_amount = Amount()
seller_sell_amount = Amount()
seller_buy_amount = Amount()
# Remaining amounts for each order's buy/sell side
buyer_sell_remaining = Amount()
buyer_borrow_remaining = Amount()
buyer_repay_remaining = Amount()
seller_sell_remaining = Amount()
seller_borrow_remaining = Amount()
seller_repay_remaining = Amount()
# New remaining amounts for each order's buy/sell side
buyer_new_sell_remaining = Amount()
buyer_new_borrow_remaining = Amount()
buyer_new_repay_remaining = Amount()
seller_new_sell_remaining = Amount()
seller_new_borrow_remaining = Amount()
seller_new_repay_remaining = Amount()
buyer_new_order_onchain = OnChainOrderData()
seller_new_order_onchain = OnChainOrderData()
buyer_buy_instrument = InstrumentId()
buyer_sell_instrument = InstrumentId()
seller_buy_instrument = InstrumentId()
seller_sell_instrument = InstrumentId()
buyer_to_send = Amount()
seller_to_send = Amount()
buyer_to_borrow = Amount()
seller_to_borrow = Amount()
buyer_to_repay = Amount()
seller_to_repay = Amount()
buyer_buy_delta = Amount()
seller_buy_delta = Amount()
buyer_sell_delta = Amount()
seller_sell_delta = Amount()
neg_borrow = SignedAmount()
buyer_fees = Amount()
seller_fees = Amount()
buyer_old_health = ExcessMargin()
buyer_health = ExcessMargin()
seller_old_health = ExcessMargin()
seller_health = ExcessMargin()
buyer_negative_margin = Boolean()
seller_negative_margin = Boolean()
return Seq(
setup(opup_budget.get()),
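# setup() buys extra opcode budget by issuing no-op inner app calls (see internal/setup.py)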
# Set constants
abi_false.set(Int(0)),
# Validate the sender is the signature validator
cast(Expr, sender_is_sig_validator()),
# Extract the buy order
user_op.operation.use(lambda op_data:
Seq(
buy_order.decode(op_data.get()),
buy_order.operation.use(lambda op: Assert(op.get() == OperationId.Settle)),
buy_order.account.use(lambda acc: Assert(acc.get() == buy_account.get())),
)
),
# Add the order to the order book
cast(Expr, OrderStateHandler.add_order(buy_order)),
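# add_order returns early if an identical order box already exists, so re-submitting the same buy order is a no-op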
# Validate the sell order
Assert(add_order_txn.get().application_id() == Global.current_application_id()),
Assert(add_order_txn.get().on_completion() == OnComplete.NoOp),
Assert(add_order_txn.get().application_args.length() == ADD_ORDER_ARG_COUNT),
Assert(add_order_txn.get().application_args[ARG_INDEX_SELECTOR] == ADD_ORDER_SIG),
# Get the sell order
sell_account.decode(add_order_txn.get().application_args[ARG_INDEX_ACCOUNT]), | add_order_op.decode(add_order_txn.get().application_args[ARG_INDEX_OP]), | 1 | 2023-11-17 20:54:15+00:00 | 12k |
gunderson-dettmer/CE2OCF | CE2OCF/datamap/parsers.py | [
{
"identifier": "__version__",
"path": "CE2OCF/__about__.py",
"snippet": ""
},
{
"identifier": "traverse_datamap",
"path": "CE2OCF/datamap/crawler.py",
"snippet": "def traverse_datamap(\n datamap: dict[str, Any] | BaseModel | str | list | FieldPostProcessorModel,\n field_name: str | None,\n ce_objs: list[ContractExpressVarObj],\n post_processor: Callable | None = None,\n iteration: int | None = None,\n value_overrides: dict[str, Any] | None = None,\n fail_on_missing_variable: bool = False,\n drop_null_leaves: bool = True,\n) -> dict[str, Any] | str | bool | float | int | list | None:\n \"\"\"\n Recursively traverse the CE-2-OCF datamap and generate the desired object.\n\n Args:\n drop_null_leaves: If True, don't retain leaf keys where value is null / None\n fail_on_missing_variable: If set to true, if any CE variable is NOT found, the function will\n raise a ValueError. If set to False, you'll get a None. Helpful for debugging or\n mission-critical applications where you don't want silent failures where you get a\n resulting dictionary from the datamap, but it's missing values.\n value_overrides: If this is populated, check to see if leaf value variable name is in this dict, and if so, use\n value provided in dict. Useful in iterative loops where we want to lock value of subsequent\n iterations to first loop. Once this has been provided at a level within the datamap, all\n children of that specific field (if it's a nested dict) will receive the same overrides unless\n overriden by a child tree.\n iteration: Where we want to parse CE objs for multiple repetitions of the same values, set iteration to indicate\n which iteration to look at. Warning - once specified, all recursive calls from the first call to have\n iteration defined will receive the same iteration value.\n datamap (Union[Dict[str, Any], BaseModel]): The current level of the datamap.\n field_name: Field name we're looking for.\n post_processor: A function to process the extracted value for field_name. 
Expects two args, raw value and the\n ce_json list\n ce_objs (List[Any]): The list of objects to extract values from.\n\n Returns:\n Dict[str, Any]: The resulting object.\n \"\"\"\n\n logger.debug(f\"\\n\\n* --- Traversing field {field_name} iteration {iteration}\")\n logger.debug(f\"\\tOverrides: {value_overrides}\")\n if post_processor is not None:\n logger.debug(f\"\\tPost processor registered for field {field_name}: {post_processor}\")\n else:\n logger.debug(f\"\\tNo post processor registered for field {field_name}\")\n\n if value_overrides is None:\n value_overrides = {}\n\n result: str | bool | int | float | dict | list | None = None\n\n if isinstance(datamap, str):\n result = handle_string_datamap(\n datamap,\n field_name,\n ce_objs,\n post_processor=post_processor,\n iteration=iteration,\n value_overrides=value_overrides,\n fail_on_missing_variable=fail_on_missing_variable,\n )\n\n elif isinstance(datamap, (int, float, bool)):\n result = datamap\n elif isinstance(datamap, list):\n result = handle_list_datamap(datamap, field_name, ce_objs, iteration=iteration, value_overrides=value_overrides)\n elif isinstance(datamap, dict):\n result = handle_dict_datamap(\n datamap,\n field_name,\n ce_objs,\n iteration=iteration,\n value_overrides=value_overrides,\n fail_on_missing_variable=fail_on_missing_variable,\n )\n elif isinstance(\n datamap,\n (\n OverridableStringField,\n OverridableFloatField,\n OverridableBoolField,\n OverridableIntField,\n ),\n ):\n result = handle_overridable_datamap(datamap)\n if not isinstance(result, bool):\n result = str(result)\n\n elif issubclass(datamap.__class__, FieldPostProcessorModel):\n\n # RepeatableDataMap is a sublass of FieldPostProcessorModel, so test for that here...\n if issubclass(datamap.__class__, RepeatableDataMap):\n logger.debug(f\"{datamap} is subclass of RepeatableDataMap\")\n result = handle_repeatable_model_datamap(\n datamap, # noqa: typing has trouble interpreting the implications of issubclass... ignore warning\n field_name,\n ce_objs,\n value_overrides=value_overrides,\n fail_on_missing_variable=fail_on_missing_variable,\n drop_null_leaves=drop_null_leaves,\n )\n\n # And, if we're not looking at a RepeatableDataMap, use FieldPostProcessorModel regular logic\n else:\n logger.debug(\"Datamap is not a subclass of RepeatableDataMap\")\n result = traverse_field_post_processor_model(\n datamap,\n ce_objs,\n iteration=iteration,\n value_overrides=value_overrides,\n fail_on_missing_variable=fail_on_missing_variable,\n )\n\n elif issubclass(datamap.__class__, BaseModel):\n result = handle_base_model_datamap(\n datamap,\n field_name,\n ce_objs,\n iteration=iteration,\n value_overrides=value_overrides,\n fail_on_missing_variable=fail_on_missing_variable,\n drop_null_leaves=drop_null_leaves,\n )\n elif datamap is None:\n logger.warning(\"Datamap was None\")\n\n else:\n logger.error(f\"Unexpected value for datamap: {datamap} {type(datamap)}\")\n\n if post_processor is not None:\n logger.debug(\n f\"\\tXXX - Datamap with name {field_name} has a postprocessor for this field... with initial value: {result}\"\n )\n result = post_processor(result, ce_objs)\n logger.debug(f\"Post-processed value: {result}\")\n\n if result == {}:\n return None\n\n return result"
},
{
"identifier": "DEFAULT_CE_TO_OCF_DATAMAP_PREFERRED_STOCK_LEGEND_ONLY_PATH",
"path": "CE2OCF/datamap/loaders.py",
"snippet": "DEFAULT_CE_TO_OCF_DATAMAP_PREFERRED_STOCK_LEGEND_ONLY_PATH = (\n DEFAULTS_PATH / \"ce_to_ocf_datamap_preferred_stock_legend_only.json\"\n)"
},
{
"identifier": "DEFAULT_CE_TO_OCF_PREFERRED_STOCK_CLASS_ONLY_PATH",
"path": "CE2OCF/datamap/loaders.py",
"snippet": "DEFAULT_CE_TO_OCF_PREFERRED_STOCK_CLASS_ONLY_PATH = DEFAULTS_PATH / \"ce_to_ocf_preferred_stock_class_only.json\""
},
{
"identifier": "load_ce_to_ocf_issuer_datamap",
"path": "CE2OCF/datamap/loaders.py",
"snippet": "def load_ce_to_ocf_issuer_datamap(source_json: Optional[Path] = None) -> IssuerDataMap:\n if source_json is None:\n source_json = DEFAULT_CE_TO_OCF_ISSUER_ONLY_PATH\n return IssuerDataMap.parse_file(source_json)"
},
{
"identifier": "load_ce_to_ocf_stakeholder_datamap",
"path": "CE2OCF/datamap/loaders.py",
"snippet": "def load_ce_to_ocf_stakeholder_datamap(source_json: Optional[Path] = None) -> RepeatableStockholderDataMap:\n \"\"\"\n Loads a RepeatableStockholderDataMap from a json configuration file at source_json. Defaults to the defaults in\n config/defaults.\n\n Args: source_json: Json configuration file mapping ocf fields to ce json data fields. Defaults to\n DEFAULT_CE_TO_OCF_STOCKHOLDERS_ONLY_PATH\n\n Returns: RepeatableStockholderDataMap\n\n \"\"\"\n if source_json is None:\n source_json = DEFAULT_CE_TO_OCF_STOCKHOLDERS_ONLY_PATH\n\n return RepeatableStockholderDataMap.parse_file(source_json)"
},
{
"identifier": "load_ce_to_ocf_stock_class_datamap",
"path": "CE2OCF/datamap/loaders.py",
"snippet": "def load_ce_to_ocf_stock_class_datamap(source_json: Optional[Path] = None) -> StockClassDataMap:\n \"\"\"\n Loads a StockClassDataMap from a json configuration file at source_json. Defaults to the defaults in\n config/defaults. WARNING - DEFAULT IS FOR COMMON\n\n Args: source_json: son configuration file mapping ocf fields to ce json data fields. Defaults to\n DEFAULT_CE_TO_OCF_COMMON_STOCK_CLASS_ONLY_PATH\n\n Returns: StockClassDataMap\n\n \"\"\"\n if source_json is None:\n source_json = DEFAULT_CE_TO_OCF_COMMON_STOCK_CLASS_ONLY_PATH\n return StockClassDataMap.parse_file(source_json)"
},
{
"identifier": "load_ce_to_ocf_stock_legend_datamap",
"path": "CE2OCF/datamap/loaders.py",
"snippet": "def load_ce_to_ocf_stock_legend_datamap(source_json: Optional[Path] = None) -> StockLegendDataMap:\n \"\"\"\n Loads a StockLegendDataMap from a json configuration file at source_json. Defaults to the defaults in\n config/defaults. WARNING - DEFAULT IS FOR GUNDERSON COMMON LEGENDS\n\n Args: source_json: Json configuration file mapping ocf fields to ce json data fields. Defaults to\n DEFAULT_CE_TO_OCF_DATAMAP_COMMON_STOCK_LEGEND_ONLY_PATH\n\n Returns: StockLegendDataMap\n\n \"\"\"\n if source_json is None:\n source_json = DEFAULT_CE_TO_OCF_DATAMAP_COMMON_STOCK_LEGEND_ONLY_PATH\n\n return StockLegendDataMap.parse_file(source_json)"
},
{
"identifier": "load_ce_to_ocf_stock_plan_datamap",
"path": "CE2OCF/datamap/loaders.py",
"snippet": "def load_ce_to_ocf_stock_plan_datamap(source_json: Optional[Path] = None) -> StockPlanDataMap:\n \"\"\"\n Loads a StockPlanDataMap from a json configuration file at source_json. Defaults to the default datamap in\n config/defaults.\n\n :param source_json:Json configuration file mapping ocf fields to ce json data fields. Defaults to\n DEFAULT_CE_TO_OCF_STOCK_PLAN_ONLY_PATH\n\n :return: StockPlanDataMap\n \"\"\"\n\n if source_json is None:\n source_json = DEFAULT_CE_TO_OCF_STOCK_PLAN_ONLY_PATH\n\n return StockPlanDataMap.parse_file(source_json)"
},
{
"identifier": "load_ce_to_ocf_vested_issuances_datamap",
"path": "CE2OCF/datamap/loaders.py",
"snippet": "def load_ce_to_ocf_vested_issuances_datamap(\n source_json: Optional[Path] = None,\n) -> RepeatableFullyVestedStockIssuanceDataMap:\n \"\"\"\n Loads a RepeatableFullyVestedStockIssuanceDataMap from a json configuration file at source_json. Defaults to\n the defaults in config/defaults. Meant for use with issuances that don't vest. Typically, founder preferred.\n\n Args: source_json: Json configuration file mapping ocf fields to ce json data fields. Defaults to\n DEFAULT_CE_TO_OCF_PREFERRED_STOCK_ISSUANCE_ONLY_PATH\n\n Returns: RepeatableFullyVestedStockIssuanceDataMap\n \"\"\"\n if source_json is None:\n source_json = DEFAULT_CE_TO_OCF_PREFERRED_STOCK_ISSUANCE_ONLY_PATH\n\n return RepeatableFullyVestedStockIssuanceDataMap.parse_file(source_json)"
},
{
"identifier": "load_ce_to_ocf_vesting_issuances_datamap",
"path": "CE2OCF/datamap/loaders.py",
"snippet": "def load_ce_to_ocf_vesting_issuances_datamap(\n source_json: Optional[Path] = None,\n) -> RepeatableVestingStockIssuanceDataMap:\n \"\"\"\n Loads a RepeatableVestingStockIssuanceDataMap from a json configuration file at source_json. Defaults to\n the defaults in config/defaults. Meant for use with issuances that can vest. Typically founder common.\n\n Args: source_json: Json configuration file mapping ocf fields to ce json data fields. Defaults to\n DEFAULT_CE_TO_OCF_COMMON_STOCK_ISSUANCE_ONLY_PATH\n\n Returns: RepeatableVestingStockIssuanceDataMap\n \"\"\"\n if source_json is None:\n source_json = DEFAULT_CE_TO_OCF_COMMON_STOCK_ISSUANCE_ONLY_PATH\n\n return RepeatableVestingStockIssuanceDataMap.parse_file(source_json)"
},
{
"identifier": "load_vesting_events_driving_enums_datamap",
"path": "CE2OCF/datamap/loaders.py",
"snippet": "def load_vesting_events_driving_enums_datamap(\n source_jsons: Optional[Path] = None,\n) -> RepeatableVestingEventDriversDataMap:\n \"\"\"\n Loads a RepeatableVestingEventDriversDataMap from data map in source_jsons path. If none provided, use the\n default datamap in DEFAULT_CE_ENUMS_TO_OCF_VESTING_SCHEDULE_ONLY_PATH.\n\n Args:\n source_jsons: Json configuration file mapping ocf fields to ce json data fields. Defaults to\n DEFAULT_CE_ENUMS_TO_OCF_VESTING_EVENTS_ONLY_PATH\n\n Returns: RepeatableVestingEventDriversDataMap\n\n \"\"\"\n if source_jsons is None:\n source_jsons = DEFAULT_CE_ENUMS_TO_OCF_VESTING_EVENTS_ONLY_PATH\n return RepeatableVestingEventDriversDataMap.parse_file(source_jsons)"
},
{
"identifier": "load_vesting_schedule_driving_enums_datamap",
"path": "CE2OCF/datamap/loaders.py",
"snippet": "def load_vesting_schedule_driving_enums_datamap(\n source_jsons: Optional[Path] = None,\n) -> RepeatableVestingScheduleDriversDataMap:\n \"\"\"\n Loads a RepeatableVestingScheduleDriversDataMap from data map in source_jsons path. If none provided, use the\n default datamap in DEFAULT_CE_ENUMS_TO_OCF_VESTING_SCHEDULE_ONLY_PATH.\n\n Args:\n source_jsons: Json configuration file mapping ocf fields to ce json data fields. Defaults to\n DEFAULT_CE_ENUMS_TO_OCF_VESTING_SCHEDULE_ONLY_PATH\n\n Returns: RepeatableVestingScheduleDriversDataMap\n \"\"\"\n if source_jsons is None:\n source_jsons = DEFAULT_CE_ENUMS_TO_OCF_VESTING_SCHEDULE_ONLY_PATH\n return RepeatableVestingScheduleDriversDataMap.parse_file(source_jsons)"
},
{
"identifier": "FullyVestedStockIssuanceDataMap",
"path": "CE2OCF/ocf/datamaps.py",
"snippet": "class FullyVestedStockIssuanceDataMap(FieldPostProcessorModel):\n id: Union[str, OverridableStringField] = Field(default_factory=lambda: {\"static\": uuid.uuid4().__str__()})\n date: Union[str, OverridableStringField] = Field(\n default_factory=lambda: {\"static\": datetime.now(timezone.utc).date().isoformat()}\n )\n object_type: Union[str, OverridableStringField] = Field(default_factory=lambda: {\"static\": \"TX_STOCK_ISSUANCE\"})\n security_id: Union[str, OverridableStringField] = Field(default_factory=lambda: {\"static\": uuid.uuid4().__str__()})\n custom_id: Union[str, OverridableStringField]\n comments: list[Union[str, OverridableStringField]]\n stakeholder_id: Union[str, OverridableStringField]\n board_approval_date: Union[str, OverridableStringField] = Field(\n default_factory=lambda: {\"static\": datetime.now(timezone.utc).date().isoformat()}\n )\n consideration_text: Union[str, OverridableStringField]\n security_law_exemptions: list = []\n stock_class_id: Union[str, OverridableStringField]\n share_price: CurrencyDatamap\n quantity: Union[str, OverridableStringField]\n cost_basis: CurrencyDatamap\n stock_legend_ids: list[Union[str, OverridableStringField]]"
},
{
"identifier": "IssuerDataMap",
"path": "CE2OCF/ocf/datamaps.py",
"snippet": "class IssuerDataMap(FieldPostProcessorModel):\n id: Union[str, OverridableStringField] = Field(default_factory=lambda: {\"static\": uuid.uuid4().__str__()})\n legal_name: Union[str, OverridableStringField]\n dba: Union[str, OverridableStringField]\n country_of_formation: Union[str, OverridableStringField]\n country_subdivision_of_formation: Union[str, OverridableStringField]\n formation_date: Union[str, OverridableStringField] = Field(\n default_factory=lambda: {\"static\": datetime.now(timezone.utc).date().isoformat()}\n )\n object_type: OverridableStringField = Field(default_factory=lambda: {\"static\": \"ISSUER\"})\n tax_ids: Optional[list[Union[str, OverridableStringField]]]\n address: AddressDataMap\n phone: Optional[PhoneDataMap]\n comments: list[Union[str, OverridableStringField]]"
},
{
"identifier": "RepeatableStockholderDataMap",
"path": "CE2OCF/ocf/datamaps.py",
"snippet": "class RepeatableStockholderDataMap(RepeatableDataMap):\n repeated_pattern: StockholderDataMap"
},
{
"identifier": "StockClassDataMap",
"path": "CE2OCF/ocf/datamaps.py",
"snippet": "class StockClassDataMap(FieldPostProcessorModel):\n id: Union[str, OverridableStringField] = Field(default_factory=lambda: {\"static\": uuid.uuid4().__str__()})\n name: Union[str, OverridableStringField]\n object_type: OverridableStringField = Field(default_factory=lambda: {\"static\": \"STOCK_CLASS\"})\n class_type: OverridableStringField\n default_id_prefix: Union[str, OverridableStringField]\n initial_shares_authorized: Union[str, OverridableStringField]\n board_approval_date: Union[str, OverridableStringField] = Field(\n default_factory=lambda: {\"static\": datetime.now(timezone.utc).date().isoformat()}\n )\n votes_per_share: Union[str, OverridableStringField]\n par_value: CurrencyDatamap\n price_per_share: CurrencyDatamap\n seniority: Union[str, OverridableStringField]\n conversion_rights: list[ConversionRightsDataMap]\n liquidation_preference_multiple: Union[str, OverridableStringField]\n participation_cap_multiple: Union[str, OverridableStringField]\n comments: list[Union[str, OverridableStringField]]"
},
{
"identifier": "StockLegendDataMap",
"path": "CE2OCF/ocf/datamaps.py",
"snippet": "class StockLegendDataMap(FieldPostProcessorModel):\n id: Union[str, OverridableStringField] = Field(default_factory=lambda: {\"static\": uuid.uuid4().__str__()})\n object_type: Union[str, OverridableStringField] = Field(default_factory=lambda: {\"static\": \"STOCK_LEGEND_TEMPLATE\"})\n comments: list[Union[str, OverridableStringField]]\n name: Union[str, OverridableStringField]\n text: Union[str, OverridableStringField]"
},
{
"identifier": "StockPlanDataMap",
"path": "CE2OCF/ocf/datamaps.py",
"snippet": "class StockPlanDataMap(FieldPostProcessorModel):\n id: Union[str, OverridableStringField] = Field(default_factory=lambda: {\"static\": uuid.uuid4().__str__()})\n object_type: Union[str, OverridableStringField] = Field(default_factory=lambda: {\"static\": \"STOCK_PLAN\"})\n plan_name: Union[\n str, OverridableStringField\n ] # We actually don't get plan name but year, so we need a post-processor\n stock_class_id: Union[str, OverridableStringField]\n board_approval_date: Union[str, OverridableStringField] = Field(\n default_factory=lambda: {\"static\": datetime.now(timezone.utc).date().isoformat()}\n )\n stockholder_approval_date: Union[str, OverridableStringField] = Field(\n default_factory=lambda: {\"static\": datetime.now(timezone.utc).date().isoformat()}\n )\n initial_shares_reserved: Union[str, OverridableStringField]\n comments: list[Union[str, OverridableStringField]]"
},
{
"identifier": "VestingScheduleInputsDataMap",
"path": "CE2OCF/ocf/datamaps.py",
"snippet": "class VestingScheduleInputsDataMap(FieldPostProcessorModel):\n \"\"\"\n This is the same as VestingEventsInputsDataMap BUT the different class definitions let us register\n different post-processors have fewer worries that someone forgets to register and/or de-register post-processors\n \"\"\"\n\n vesting_schedule: VestingDrivingEnumsDataMap"
},
{
"identifier": "VestingStockIssuanceDataMap",
"path": "CE2OCF/ocf/datamaps.py",
"snippet": "class VestingStockIssuanceDataMap(FieldPostProcessorModel):\n id: Union[str, OverridableStringField] = Field(default_factory=lambda: {\"static\": uuid.uuid4().__str__()})\n date: Union[str, OverridableStringField] = Field(\n default_factory=lambda: {\"static\": datetime.now(timezone.utc).date().isoformat()}\n )\n object_type: Union[str, OverridableStringField] = Field(default_factory=lambda: {\"static\": \"TX_STOCK_ISSUANCE\"})\n security_id: Union[str, OverridableStringField] = Field(default_factory=lambda: {\"static\": uuid.uuid4().__str__()})\n custom_id: Union[str, OverridableStringField]\n comments: list[Union[str, OverridableStringField]]\n stakeholder_id: Union[str, OverridableStringField]\n board_approval_date: Union[str, OverridableStringField] = Field(\n default_factory=lambda: {\"static\": datetime.now(timezone.utc).date().isoformat()}\n )\n consideration_text: Union[str, OverridableStringField]\n security_law_exemptions: list = []\n stock_class_id: Union[str, OverridableStringField]\n share_price: CurrencyDatamap\n quantity: Union[str, OverridableStringField]\n cost_basis: CurrencyDatamap\n stock_legend_ids: list[Union[str, OverridableStringField]]\n vesting_terms_id: Optional[Union[str, OverridableStringField]]"
},
{
"identifier": "generate_vesting_start_id",
"path": "CE2OCF/ocf/generators/ocf_id_generators.py",
"snippet": "def generate_vesting_start_id(schedule_id: str) -> str:\n return f\"{schedule_id} | Start\""
},
{
"identifier": "generate_vesting_start_event",
"path": "CE2OCF/ocf/generators/ocf_vesting_events.py",
"snippet": "def generate_vesting_start_event(\n vesting_commencement_date: datetime.date,\n issuance_id: str = \"\",\n vesting_start_condition_id: str = \"\",\n) -> dict:\n return {\n \"object_type\": \"TX_VESTING_START\",\n \"id\": uuid.uuid4().__str__(),\n \"security_id\": issuance_id,\n \"vesting_condition_id\": vesting_start_condition_id,\n \"date\": vesting_commencement_date.isoformat(),\n }"
},
{
"identifier": "ContractExpressVarObj",
"path": "CE2OCF/types/dictionaries.py",
"snippet": "class ContractExpressVarObj(TypedDict):\n name: str\n values: list[Any]\n repetition: Optional[str]"
},
{
"identifier": "VestingTypesEnum",
"path": "CE2OCF/types/enums.py",
"snippet": "class VestingTypesEnum(str, enum.Enum):\n FOUR_YR_1_YR_CLIFF = \"4yr with 1yr Cliff\"\n FOUR_YR_NO_CLIFF = \"4yr with no Cliff\"\n FULLY_VESTED = \"Fully Vested\"\n CUSTOM = \"Custom\" # We're not going to support this via OCF"
},
{
"identifier": "VariableNotFound",
"path": "CE2OCF/types/exceptions.py",
"snippet": "class VariableNotFound(Exception):\n \"\"\"\n Special exception to throw where we can't find a variable name in CE Jsons\n \"\"\"\n\n pass"
}
] | import datetime
from pathlib import Path
from typing import Callable, Literal, Optional
from CE2OCF import __version__ as version
from CE2OCF.datamap.crawler import traverse_datamap
from CE2OCF.datamap.loaders import (
DEFAULT_CE_TO_OCF_DATAMAP_PREFERRED_STOCK_LEGEND_ONLY_PATH,
DEFAULT_CE_TO_OCF_PREFERRED_STOCK_CLASS_ONLY_PATH,
load_ce_to_ocf_issuer_datamap,
load_ce_to_ocf_stakeholder_datamap,
load_ce_to_ocf_stock_class_datamap,
load_ce_to_ocf_stock_legend_datamap,
load_ce_to_ocf_stock_plan_datamap,
load_ce_to_ocf_vested_issuances_datamap,
load_ce_to_ocf_vesting_issuances_datamap,
load_vesting_events_driving_enums_datamap,
load_vesting_schedule_driving_enums_datamap,
)
from CE2OCF.ocf.datamaps import (
FullyVestedStockIssuanceDataMap,
IssuerDataMap,
RepeatableStockholderDataMap,
StockClassDataMap,
StockLegendDataMap,
StockPlanDataMap,
VestingScheduleInputsDataMap,
VestingStockIssuanceDataMap,
)
from CE2OCF.ocf.generators.ocf_id_generators import (
generate_vesting_start_id,
)
from CE2OCF.ocf.generators.ocf_vesting_events import (
generate_vesting_start_event,
)
from CE2OCF.types.dictionaries import ContractExpressVarObj
from CE2OCF.types.enums import VestingTypesEnum
from CE2OCF.types.exceptions import VariableNotFound | 8,491 | raise ValueError("We only support COMMON or PREFERRED datamaps")
ocf_stock_legend = traverse_datamap(
stock_legend_datamap,
None,
ce_jsons,
value_overrides={"PARSER_VERSION": version, **value_overrides},
fail_on_missing_variable=fail_on_missing_variable,
)
# TODO - improve type checking to check for actual target OCF schema
assert isinstance(ocf_stock_legend, dict), f"Expected ocf_stock_legend to be dict, got {type(ocf_stock_legend)}"
return ocf_stock_legend
def parse_ocf_stakeholders_from_ce_json(
ce_jsons: list[ContractExpressVarObj],
post_processors: Optional[dict[str, Callable]] = None,
clear_old_post_processors: bool = True,
fail_on_missing_variable: bool = False,
custom_datamap_path: Optional[Path] = None,
value_overrides: Optional[dict[str, str]] = None,
) -> list[dict]:
"""
    By default, loads our default ce to ocf stakeholder datamap (though you can provide a path to your own JSON datamap)
and uses it to parse a list of valid OCF stakeholder objects from a list of ce_json objects.
Args:
ce_jsons: List of CE Jsons matching schema defined in ContractExpressVarObj
clear_old_post_processors: If True, unregister all existing handlers to RepeatableStockholderDataMap before
registering new post processors. Good idea generally to ensure no handlers
remain registered from elsewhere in your code base and is True by default.
post_processors (optional): A dictionary mapping stakeholder object data field names to functions which
you want to run on the parsed data - e.g. if your questionnaire has data that
needs to be formatted or parsed.
fail_on_missing_variable: Set to True if you want to get an error if any data fields are missing.
custom_datamap_path: If you want to use a custom datamap, provide path to json file
value_overrides: If provided, inject this variable value lookup into parser which will override anything in CE
Returns: List of valid ocf stakeholder objects
"""
if clear_old_post_processors:
RepeatableStockholderDataMap.clear_handlers()
if post_processors is not None:
RepeatableStockholderDataMap.register_handlers(post_processors)
if value_overrides is None:
value_overrides = {}
stakeholder_datamap = load_ce_to_ocf_stakeholder_datamap(custom_datamap_path)
stockholders_ocf = traverse_datamap(
stakeholder_datamap,
None,
ce_jsons,
value_overrides={"PARSER_VERSION": version, **value_overrides},
fail_on_missing_variable=fail_on_missing_variable,
)
# TODO - improve type checking to check for actual target OCF schema
assert isinstance(stockholders_ocf, list), (
f"Expected stockholders_ocf to be list of dicts, " f"got {type(stockholders_ocf)}"
)
return stockholders_ocf
def parse_ocf_stock_issuances_from_ce_json(
ce_jsons: list[ContractExpressVarObj],
fail_on_missing_variable: bool = False,
common_post_processors: Optional[dict[str, Callable]] = None,
preferred_post_processors: Optional[dict[str, Callable]] = None,
common_datamap_path: Optional[Path] = None,
preferred_datamap_path: Optional[Path] = None,
common_value_overrides: Optional[dict[str, str]] = None,
preferred_value_overrides: Optional[dict[str, str]] = None,
clear_old_post_processors: bool = True,
) -> list[dict]:
"""
Args:
ce_jsons:
fail_on_missing_variable:
common_post_processors:
preferred_post_processors:
common_datamap_path:
common_value_overrides:
preferred_datamap_path:
preferred_value_overrides:
clear_old_post_processors:
Returns:
"""
def drop_fully_vested_vest_term_id(val, ce_jsons) -> str:
"""
Raise a VariableNotFound exception if fully vested which will cause
the key to be dropped entirely.
Args:
val: Variable name
ce_jsons: List of ce jsons
Returns: Original value or, if fully vested, throw an error
"""
if val.split("/")[0] == "Fully Vested":
raise VariableNotFound
else:
return val
if common_value_overrides is None:
common_value_overrides = {}
if preferred_value_overrides is None:
preferred_value_overrides = {}
if clear_old_post_processors:
VestingStockIssuanceDataMap.clear_handlers()
|
def parse_ocf_issuer_from_ce_jsons(
ce_jsons: list[ContractExpressVarObj],
post_processors: Optional[dict[str, Callable]] = None,
fail_on_missing_variable: bool = False,
custom_datamap_path: Optional[Path] = None,
value_overrides: Optional[dict[str, str]] = None,
clear_old_post_processors: bool = True,
) -> dict:
"""
    By default, loads our default ce to ocf issuer datamap (though you can provide a path to your own JSON datamap) and
uses it to parse a valid OCF issuer object from a list of ce_json objects.
Args:
ce_jsons: List of CE Jsons matching schema defined in ContractExpressVarObj
post_processors (optional): A dictionary mapping stock class object data field names to functions which
you want to run on the parsed data - e.g. if your questionnaire has data that
needs to be formatted or parsed.
fail_on_missing_variable: Set to True if you want to get an error if any data fields are missing.
custom_datamap_path: If you want to use a custom datamap, provide path to json file
value_overrides: If provided, pass to underlying datamap crawler to override specified lookup values in dict
clear_old_post_processors: If True, unregister all handlers for IssuerDataMap before registering any provided
as post_processors
Returns: Valid ocf issuer json
"""
if clear_old_post_processors:
IssuerDataMap.clear_handlers()
if post_processors is not None:
IssuerDataMap.register_handlers(post_processors)
if value_overrides is None:
value_overrides = {}
issuer_datamap = load_ce_to_ocf_issuer_datamap(custom_datamap_path)
parsed_issuer_ocf = traverse_datamap(
issuer_datamap,
None,
ce_jsons,
value_overrides={"PARSER_VERSION": version, **value_overrides},
fail_on_missing_variable=fail_on_missing_variable,
)
# TODO - improve type checking to check for actual target OCF schema
assert isinstance(parsed_issuer_ocf, dict), f"Expected parsed_issuer_ocf to be dict, got {type(parsed_issuer_ocf)}"
return parsed_issuer_ocf
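# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal sketch of driving the issuer parser above from a Contract Express JSON export.
# The file name 'ce_export.json', the assumption that the export is a JSON list of CE variable
# objects, and the 'legal_name' post-processor behaviour are illustrative assumptions only.
def _example_parse_issuer_from_export(export_path: str = 'ce_export.json') -> dict:
    import json

    with open(export_path, encoding='utf-8') as export_file:
        ce_vars: list[ContractExpressVarObj] = json.load(export_file)
    # Post-processors receive (value, ce_jsons); this one simply upper-cases the parsed legal name.
    return parse_ocf_issuer_from_ce_jsons(
        ce_vars,
        post_processors={'legal_name': lambda value, ce_jsons: str(value).upper()},
        fail_on_missing_variable=False,
    )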
def parse_stock_plan_from_ce_jsons(
ce_jsons: list[ContractExpressVarObj],
post_processors: Optional[dict[str, Callable]] = None,
fail_on_missing_variable: bool = False,
custom_datamap_path: Optional[Path] = None,
value_overrides: Optional[dict[str, str]] = None,
clear_old_post_processors: bool = True,
) -> dict:
"""
    By default, loads our default ce to ocf stock plan datamap (though you can provide a path to your own JSON datamap)
and uses it to parse a valid OCF stock plan object from a list of ce_json objects.
:param ce_jsons:
:param post_processors:
:param fail_on_missing_variable:
:param custom_datamap_path:
:param value_overrides:
:param clear_old_post_processors:
:return: Valid OCF stock plan
"""
if clear_old_post_processors:
StockPlanDataMap.clear_handlers()
if post_processors is not None:
StockPlanDataMap.register_handlers(post_processors)
if value_overrides is None:
value_overrides = {}
stock_plan_datamap = load_ce_to_ocf_stock_plan_datamap(custom_datamap_path)
stock_plan_ocf = traverse_datamap(
stock_plan_datamap,
None,
ce_jsons,
value_overrides={"PARSER_VERSION": version, **value_overrides},
fail_on_missing_variable=fail_on_missing_variable,
)
# TODO - improve type checking to check for actual target OCF schema
    assert isinstance(stock_plan_ocf, dict), f'Expected stock_plan_ocf to be dict, got {type(stock_plan_ocf)}'
return stock_plan_ocf
def parse_ocf_stock_class_from_ce_jsons(
ce_jsons: list[ContractExpressVarObj],
common_or_preferred: Literal["COMMON", "PREFERRED"] = "COMMON",
post_processors: Optional[dict[str, Callable]] = None,
fail_on_missing_variable: bool = False,
custom_datamap_path: Optional[Path] = None,
value_overrides: Optional[dict[str, str]] = None,
clear_old_post_processors: bool = True,
) -> dict:
"""
    By default, loads our default ce to ocf common stock class datamap (though you can provide a path to your own JSON
    datamap) and uses it to parse a valid OCF stock class object from a list of ce_json objects. You can change the
common_or_preferred argument to "PREFERRED" to get a preferred stock class.
Args:
ce_jsons: List of CE Jsons matching schema defined in ContractExpressVarObj
common_or_preferred: Set to "COMMON" (default) to parse common stock and "PREFERRED" to parse preferred stock
post_processors (optional): A dictionary mapping stock class object data field names to functions which
you want to run on the parsed data - e.g. if your questionnaire has data that
needs to be formatted or parsed.
fail_on_missing_variable: Set to True if you want to get an error if any data fields are missing.
custom_datamap_path: If you want to use a custom datamap, provide path to json file
value_overrides: If provided, inject this into datamapper and look up values here first. If found, don't
check CE
Returns: Valid ocf stock class json
"""
if clear_old_post_processors:
StockClassDataMap.clear_handlers()
if post_processors is not None:
StockClassDataMap.register_handlers(post_processors)
if value_overrides is None:
value_overrides = {}
if common_or_preferred == "COMMON":
stock_class_datamap = load_ce_to_ocf_stock_class_datamap(custom_datamap_path)
elif common_or_preferred == "PREFERRED":
stock_class_datamap = load_ce_to_ocf_stock_class_datamap(
custom_datamap_path if custom_datamap_path else DEFAULT_CE_TO_OCF_PREFERRED_STOCK_CLASS_ONLY_PATH
)
else:
raise ValueError("We only support COMMON or PREFERRED datamaps")
stock_class_ocf = traverse_datamap(
stock_class_datamap,
None,
ce_jsons,
value_overrides={"PARSER_VERSION": version, **value_overrides},
fail_on_missing_variable=fail_on_missing_variable,
)
# TODO - improve type checking to check for actual target OCF schema
assert isinstance(stock_class_ocf, dict), f"Expected stock_class_ocf to be dict, got {type(stock_class_ocf)}"
return stock_class_ocf
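# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal sketch showing how both stock classes could be parsed from the same CE variable
# list via the common_or_preferred switch documented above; ce_vars is assumed to already
# hold the parsed Contract Express variables.
def _example_parse_both_stock_classes(ce_vars: list[ContractExpressVarObj]) -> list[dict]:
    common_ocf = parse_ocf_stock_class_from_ce_jsons(ce_vars, common_or_preferred='COMMON')
    preferred_ocf = parse_ocf_stock_class_from_ce_jsons(ce_vars, common_or_preferred='PREFERRED')
    return [common_ocf, preferred_ocf]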
def parse_ocf_stock_legend_from_ce_jsons(
ce_jsons: list[ContractExpressVarObj],
common_or_preferred: Literal["COMMON", "PREFERRED"] = "COMMON",
post_processors: Optional[dict[str, Callable]] = None,
fail_on_missing_variable: bool = False,
custom_datamap_path: Optional[Path] = None,
value_overrides: Optional[dict[str, str]] = None,
clear_old_post_processors: bool = True,
) -> dict:
"""
    By default, loads our default ce to ocf common stock legend datamap (though you can provide a path to your own JSON
datamap) and uses it to parse a valid OCF stock legend object from a list of ce_json objects. You can change the
    common_or_preferred argument to "PREFERRED" to get preferred stock legends.
Args:
ce_jsons: List of CE Jsons matching schema defined in ContractExpressVarObj
        common_or_preferred: Set to "COMMON" (default) to parse common legends and "PREFERRED" to parse preferred legends
post_processors (optional): A dictionary mapping stock legend data field names to functions which
you want to run on the parsed data
fail_on_missing_variable: Set to True if you want to get an error if any data fields are missing.
custom_datamap_path: If you want to use a custom datamap, provide path to json file
value_overrides: If provided, inject this variable value lookup into parser which will override anything in CE
Returns: Valid ocf stock legend json
"""
if clear_old_post_processors:
StockLegendDataMap.clear_handlers()
if post_processors is not None:
StockLegendDataMap.register_handlers(post_processors)
if value_overrides is None:
value_overrides = {}
if common_or_preferred == "COMMON":
stock_legend_datamap = load_ce_to_ocf_stock_legend_datamap(custom_datamap_path)
elif common_or_preferred == "PREFERRED":
stock_legend_datamap = load_ce_to_ocf_stock_legend_datamap(
custom_datamap_path if custom_datamap_path else DEFAULT_CE_TO_OCF_DATAMAP_PREFERRED_STOCK_LEGEND_ONLY_PATH
)
else:
raise ValueError("We only support COMMON or PREFERRED datamaps")
ocf_stock_legend = traverse_datamap(
stock_legend_datamap,
None,
ce_jsons,
value_overrides={"PARSER_VERSION": version, **value_overrides},
fail_on_missing_variable=fail_on_missing_variable,
)
# TODO - improve type checking to check for actual target OCF schema
assert isinstance(ocf_stock_legend, dict), f"Expected ocf_stock_legend to be dict, got {type(ocf_stock_legend)}"
return ocf_stock_legend
def parse_ocf_stakeholders_from_ce_json(
ce_jsons: list[ContractExpressVarObj],
post_processors: Optional[dict[str, Callable]] = None,
clear_old_post_processors: bool = True,
fail_on_missing_variable: bool = False,
custom_datamap_path: Optional[Path] = None,
value_overrides: Optional[dict[str, str]] = None,
) -> list[dict]:
"""
    By default, loads our default ce to ocf stakeholder datamap (though you can provide a path to your own JSON datamap)
and uses it to parse a list of valid OCF stakeholder objects from a list of ce_json objects.
Args:
ce_jsons: List of CE Jsons matching schema defined in ContractExpressVarObj
clear_old_post_processors: If True, unregister all existing handlers to RepeatableStockholderDataMap before
registering new post processors. Good idea generally to ensure no handlers
remain registered from elsewhere in your code base and is True by default.
post_processors (optional): A dictionary mapping stakeholder object data field names to functions which
you want to run on the parsed data - e.g. if your questionnaire has data that
needs to be formatted or parsed.
fail_on_missing_variable: Set to True if you want to get an error if any data fields are missing.
custom_datamap_path: If you want to use a custom datamap, provide path to json file
value_overrides: If provided, inject this variable value lookup into parser which will override anything in CE
Returns: List of valid ocf stakeholder objects
"""
if clear_old_post_processors:
RepeatableStockholderDataMap.clear_handlers()
if post_processors is not None:
RepeatableStockholderDataMap.register_handlers(post_processors)
if value_overrides is None:
value_overrides = {}
stakeholder_datamap = load_ce_to_ocf_stakeholder_datamap(custom_datamap_path)
stockholders_ocf = traverse_datamap(
stakeholder_datamap,
None,
ce_jsons,
value_overrides={"PARSER_VERSION": version, **value_overrides},
fail_on_missing_variable=fail_on_missing_variable,
)
# TODO - improve type checking to check for actual target OCF schema
assert isinstance(stockholders_ocf, list), (
f"Expected stockholders_ocf to be list of dicts, " f"got {type(stockholders_ocf)}"
)
return stockholders_ocf
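# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal sketch of the value_overrides hook on the stakeholder parser above: any variable
# present in the override map is used instead of being looked up in the CE JSON. The override
# key 'NumberStockholders' is a hypothetical CE variable name used purely for illustration.
def _example_parse_stakeholders_with_overrides(ce_vars: list[ContractExpressVarObj]) -> list[dict]:
    return parse_ocf_stakeholders_from_ce_json(
        ce_vars,
        value_overrides={'NumberStockholders': '2'},
        fail_on_missing_variable=False,
    )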
def parse_ocf_stock_issuances_from_ce_json(
ce_jsons: list[ContractExpressVarObj],
fail_on_missing_variable: bool = False,
common_post_processors: Optional[dict[str, Callable]] = None,
preferred_post_processors: Optional[dict[str, Callable]] = None,
common_datamap_path: Optional[Path] = None,
preferred_datamap_path: Optional[Path] = None,
common_value_overrides: Optional[dict[str, str]] = None,
preferred_value_overrides: Optional[dict[str, str]] = None,
clear_old_post_processors: bool = True,
) -> list[dict]:
"""
Args:
ce_jsons:
fail_on_missing_variable:
common_post_processors:
preferred_post_processors:
common_datamap_path:
common_value_overrides:
preferred_datamap_path:
preferred_value_overrides:
clear_old_post_processors:
Returns:
"""
def drop_fully_vested_vest_term_id(val, ce_jsons) -> str:
"""
Raise a VariableNotFound exception if fully vested which will cause
the key to be dropped entirely.
Args:
val: Variable name
ce_jsons: List of ce jsons
Returns: Original value or, if fully vested, throw an error
"""
if val.split("/")[0] == "Fully Vested":
raise VariableNotFound
else:
return val
if common_value_overrides is None:
common_value_overrides = {}
if preferred_value_overrides is None:
preferred_value_overrides = {}
if clear_old_post_processors:
VestingStockIssuanceDataMap.clear_handlers() | FullyVestedStockIssuanceDataMap.clear_handlers() | 13 | 2023-11-13 15:50:53+00:00 | 12k |
cyberark/ark-sdk-python | ark_sdk_python/cli_services/dpa/common/ark_dpa_base_policies_editor_service.py | [
{
"identifier": "ArkInquirerRender",
"path": "ark_sdk_python/args/ark_args_formatter.py",
"snippet": "class ArkInquirerRender(ConsoleRender):\n # pylint: disable=keyword-arg-before-vararg,protected-access\n def __init__(self, event_generator=None, *args, **kwargs):\n super().__init__(event_generator=event_generator, theme=ARK_INQUIRER_THEME, *args, **kwargs)\n\n def render(self, question, answers=None):\n question.answers = answers or {}\n\n if question.ignore:\n return question.default\n\n clazz = self.render_factory(question.kind)\n render = clazz(question, terminal=self.terminal, theme=self._theme, show_default=question.show_default)\n if isinstance(\n render, (inquirer.render.console._text.Text, inquirer.render.console._password.Password, inquirer.render.console._path.Path)\n ):\n render.current = ''\n self.clear_eos()\n\n try:\n a = self._event_loop(render)\n if not a and question.default:\n a = question.default\n elif not a and question.name in answers:\n a = answers[question.name]\n return a\n finally:\n print('')\n\n def _print_header(self, render):\n base = render.get_header()\n\n header = base[: self.width - 9] + '...' if len(base) > self.width - 6 else base\n default_value = '{normal} ({default})'.format(default=render.question.default, normal=self.terminal.normal)\n show_default = render.question.default and render.show_default\n header += default_value if show_default else ''\n msg_template = '{t.move_up}{t.clear_eol}{tq.brackets_color}{tq.mark_color}?{tq.brackets_color} {msg}{t.normal}'\n\n escaped_current_value = str(render.get_current_value()).replace('{', '{{').replace('}', '}}')\n self.print_str(\n f'\\n{msg_template} {escaped_current_value}',\n msg=header,\n lf=not render.title_inline,\n tq=self._theme.Question,\n )"
},
{
"identifier": "ArkISPAuth",
"path": "ark_sdk_python/auth/ark_isp_auth.py",
"snippet": "class ArkISPAuth(ArkAuth):\n def __perform_identity_authentication(\n self, profile: ArkProfile, auth_profile: ArkAuthProfile, secret: Optional[ArkSecret], force: bool\n ) -> ArkToken:\n try:\n method_settings = cast(IdentityArkAuthMethodSettings, auth_profile.auth_method_settings)\n identity = ArkIdentity(\n username=auth_profile.username,\n password=secret.secret.get_secret_value() if secret else None,\n identity_url=method_settings.identity_url,\n mfa_type=method_settings.identity_mfa_method,\n logger=self._logger,\n cache_authentication=self._cache_authentication,\n )\n identity.auth_identity(profile, ArkSystemConfig.is_interactive() and method_settings.identity_mfa_interactive, force)\n env = AwsEnv(os.environ.get('DEPLOY_ENV', AwsEnv.PROD.value))\n found_env = list(filter(lambda e: ROOT_DOMAIN[e] in identity.identity_url, ROOT_DOMAIN.keys()))\n if found_env:\n env = found_env[0]\n token_lifetime = identity.session_details.token_lifetime\n if not token_lifetime:\n token_lifetime = DEFAULT_TOKEN_LIFETIME\n return ArkToken(\n token=identity.session_token,\n username=auth_profile.username,\n endpoint=identity.identity_url,\n token_type=ArkTokenType.JWT,\n auth_method=ArkAuthMethod.Identity,\n expires_in=datetime.now() + timedelta(seconds=token_lifetime),\n refresh_token=identity.session_details.refresh_token,\n metadata={'env': env, 'cookies': codecs.encode(pickle.dumps(identity.session.cookies), 'base64').decode()},\n )\n except Exception as ex:\n self._logger.exception(f'Failed to authenticate to identity security platform [{str(ex)}]')\n raise ArkAuthException from ex\n\n def __perform_identity_refresh_authentication(self, profile: ArkProfile, auth_profile: ArkAuthProfile, token: ArkToken) -> ArkToken:\n try:\n method_settings = cast(IdentityArkAuthMethodSettings, auth_profile.auth_method_settings)\n identity = ArkIdentity(\n username=auth_profile.username,\n password=None,\n identity_url=method_settings.identity_url,\n mfa_type=method_settings.identity_mfa_method,\n logger=self._logger,\n cache_authentication=self._cache_authentication,\n load_cache=True,\n cache_profile=profile,\n )\n identity.refresh_auth_identity(profile, method_settings.identity_mfa_interactive, False)\n env = AwsEnv(os.environ.get('DEPLOY_ENV', AwsEnv.PROD.value))\n found_env = list(filter(lambda e: ROOT_DOMAIN[e] in identity.identity_url, ROOT_DOMAIN.keys()))\n if found_env:\n env = found_env[0]\n token_lifetime = identity.session_details.token_lifetime\n if not token_lifetime:\n token_lifetime = DEFAULT_TOKEN_LIFETIME\n return ArkToken(\n token=identity.session_token,\n username=auth_profile.username,\n endpoint=identity.identity_url,\n token_type=ArkTokenType.JWT,\n auth_method=ArkAuthMethod.Identity,\n expires_in=datetime.now() + timedelta(seconds=token_lifetime),\n refresh_token=identity.session_details.refresh_token,\n metadata={'env': env, 'cookies': codecs.encode(pickle.dumps(identity.session.cookies), 'base64').decode()},\n )\n except Exception as ex:\n raise ArkAuthException('Failed to authenticate to isp via identity') from ex\n\n def __perform_identity_service_user_authentication(\n self, profile: ArkProfile, auth_profile: ArkAuthProfile, secret: Optional[ArkSecret], force: bool\n ) -> ArkToken:\n try:\n if not secret:\n raise ArkException('Token secret is required for identity service user auth')\n method_settings = cast(IdentityServiceUserArkAuthMethodSettings, auth_profile.auth_method_settings)\n identity = ArkIdentityServiceUser(\n username=auth_profile.username,\n 
token=secret.secret.get_secret_value(),\n app_name=method_settings.identity_authorization_application,\n logger=self._logger,\n cache_authentication=self._cache_authentication,\n )\n identity.auth_identity(profile, force)\n env = AwsEnv(os.environ.get('DEPLOY_ENV', AwsEnv.PROD.value))\n found_env = list(filter(lambda e: ROOT_DOMAIN[e] in identity.identity_url, ROOT_DOMAIN.keys()))\n if found_env:\n env = found_env[0]\n return ArkToken(\n token=identity.session_token,\n username=auth_profile.username,\n endpoint=identity.identity_url,\n token_type=ArkTokenType.JWT,\n auth_method=ArkAuthMethod.IdentityServiceUser,\n expires_in=datetime.now() + timedelta(hours=4),\n metadata={'env': env, 'cookies': codecs.encode(pickle.dumps(identity.session.cookies), 'base64').decode()},\n )\n except Exception as ex:\n self._logger.exception(f'Failed to authenticate to identity security platform with service user [{str(ex)}]')\n raise ArkAuthException from ex\n\n @overrides\n def _perform_authentication(\n self, profile: ArkProfile, auth_profile: ArkAuthProfile, secret: Optional[ArkSecret] = None, force: bool = False\n ) -> ArkToken:\n \"\"\"\n Performs authentication to the identity security platform identity tenant\n Authentication can be done with either a service user or a normal user\n Authentication Methods:\n - Identity, Default\n - IdentityServiceUser\n\n Args:\n profile (ArkProfile): _description_\n auth_profile (ArkAuthProfile): _description_\n secret (Optional[ArkSecret], optional): _description_. Defaults to None.\n force (bool, optional): _description_. Defaults to False.\n\n Raises:\n ArkAuthException: _description_\n\n Returns:\n ArkToken: _description_\n \"\"\"\n self._logger.info('Performing authentication to ISP')\n if auth_profile.auth_method in [ArkAuthMethod.Identity, ArkAuthMethod.Default]:\n return self.__perform_identity_authentication(profile, auth_profile, secret, force)\n if auth_profile.auth_method == ArkAuthMethod.IdentityServiceUser:\n return self.__perform_identity_service_user_authentication(profile, auth_profile, secret, force)\n raise ArkAuthException('Given auth method is not supported')\n\n @overrides\n def _perform_refresh_authentication(self, profile: ArkProfile, auth_profile: ArkAuthProfile, token: ArkToken) -> ArkToken:\n \"\"\"\n Refresh for isp tenant is supported only for identity\n\n Args:\n profile (ArkProfile): _description_\n auth_profile (ArkAuthProfile): _description_\n token (ArkToken): _description_\n\n Returns:\n ArkToken: _description_\n \"\"\"\n self._logger.info('Performing refresh authentication to ISP')\n if auth_profile.auth_method in [ArkAuthMethod.Identity, ArkAuthMethod.Default]:\n return self.__perform_identity_refresh_authentication(profile, auth_profile, token)\n return token\n\n @staticmethod\n @overrides\n def authenticator_name() -> str:\n return AUTH_NAME\n\n @staticmethod\n @overrides\n def authenticator_human_readable_name() -> str:\n return AUTH_HUMAN_READABLE_NAME\n\n @staticmethod\n @overrides\n def supported_auth_methods() -> List[ArkAuthMethod]:\n return AUTH_METHODS\n\n @staticmethod\n @overrides\n def default_auth_method() -> Tuple[ArkAuthMethod, ArkAuthMethodSettings]:\n return DEFAULT_AUTH_METHOD, DEFAULT_AUTH_METHOD_SETTINGS"
},
{
"identifier": "ArkServiceException",
"path": "ark_sdk_python/models/ark_exceptions.py",
"snippet": "class ArkServiceException(ArkException):\n def __init__(self, error: Any, *args: object) -> None:\n self.error = error\n super().__init__(error, *args)"
},
{
"identifier": "ArkProfile",
"path": "ark_sdk_python/models/ark_profile.py",
"snippet": "class ArkProfile(ArkModel):\n profile_name: str = Field(default='ark', alias='Profile Name', description='Profile name for storage')\n profile_description: str = Field(default='Default Ark Profile', alias='Profile Description', description='Info about the profile')\n auth_profiles: Dict[str, ArkAuthProfile] = Field(\n description='Authentication profiles configurations, map from name of the authenticator to its profile', default_factory=dict\n )\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('auth_profiles', pre=True)\n def validate_auth_profiles(cls, val):\n auth_profiles = {}\n for k, v in val.items():\n auth_profile = ArkAuthProfile.parse_obj(v)\n # Make sure that the settings are parsed with the correct class\n # Due to properties overlapping\n if 'auth_method_settings' in v:\n auth_profile.auth_method_settings = ArkAuthMethodSettingsMap[auth_profile.auth_method].parse_obj(v['auth_method_settings'])\n auth_profiles[k] = auth_profile\n return auth_profiles"
},
{
"identifier": "ArkProfileLoader",
"path": "ark_sdk_python/models/ark_profile.py",
"snippet": "class ArkProfileLoader:\n @staticmethod\n def profiles_folder() -> str:\n \"\"\"\n Retrieves the profiles folder pathname, from the environment variable when set; otherwise, from the default location.\n\n Returns:\n str: _description_\n \"\"\"\n return os.getenv('ARK_PROFILES_FOLDER', os.path.join(Path.home(), '.ark_profiles'))\n\n @staticmethod\n def default_profile_name() -> str:\n \"\"\"\n Getter for the default profile name.\n\n Returns:\n str: _description_\n \"\"\"\n return 'ark'\n\n @staticmethod\n def deduce_profile_name(profile_name: Optional[str] = None) -> str:\n \"\"\"\n Deduces the profile name from the env.\n\n Args:\n profile_name (Optional[str], optional): Defaults to `None`\n\n Returns:\n str: _description_\n \"\"\"\n if profile_name and profile_name != ArkProfileLoader.default_profile_name():\n return profile_name\n if 'ARK_PROFILE' in os.environ:\n return os.environ['ARK_PROFILE']\n if profile_name:\n return profile_name\n return ArkProfileLoader.default_profile_name()\n\n @staticmethod\n def load_default_profile() -> ArkProfile:\n \"\"\"\n Loads the default profile, either from the OS or creates a new one.\n\n Returns:\n ArkProfile: _description_\n \"\"\"\n folder = ArkProfileLoader.profiles_folder()\n profile_name = ArkProfileLoader.deduce_profile_name()\n if os.path.exists(os.path.join(folder, profile_name)):\n profile: ArkProfile = ArkProfile.parse_file(os.path.join(folder, profile_name))\n return profile\n return ArkProfile()\n\n @staticmethod\n def load_profile(profile_name: str) -> Optional[ArkProfile]:\n \"\"\"\n Loads the specified profile from the OS.\n Returns `None` when a profile is not found with the specified name.\n\n Args:\n profile_name (str): _description_\n\n Returns:\n Optional[ArkProfile]: _description_\n \"\"\"\n folder = ArkProfileLoader.profiles_folder()\n if os.path.exists(os.path.join(folder, profile_name)):\n profile: ArkProfile = ArkProfile.parse_file(os.path.join(folder, profile_name))\n return profile\n return None\n\n @staticmethod\n def save_profile(profile: ArkProfile) -> None:\n \"\"\"\n Saves the profile to the profiles folder on the filesystem.\n\n Args:\n profile (ArkProfile): _description_\n \"\"\"\n folder = ArkProfileLoader.profiles_folder()\n if not os.path.exists(folder):\n os.makedirs(folder)\n with open(os.path.join(folder, profile.profile_name), 'w', encoding='utf-8') as f:\n f.write(profile.json(indent=4, by_alias=False))\n\n @staticmethod\n def load_all_profiles() -> Optional[List[ArkProfile]]:\n \"\"\"\n Loads all the profiles that exist on the machine.\n\n Returns:\n Optional[List[ArkProfile]]: _description_\n \"\"\"\n logger = get_logger('load_all_profiles')\n folder = ArkProfileLoader.profiles_folder()\n if not os.path.exists(folder):\n return None\n profiles: List[ArkProfile] = []\n for profile_name in os.listdir(folder):\n try:\n profiles.append(ArkProfile.parse_file(os.path.join(folder, profile_name)))\n except Exception as ex:\n logger.warning(f'Profile {profile_name} failed to be loaded successfully [{str(ex)}]')\n continue\n return profiles\n\n @staticmethod\n def delete_profile(profile_name: str) -> None:\n \"\"\"\n Deletes the specified profile.\n\n Args:\n profile_name (str): The name of the profile to delete\n \"\"\"\n folder = ArkProfileLoader.profiles_folder()\n if not os.path.exists(folder):\n return None\n if os.path.exists(os.path.join(folder, profile_name)):\n os.unlink(os.path.join(folder, profile_name))\n\n @staticmethod\n def clear_all_profiles() -> None:\n \"\"\"\n Clears all profiles.\n 
\"\"\"\n folder = ArkProfileLoader.profiles_folder()\n if not os.path.exists(folder):\n return None\n for profile_name in os.listdir(folder):\n os.unlink(os.path.join(folder, profile_name))\n\n @staticmethod\n def profile_exists(profile_name: str) -> bool:\n \"\"\"\n Checks if the specified profile exists.\n\n Args:\n profile_name (str): _description_\n\n Returns:\n bool: _description_\n \"\"\"\n folder = ArkProfileLoader.profiles_folder()\n if not os.path.exists(folder):\n return False\n return os.path.exists(os.path.join(folder, profile_name))"
},
{
"identifier": "ArkDPABaseGeneratePolicy",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_base_generate_policy.py",
"snippet": "class ArkDPABaseGeneratePolicy(ArkModel):\n name: Optional[str] = Field(description='Policy name to generate to the workspace')\n disable_edit: bool = Field(description='Whether no interactiveness / editing is required', default=False)"
},
{
"identifier": "ArkDPACommitPolicies",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_commit_policies.py",
"snippet": "class ArkDPACommitPolicies(ArkModel):\n names: Optional[List[str]] = Field(\n description='Policy names to commit from the workspace to the remote, if not given, choices will be prompted'\n )\n all: bool = Field(description='Whether to commit all locally edited policies', default=False)"
},
{
"identifier": "ArkDPAEditPolicies",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_edit_policies.py",
"snippet": "class ArkDPAEditPolicies(ArkModel):\n names: Optional[List[str]] = Field(description='Policies to edit from the workspace, if not given, choices will be prompted')"
},
{
"identifier": "ArkDPAGetPoliciesStatus",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_get_policies_status.py",
"snippet": "class ArkDPAGetPoliciesStatus(ArkModel):\n names: Optional[List[str]] = Field(description='Policy names to show status on, if not given, shows status on all policies')"
},
{
"identifier": "ArkDPALoadPolicies",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_load_policies.py",
"snippet": "class ArkDPALoadPolicies(ArkModel):\n override: bool = Field(description='Whether to override existing policies', default=False)"
},
{
"identifier": "ArkDPALoadedPolicies",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_loaded_policies.py",
"snippet": "class ArkDPALoadedPolicies(ArkModel):\n loaded_path: str = Field(description='Path to the workspace dir which the policies were loaded to')\n overall_policies_count: int = Field(description='Overall policies in the workspace')\n loaded_policies_count: int = Field(description='Loaded policies count')\n overriden_policies_count: int = Field(description='Overriden policies count')\n untouched_policies_count: int = Field(description='Policies count which were not overriden')"
},
{
"identifier": "ArkDPAPoliciesDiff",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_policies_diff.py",
"snippet": "class ArkDPAPoliciesDiff(ArkModel):\n names: Optional[List[str]] = Field(description='Policy names to show diff on, if not given, shows diff on all policies')\n unified: bool = Field(description='Show all diffs together', default=False)"
},
{
"identifier": "ArkDPAPoliciesStatus",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_policies_status.py",
"snippet": "class ArkDPAPoliciesStatus(ArkModel):\n modified_policies: List[str] = Field(description='List of locally modified policies')\n removed_policies: List[str] = Field(description='List of locally removed policies')\n added_policies: List[str] = Field(description='List of locally added policies')"
},
{
"identifier": "ArkDPARemovePolicies",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_remove_policies.py",
"snippet": "class ArkDPARemovePolicies(ArkModel):\n names: Optional[List[str]] = Field(description='Policies to remove from the workspace, if not given, choices will be prompted')"
},
{
"identifier": "ArkDPAResetPolicies",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_reset_policies.py",
"snippet": "class ArkDPAResetPolicies(ArkModel):\n names: Optional[List[str]] = Field(description='Policy names to reset on the workspace, if not given, all policies are resetted')\n all: bool = Field(description='Whether to reset all locally edited policies', default=False)"
},
{
"identifier": "ArkDPAViewPolicies",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_view_policies.py",
"snippet": "class ArkDPAViewPolicies(ArkModel):\n names: Optional[List[str]] = Field(description='Policy names to view from the workspace, if not given, choices will be prompted')\n unified: bool = Field(description='Show all requested policies together', default=False)"
},
{
"identifier": "ArkDPABaseAddPolicy",
"path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_base_add_policy.py",
"snippet": "class ArkDPABaseAddPolicy(ArkCamelizedModel):\n policy_name: str = Field(description='Policy name to be added')\n description: Optional[str] = Field(description='Description about the policy to add')\n status: ArkDPARuleStatus = Field(description='Status of the policy upon adding', default=ArkDPARuleStatus.Draft)\n start_date: Optional[str] = Field(description='When will the policy start taking effect, empty means it will take effect when added')\n end_date: Optional[str] = Field(description='When will the policy stop taking effect, empty means it will never stop taking effect')"
},
{
"identifier": "ArkDPABasePolicy",
"path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_base_policy.py",
"snippet": "class ArkDPABasePolicy(ArkCamelizedModel):\n policy_id: Optional[str] = Field(description='ID of the policy')\n policy_name: str = Field(description='Name of the policy')\n status: ArkDPARuleStatus = Field(description='Status of the policy')\n description: Optional[str] = Field(description='Description of the policy')\n start_date: Optional[str] = Field(description='Start date of the policy')\n end_date: Optional[str] = Field(description='End date of the policy')"
},
{
"identifier": "ArkDPABasePolicyListItem",
"path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_base_policy_list_item.py",
"snippet": "class ArkDPABasePolicyListItem(ArkCamelizedModel):\n policy_id: str = Field(description='ID of the policy')\n policy_name: str = Field(description='Name of the policy')\n status: ArkDPARuleStatus = Field(description='Status of the policy')\n description: Optional[str] = Field(description='Description of the policy')\n updated_on: str = Field(description='Last update time of the policy')\n rule_names: Optional[List[str]] = Field(description='Names of the authorization rules of the policy')"
},
{
"identifier": "ArkDPABaseUpdatePolicy",
"path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_base_update_policy.py",
"snippet": "class ArkDPABaseUpdatePolicy(ArkCamelizedModel):\n policy_id: Optional[str] = Field(description='Policy id to update')\n policy_name: Optional[str] = Field(description='Policy name to update')\n new_policy_name: Optional[str] = Field(description='New policy name to update')\n description: Optional[str] = Field(description='Description about the policy to be updated')\n status: Optional[ArkDPARuleStatus] = Field(description='Status of the policy to update')\n start_date: Optional[str] = Field(description='New start time to update')\n end_date: Optional[str] = Field(description='New end time to update')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'policy_id' not in values and 'policy_name' not in values:\n raise ValueError('Either policy id or policy name needs to be provided')\n return values"
},
{
"identifier": "ArkDPADeletePolicy",
"path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_delete_policy.py",
"snippet": "class ArkDPADeletePolicy(ArkModel):\n policy_id: Optional[str] = Field(description='Policy id to delete')\n policy_name: Optional[str] = Field(description='Policy name to delete')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'policy_id' not in values and 'policy_name' not in values:\n raise ValueError('Either policy id or policy name needs to be provided')\n return values"
},
{
"identifier": "ArkDPAGetPolicy",
"path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_get_policy.py",
"snippet": "class ArkDPAGetPolicy(ArkModel):\n policy_id: Optional[str] = Field(description='Policy id to get')\n policy_name: Optional[str] = Field(description='Policy name to get')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'policy_id' not in values and 'policy_name' not in values:\n raise ValueError('Either policy id or policy name needs to be provided')\n return values"
},
{
"identifier": "ArkService",
"path": "ark_sdk_python/services/ark_service.py",
"snippet": "class ArkService(ABC):\n def __init__(self, *authenticators: Any) -> None:\n self._logger = get_logger(self.__class__.__name__)\n self._authenticators = [auth for auth in authenticators if issubclass(type(auth), ArkAuth)]\n given_auth_names = [auth.authenticator_name() for auth in self._authenticators]\n if any(a not in given_auth_names for a in self.service_config().required_authenticator_names):\n raise ArkValidationException(f'{self.service_config().service_name} missing required authenticators for service')\n\n @property\n def authenticators(self) -> List[ArkAuth]:\n \"\"\"\n Returns all the authenticators for the service.\n\n Returns:\n List[ArkAuth]: _description_\n \"\"\"\n return self._authenticators\n\n def authenticator(self, auth_name: str) -> ArkAuth:\n \"\"\"\n Finds the appropriate Ark authenticator class for the specified authenticator.\n\n Args:\n auth_name (str): _description_\n\n Raises:\n ArkNotFoundException: _description_\n\n Returns:\n ArkAuth: _description_\n \"\"\"\n for auth in self.authenticators:\n if auth.authenticator_name() == auth_name:\n return auth\n raise ArkNotFoundException(f'{self.service_config().service_name} Failed to find authenticator {auth_name}')\n\n def has_authenticator(self, auth_name: str) -> bool:\n \"\"\"\n Checks whether the specified authenticator name exists.\n\n Args:\n auth_name (str): _description_\n\n Returns:\n bool: _description_\n \"\"\"\n return any(auth.authenticator_name() == auth_name for auth in self.authenticators)\n\n @staticmethod\n @abstractmethod\n def service_config() -> ArkServiceConfig:\n \"\"\"\n Returns the service configuration, which includes the service name, and its required and optional authenticators.\n\n Returns:\n ArkServiceConfig: _description_\n \"\"\""
}
] | import difflib
import itertools
import os
import inquirer
from abc import ABC, abstractmethod
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Dict, Final, Generic, List, Optional, Tuple, TypeVar
from editor import EditorError
from ark_sdk_python.args.ark_args_formatter import ArkInquirerRender
from ark_sdk_python.auth.ark_isp_auth import ArkISPAuth
from ark_sdk_python.models import ArkServiceException
from ark_sdk_python.models.ark_profile import ArkProfile, ArkProfileLoader
from ark_sdk_python.models.cli_services.dpa.policies_editor.common import (
ArkDPABaseGeneratePolicy,
ArkDPACommitPolicies,
ArkDPAEditPolicies,
ArkDPAGetPoliciesStatus,
ArkDPALoadedPolicies,
ArkDPALoadPolicies,
ArkDPAPoliciesDiff,
ArkDPAPoliciesStatus,
ArkDPARemovePolicies,
ArkDPAResetPolicies,
ArkDPAViewPolicies,
)
from ark_sdk_python.models.services.dpa.policies.common import (
ArkDPABaseAddPolicy,
ArkDPABasePolicy,
ArkDPABasePolicyListItem,
ArkDPABaseUpdatePolicy,
ArkDPADeletePolicy,
ArkDPAGetPolicy,
)
from ark_sdk_python.services.ark_service import ArkService | 8,995 | inquirer.Checkbox(
'names',
f'Which {self._policies_family} policies would you like to view?',
choices=[p.policy_name for p in workspace_policies.values()],
)
],
render=ArkInquirerRender(),
)
if not answers:
return
policy_names = answers['names']
if not policy_names:
return
try:
if view_policies.unified:
inquirer.prompt(
[inquirer.Editor('views', f'Show all selected {self._policies_family} policies')],
answers={
'views': '\n\n\n'.join(
[f'# Policy [{policy_name}]\n{workspace_policies[policy_name].json(indent=4)}' for policy_name in policy_names]
)
},
render=ArkInquirerRender(),
)
else:
inquirer.prompt(
[inquirer.Editor(f'{policy_name}_view', f'Show [{policy_name}]') for policy_name in policy_names],
render=ArkInquirerRender(),
answers={f'{policy_name}_view': workspace_policies[policy_name].json(indent=4) for policy_name in policy_names},
)
except EditorError as ex:
self._logger.error(
f'An error occurred while trying to view the {self._policies_family} policies, '
f'you can view the policies at [{self.__policies_cache_dir}] [{str(ex)}]'
)
def reset_policies(self, reset_policy: ArkDPAResetPolicies) -> None:
"""
Resets local workspace policies.
When all policies are reset, all local policies are overwritten and deleted policies are removed.
Otherwise, the user can select which policies are reset.
This function does not alter newly generated uncommitted policies.
Args:
reset_policy (ArkDPAResetPolicies): _description_
"""
if reset_policy.all:
answers = inquirer.prompt(
[inquirer.Confirm('reset', message=f'Are you sure you want to reset all edited {self._policies_family} policies?')]
)
if not answers:
return
if answers['reset']:
self.load_policies(ArkDPALoadPolicies(override=True))
else:
policies_diff = self.__load_policies_diff()
removed_policies = self.__load_removed_policies_from_workspace()
if not policies_diff and not removed_policies:
return
policy_names = reset_policy.names
if not policy_names:
answers = inquirer.prompt(
[
inquirer.Checkbox(
'names',
                            f'Which {self._policies_family} policies would you like to reset? Press space to select',
                            choices=list(policies_diff.keys()) + list(removed_policies.keys()),
)
],
render=ArkInquirerRender(),
)
if not answers:
return
policy_names = answers['names']
policy_names = [p for p in policy_names if p in policies_diff or p in removed_policies]
for policy_name in policy_names:
policy_path = Path(self.__policies_cache_dir) / (policy_name + '.json')
if policy_name in policies_diff:
policy_path.write_text(policies_diff[policy_name][1].json(indent=4))
elif policy_name in removed_policies:
policy_path.write_text(removed_policies[policy_name].json(indent=4))
(Path(self.__policies_cache_dir) / (policy_name + '.json.removed')).unlink(missing_ok=True)
def generate_policy(self, generate_policy: GeneratePolicyType) -> None:
"""
Generates a new policy from a template and the user's parameters.
The user is prompted for the parameters when they are not specified in the CLI.
        After the policy's parameters are defined, the policy is generated in memory and can be edited.
The new policy is saved locally until it is committed.
Args:
generate_policy (GeneratePolicyType): _description_
"""
workspace_policies = self.__load_existing_policies_from_workspace()
workspace_policies.update(self.__load_generated_policies_from_workspace())
policy = self._generate_policy(generate_policy, workspace_policies)
policy_path = Path(self.__policies_cache_dir) / (policy.policy_name + '.json.generated')
# Let the user edit the generated policy
if not generate_policy.disable_edit:
try:
answers = inquirer.prompt(
[
inquirer.Editor(
'policy_editor',
                            f'A new {self._policies_family} policy has been generated and is ready to be edited; once edited, it will be saved to the local workspace',
)
],
render=ArkInquirerRender(),
answers={'policy_editor': policy.json(indent=4, exclude_none=True)},
)
if not answers:
return
policy = self.__policy_type.parse_raw(answers['policy_editor'])
except EditorError as ex:
self._logger.error(
f'An error occurred while trying to edit the {self._policies_family} policy, '
f'the policy will be saved to [{policy_path}] and can be edited manually [{str(ex)}]'
)
policy_path.write_text(policy.json(indent=4))
|
MAX_LINE_DIFF: Final[int] = 100000
PolicyType = TypeVar('PolicyType', bound=ArkDPABasePolicy)
PolicyListItemType = TypeVar('PolicyListItemType', bound=ArkDPABasePolicyListItem)
AddPolicyType = TypeVar('AddPolicyType', bound=ArkDPABaseAddPolicy)
UpdatePolicyType = TypeVar('UpdatePolicyType', bound=ArkDPABaseUpdatePolicy)
GeneratePolicyType = TypeVar('GeneratePolicyType', bound=ArkDPABaseGeneratePolicy)
class ArkDPABasePoliciesEditorService(
ArkService, ABC, Generic[PolicyType, PolicyListItemType, AddPolicyType, UpdatePolicyType, GeneratePolicyType]
):
def __init__(
self,
policy_type: PolicyType,
add_policy_type: AddPolicyType,
update_policy_type: UpdatePolicyType,
isp_auth: ArkISPAuth,
policies_family: str,
tenant_id: str,
policies_cache_dir: Optional[str] = None,
profile: Optional[ArkProfile] = None,
) -> None:
super().__init__(isp_auth)
profile = profile or ArkProfileLoader.load_default_profile()
self._policies_family = policies_family
self.__policies_cache_dir = Path(policies_cache_dir or Path.home() / '.ark_cache' / 'profiles' / profile.profile_name / tenant_id)
if not policies_cache_dir and 'ARK_DPA_POLICIES_EDITOR_FOLDER' in os.environ:
self.__policies_cache_dir = Path(os.environ['ARK_DPA_POLICIES_EDITOR_FOLDER'])
self.__policies_cache_dir = self.__policies_cache_dir / policies_family
self.__policies_cache_dir.mkdir(exist_ok=True, parents=True)
self.__policy_type = policy_type
self.__add_policy_type = add_policy_type
self.__update_policy_type = update_policy_type
@abstractmethod
def _policy(self, get_policy: ArkDPAGetPolicy) -> PolicyType:
pass
@abstractmethod
def _list_policies(self) -> List[PolicyListItemType]:
pass
@abstractmethod
def _add_policy(self, add_policy: AddPolicyType) -> PolicyType:
pass
@abstractmethod
def _update_policy(self, update_policy: UpdatePolicyType) -> PolicyType:
pass
@abstractmethod
def _delete_policy(self, delete_policy: ArkDPADeletePolicy) -> None:
pass
@abstractmethod
def _generate_policy(self, generate_policy: GeneratePolicyType, workspace_policies: List[PolicyType]) -> PolicyType:
pass
def __load_policy_diff(self, workspace_policy: PolicyType) -> Optional[Tuple[PolicyType, PolicyType]]:
remote_policy = self._policy(ArkDPAGetPolicy(policy_id=str(workspace_policy.policy_id)))
if remote_policy != workspace_policy:
return (workspace_policy, remote_policy)
return None
def __load_policies_diff(self) -> Dict[str, Tuple[PolicyType, PolicyType]]:
workspace_policies = self.__load_existing_policies_from_workspace()
with ThreadPoolExecutor() as executor:
remote_policies = {
p[0].policy_name: p for p in executor.map(self.__load_policy_diff, workspace_policies.values()) if p is not None
}
return remote_policies
def __load_policies_from_workspace_by_suffix(self, suffix: str = '') -> Dict[str, PolicyType]:
p = Path(self.__policies_cache_dir).glob(f'*.json{suffix}')
policies_files = [x for x in p if x.is_file() and x.suffix == suffix or '.json']
policies = {}
for f in policies_files:
policy = self.__policy_type.parse_file(f)
policies[policy.policy_name] = policy
return policies
def __load_removed_policies_from_workspace(self) -> Dict[str, PolicyType]:
return self.__load_policies_from_workspace_by_suffix('.removed')
def __load_generated_policies_from_workspace(self) -> Dict[str, PolicyType]:
return self.__load_policies_from_workspace_by_suffix('.generated')
def __load_existing_policies_from_workspace(self) -> Dict[str, PolicyType]:
return self.__load_policies_from_workspace_by_suffix()
def __load_policy_to_workspace(self, policy: PolicyListItemType, override: bool) -> Optional[PolicyType]:
policy_data = self._policy(ArkDPAGetPolicy(policy_id=policy.policy_id))
policy_path = Path(self.__policies_cache_dir) / (policy_data.policy_name + '.json')
if policy_path.exists():
existing_data = self.__policy_type.parse_raw(policy_path.read_text())
if existing_data != policy_data:
if not override:
return policy_data
if not policy_data.policy_id:
policy_data.policy_id = policy.policy_id
policy_path.write_text(policy_data.json(indent=4))
(Path(self.__policies_cache_dir) / (policy_data.policy_name + '.json.removed')).unlink(missing_ok=True)
def load_policies(self, load_policies: ArkDPALoadPolicies) -> ArkDPALoadedPolicies:
"""
Loads all remote policies into the local workspace.
The user is asked whether to overwrite existing policies that were edited either locally or remotely.
When default overwrite is enabled, existing policies are overwritten without prompts.
Args:
load_policies (ArkDPALoadPolicies): _description_
Returns:
ArkDPALoadedPolicies: _description_
"""
policies = self._list_policies()
policies_to_query: Dict[str, PolicyType] = []
with ThreadPoolExecutor() as executor:
policies_to_query = {
p.policy_name: p
for p in executor.map(lambda p: self.__load_policy_to_workspace(p, load_policies.override), policies)
if p is not None
}
# Build the query editor to ask the user
policies_to_override = []
if policies_to_query:
answers = inquirer.prompt(
[
inquirer.Checkbox(
'override',
message=f'Conflicts detected, please choose if you wish to override local {self._policies_family} policies or leave them as is',
choices=[p.policy_name for p in policies_to_query.values()],
)
],
render=ArkInquirerRender(),
)
if not answers:
return
policies_to_override = answers['override']
for policy_name in policies_to_override:
policy_path = Path(self.__policies_cache_dir) / (policy_name + '.json')
if policy_path.exists() and policy_name in policies_to_query:
policy_path.write_text(policies_to_query[policy_name].json(indent=4))
return ArkDPALoadedPolicies(
loaded_path=str(self.__policies_cache_dir),
overall_policies_count=len(policies),
loaded_policies_count=len(policies) - len(policies_to_query),
overriden_policies_count=len(policies_to_override),
untouched_policies_count=len(policies_to_query) - len(policies_to_override),
)
def edit_policies(self, edit_policies: ArkDPAEditPolicies) -> None:
"""
Edits the set of specified policies one at a time, either via the CLI or the default OS editor.
Edited policies are only saved locally until they are committed.
Args:
edit_policies (ArkDPAEditPolicies): _description_
Raises:
ArkServiceException: _description_
"""
workspace_policies = self.__load_existing_policies_from_workspace()
workspace_policies.update(self.__load_generated_policies_from_workspace())
if not workspace_policies:
raise ArkServiceException(
f'No {self._policies_family} policies to edit in the workspace, please load the policies or generate a new one'
)
policy_names = edit_policies.names
if not policy_names:
answers = inquirer.prompt(
[
inquirer.Checkbox(
'names',
f'Which {self._policies_family} policies would you like to edit?, press space to select',
choices=[p.policy_name for p in workspace_policies.values()],
)
],
render=ArkInquirerRender(),
)
if not answers:
return
policy_names = answers['names']
try:
answers = inquirer.prompt(
[
inquirer.Editor(f'{name}_edit', message=f'Chosen {self._policies_family} policy [{name}] is about to be edited')
for name in policy_names
],
render=ArkInquirerRender(),
answers={f'{name}_edit': workspace_policies[name].json(indent=4) for name in policy_names},
)
for name in policy_names:
policy = self.__policy_type.parse_raw(answers[f'{name}_edit'])
for path in [
Path(self.__policies_cache_dir) / (name + '.json'),
Path(self.__policies_cache_dir) / (name + '.json.generated'),
]:
if path.exists():
path.write_text(policy.json(indent=4))
break
except EditorError as ex:
self._logger.error(
f'An error occurred while trying to edit {self._policies_family} policies, '
f'you can edit the policies at [{self.__policies_cache_dir}] [{str(ex)}]'
)
def remove_policies(self, remove_policies: ArkDPARemovePolicies) -> None:
"""
Removes one or more policies from the local workspace.
        Until changes are committed, removing a remote policy only appends the `.removed` indication to its local file name.
After committing the changes, the policies are deleted both locally and remotely.
New, uncommitted policies are deleted locally after the user consents.
Args:
remove_policies (ArkDPARemovePolicies): _description_
Raises:
ArkServiceException: _description_
"""
workspace_policies = self.__load_existing_policies_from_workspace()
workspace_policies.update(self.__load_generated_policies_from_workspace())
if not workspace_policies:
raise ArkServiceException(
f'No {self._policies_family} policies to remove in the workspace, please load the policies or generate a new one'
)
policy_names = remove_policies.names
if not policy_names:
answers = inquirer.prompt(
[
inquirer.Checkbox(
'names',
f'Which {self._policies_family} policies would you like to remove?, press space to select',
choices=[p.policy_name for p in workspace_policies.values()],
)
],
render=ArkInquirerRender(),
)
if not answers:
return
policy_names = answers['names']
for policy_name in policy_names:
for path in [
Path(self.__policies_cache_dir) / (policy_name + '.json'),
Path(self.__policies_cache_dir) / (policy_name + '.json.generated'),
]:
if path.exists():
if path.suffix == '.json':
path.rename(Path(self.__policies_cache_dir) / (policy_name + '.json.removed'))
else:
answers = inquirer.prompt(
[
inquirer.Confirm(
'remove',
message=f'Are you sure you want to remove local {self._policies_family} policy [{policy_name}]?, removing an uncommitted local policy cannot be reverted',
)
],
render=ArkInquirerRender(),
)
if not answers:
return
if answers['remove']:
path.unlink(missing_ok=True)
def view_policies(self, view_policies: ArkDPAViewPolicies) -> None:
"""
Allows the user to view one or more policies either together or individually, as defined in the CLI user prompt.
Policies are viewed in the machine's default editor (both existing policies and newly generated policies).
Args:
view_policies (ArkDPAViewPolicies): _description_
"""
workspace_policies = self.__load_existing_policies_from_workspace()
workspace_policies.update(self.__load_generated_policies_from_workspace())
policy_names = view_policies.names
if not policy_names:
answers = inquirer.prompt(
[
inquirer.Checkbox(
'names',
f'Which {self._policies_family} policies would you like to view?',
choices=[p.policy_name for p in workspace_policies.values()],
)
],
render=ArkInquirerRender(),
)
if not answers:
return
policy_names = answers['names']
if not policy_names:
return
try:
if view_policies.unified:
inquirer.prompt(
[inquirer.Editor('views', f'Show all selected {self._policies_family} policies')],
answers={
'views': '\n\n\n'.join(
[f'# Policy [{policy_name}]\n{workspace_policies[policy_name].json(indent=4)}' for policy_name in policy_names]
)
},
render=ArkInquirerRender(),
)
else:
inquirer.prompt(
[inquirer.Editor(f'{policy_name}_view', f'Show [{policy_name}]') for policy_name in policy_names],
render=ArkInquirerRender(),
answers={f'{policy_name}_view': workspace_policies[policy_name].json(indent=4) for policy_name in policy_names},
)
except EditorError as ex:
self._logger.error(
f'An error occurred while trying to view the {self._policies_family} policies, '
f'you can view the policies at [{self.__policies_cache_dir}] [{str(ex)}]'
)
def reset_policies(self, reset_policy: ArkDPAResetPolicies) -> None:
"""
Resets local workspace policies.
When all policies are reset, all local policies are overwritten and deleted policies are removed.
Otherwise, the user can select which policies are reset.
This function does not alter newly generated uncommitted policies.
Args:
reset_policy (ArkDPAResetPolicies): _description_
"""
if reset_policy.all:
answers = inquirer.prompt(
[inquirer.Confirm('reset', message=f'Are you sure you want to reset all edited {self._policies_family} policies?')]
)
if not answers:
return
if answers['reset']:
self.load_policies(ArkDPALoadPolicies(override=True))
else:
policies_diff = self.__load_policies_diff()
removed_policies = self.__load_removed_policies_from_workspace()
if not policies_diff and not removed_policies:
return
policy_names = reset_policy.names
if not policy_names:
answers = inquirer.prompt(
[
inquirer.Checkbox(
'names',
f'Which {self._policies_family} policies would you like to reset?, press space to select',
                        choices=[p for p in list(policies_diff.keys()) + list(removed_policies.keys())],
)
],
render=ArkInquirerRender(),
)
if not answers:
return
policy_names = answers['names']
policy_names = [p for p in policy_names if p in policies_diff or p in removed_policies]
for policy_name in policy_names:
policy_path = Path(self.__policies_cache_dir) / (policy_name + '.json')
if policy_name in policies_diff:
policy_path.write_text(policies_diff[policy_name][1].json(indent=4))
elif policy_name in removed_policies:
policy_path.write_text(removed_policies[policy_name].json(indent=4))
(Path(self.__policies_cache_dir) / (policy_name + '.json.removed')).unlink(missing_ok=True)
def generate_policy(self, generate_policy: GeneratePolicyType) -> None:
"""
Generates a new policy from a template and the user's parameters.
The user is prompted for the parameters when they are not specified in the CLI.
        After the policy's parameters are defined, the policy is generated in memory and can be edited.
The new policy is saved locally until it is committed.
Args:
generate_policy (GeneratePolicyType): _description_
"""
workspace_policies = self.__load_existing_policies_from_workspace()
workspace_policies.update(self.__load_generated_policies_from_workspace())
policy = self._generate_policy(generate_policy, workspace_policies)
policy_path = Path(self.__policies_cache_dir) / (policy.policy_name + '.json.generated')
# Let the user edit the generated policy
if not generate_policy.disable_edit:
try:
answers = inquirer.prompt(
[
inquirer.Editor(
'policy_editor',
f'Newly {self._policies_family} policy is generated and ready to be edited, once edited, it will be saved to the local workspace',
)
],
render=ArkInquirerRender(),
answers={'policy_editor': policy.json(indent=4, exclude_none=True)},
)
if not answers:
return
policy = self.__policy_type.parse_raw(answers['policy_editor'])
except EditorError as ex:
self._logger.error(
f'An error occurred while trying to edit the {self._policies_family} policy, '
f'the policy will be saved to [{policy_path}] and can be edited manually [{str(ex)}]'
)
policy_path.write_text(policy.json(indent=4))
| def policies_diff(self, policies_diff: ArkDPAPoliciesDiff) -> None: | 11 | 2023-11-13 09:24:31+00:00 | 12k |
mohenghui/detectAuto_v8 | ultralytics/models/sam/build.py | [
{
"identifier": "attempt_download_asset",
"path": "ultralytics/utils/downloads.py",
"snippet": "def attempt_download_asset(file, repo='ultralytics/assets', release='v0.0.0'):\n \"\"\"\n Attempt file download from GitHub release assets if not found locally.\n\n release = 'latest', 'v6.2', etc.\n \"\"\"\n from ultralytics.utils import SETTINGS # scoped for circular import\n\n # YOLOv3/5u updates\n file = str(file)\n file = checks.check_yolov5u_filename(file)\n file = Path(file.strip().replace(\"'\", ''))\n if file.exists():\n return str(file)\n elif (SETTINGS['weights_dir'] / file).exists():\n return str(SETTINGS['weights_dir'] / file)\n else:\n # URL specified\n name = Path(parse.unquote(str(file))).name # decode '%2F' to '/' etc.\n if str(file).startswith(('http:/', 'https:/')): # download\n url = str(file).replace(':/', '://') # Pathlib turns :// -> :/\n file = url2file(name) # parse authentication https://url.com/file.txt?auth...\n if Path(file).is_file():\n LOGGER.info(f'Found {clean_url(url)} locally at {file}') # file already exists\n else:\n safe_download(url=url, file=file, min_bytes=1E5)\n\n elif repo == GITHUB_ASSETS_REPO and name in GITHUB_ASSETS_NAMES:\n safe_download(url=f'https://github.com/{repo}/releases/download/{release}/{name}', file=file, min_bytes=1E5)\n\n else:\n tag, assets = get_github_assets(repo, release)\n if not assets:\n tag, assets = get_github_assets(repo) # latest release\n if name in assets:\n safe_download(url=f'https://github.com/{repo}/releases/download/{tag}/{name}', file=file, min_bytes=1E5)\n\n return str(file)"
},
{
"identifier": "MaskDecoder",
"path": "ultralytics/models/sam/modules/decoders.py",
"snippet": "class MaskDecoder(nn.Module):\n \"\"\"\n Decoder module for generating masks and their associated quality scores, using a transformer architecture to predict\n masks given image and prompt embeddings.\n\n Attributes:\n transformer_dim (int): Channel dimension for the transformer module.\n transformer (nn.Module): The transformer module used for mask prediction.\n num_multimask_outputs (int): Number of masks to predict for disambiguating masks.\n iou_token (nn.Embedding): Embedding for the IoU token.\n num_mask_tokens (int): Number of mask tokens.\n mask_tokens (nn.Embedding): Embedding for the mask tokens.\n output_upscaling (nn.Sequential): Neural network sequence for upscaling the output.\n output_hypernetworks_mlps (nn.ModuleList): Hypernetwork MLPs for generating masks.\n iou_prediction_head (nn.Module): MLP for predicting mask quality.\n \"\"\"\n\n def __init__(\n self,\n *,\n transformer_dim: int,\n transformer: nn.Module,\n num_multimask_outputs: int = 3,\n activation: Type[nn.Module] = nn.GELU,\n iou_head_depth: int = 3,\n iou_head_hidden_dim: int = 256,\n ) -> None:\n \"\"\"\n Predicts masks given an image and prompt embeddings, using a transformer architecture.\n\n Args:\n transformer_dim (int): the channel dimension of the transformer module\n transformer (nn.Module): the transformer used to predict masks\n num_multimask_outputs (int): the number of masks to predict when disambiguating masks\n activation (nn.Module): the type of activation to use when upscaling masks\n iou_head_depth (int): the depth of the MLP used to predict mask quality\n iou_head_hidden_dim (int): the hidden dimension of the MLP used to predict mask quality\n \"\"\"\n super().__init__()\n self.transformer_dim = transformer_dim\n self.transformer = transformer\n\n self.num_multimask_outputs = num_multimask_outputs\n\n self.iou_token = nn.Embedding(1, transformer_dim)\n self.num_mask_tokens = num_multimask_outputs + 1\n self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)\n\n self.output_upscaling = nn.Sequential(\n nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),\n LayerNorm2d(transformer_dim // 4),\n activation(),\n nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),\n activation(),\n )\n self.output_hypernetworks_mlps = nn.ModuleList([\n MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) for _ in range(self.num_mask_tokens)])\n\n self.iou_prediction_head = MLP(transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth)\n\n def forward(\n self,\n image_embeddings: torch.Tensor,\n image_pe: torch.Tensor,\n sparse_prompt_embeddings: torch.Tensor,\n dense_prompt_embeddings: torch.Tensor,\n multimask_output: bool,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks given image and prompt embeddings.\n\n Args:\n image_embeddings (torch.Tensor): the embeddings from the image encoder\n image_pe (torch.Tensor): positional encoding with the shape of image_embeddings\n sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes\n dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs\n multimask_output (bool): Whether to return multiple masks or a single mask.\n\n Returns:\n torch.Tensor: batched predicted masks\n torch.Tensor: batched predictions of mask quality\n \"\"\"\n masks, iou_pred = self.predict_masks(\n image_embeddings=image_embeddings,\n image_pe=image_pe,\n sparse_prompt_embeddings=sparse_prompt_embeddings,\n 
dense_prompt_embeddings=dense_prompt_embeddings,\n )\n\n # Select the correct mask or masks for output\n mask_slice = slice(1, None) if multimask_output else slice(0, 1)\n masks = masks[:, mask_slice, :, :]\n iou_pred = iou_pred[:, mask_slice]\n\n # Prepare output\n return masks, iou_pred\n\n def predict_masks(\n self,\n image_embeddings: torch.Tensor,\n image_pe: torch.Tensor,\n sparse_prompt_embeddings: torch.Tensor,\n dense_prompt_embeddings: torch.Tensor,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Predicts masks.\n\n See 'forward' for more details.\n \"\"\"\n # Concatenate output tokens\n output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)\n output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)\n tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)\n\n # Expand per-image data in batch direction to be per-mask\n src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)\n src = src + dense_prompt_embeddings\n pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)\n b, c, h, w = src.shape\n\n # Run the transformer\n hs, src = self.transformer(src, pos_src, tokens)\n iou_token_out = hs[:, 0, :]\n mask_tokens_out = hs[:, 1:(1 + self.num_mask_tokens), :]\n\n # Upscale mask embeddings and predict masks using the mask tokens\n src = src.transpose(1, 2).view(b, c, h, w)\n upscaled_embedding = self.output_upscaling(src)\n hyper_in_list: List[torch.Tensor] = [\n self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]) for i in range(self.num_mask_tokens)]\n hyper_in = torch.stack(hyper_in_list, dim=1)\n b, c, h, w = upscaled_embedding.shape\n masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)\n\n # Generate mask quality predictions\n iou_pred = self.iou_prediction_head(iou_token_out)\n\n return masks, iou_pred"
},
{
"identifier": "ImageEncoderViT",
"path": "ultralytics/models/sam/modules/encoders.py",
"snippet": "class ImageEncoderViT(nn.Module):\n \"\"\"\n An image encoder using Vision Transformer (ViT) architecture for encoding an image into a compact latent space. The\n encoder takes an image, splits it into patches, and processes these patches through a series of transformer blocks.\n The encoded patches are then processed through a neck to generate the final encoded representation.\n\n This class and its supporting functions below lightly adapted from the ViTDet backbone available at\n https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py.\n\n Attributes:\n img_size (int): Dimension of input images, assumed to be square.\n patch_embed (PatchEmbed): Module for patch embedding.\n pos_embed (nn.Parameter, optional): Absolute positional embedding for patches.\n blocks (nn.ModuleList): List of transformer blocks for processing patch embeddings.\n neck (nn.Sequential): Neck module to further process the output.\n \"\"\"\n\n def __init__(\n self,\n img_size: int = 1024,\n patch_size: int = 16,\n in_chans: int = 3,\n embed_dim: int = 768,\n depth: int = 12,\n num_heads: int = 12,\n mlp_ratio: float = 4.0,\n out_chans: int = 256,\n qkv_bias: bool = True,\n norm_layer: Type[nn.Module] = nn.LayerNorm,\n act_layer: Type[nn.Module] = nn.GELU,\n use_abs_pos: bool = True,\n use_rel_pos: bool = False,\n rel_pos_zero_init: bool = True,\n window_size: int = 0,\n global_attn_indexes: Tuple[int, ...] = (),\n ) -> None:\n \"\"\"\n Args:\n img_size (int): Input image size.\n patch_size (int): Patch size.\n in_chans (int): Number of input image channels.\n embed_dim (int): Patch embedding dimension.\n depth (int): Depth of ViT.\n num_heads (int): Number of attention heads in each ViT block.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool): If True, add a learnable bias to query, key, value.\n norm_layer (nn.Module): Normalization layer.\n act_layer (nn.Module): Activation layer.\n use_abs_pos (bool): If True, use absolute positional embeddings.\n use_rel_pos (bool): If True, add relative positional embeddings to the attention map.\n rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.\n window_size (int): Window size for window attention blocks.\n global_attn_indexes (list): Indexes for blocks using global attention.\n \"\"\"\n super().__init__()\n self.img_size = img_size\n\n self.patch_embed = PatchEmbed(\n kernel_size=(patch_size, patch_size),\n stride=(patch_size, patch_size),\n in_chans=in_chans,\n embed_dim=embed_dim,\n )\n\n self.pos_embed: Optional[nn.Parameter] = None\n if use_abs_pos:\n # Initialize absolute positional embedding with pretrain image size.\n self.pos_embed = nn.Parameter(torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim))\n\n self.blocks = nn.ModuleList()\n for i in range(depth):\n block = Block(\n dim=embed_dim,\n num_heads=num_heads,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n norm_layer=norm_layer,\n act_layer=act_layer,\n use_rel_pos=use_rel_pos,\n rel_pos_zero_init=rel_pos_zero_init,\n window_size=window_size if i not in global_attn_indexes else 0,\n input_size=(img_size // patch_size, img_size // patch_size),\n )\n self.blocks.append(block)\n\n self.neck = nn.Sequential(\n nn.Conv2d(\n embed_dim,\n out_chans,\n kernel_size=1,\n bias=False,\n ),\n LayerNorm2d(out_chans),\n nn.Conv2d(\n out_chans,\n out_chans,\n kernel_size=3,\n padding=1,\n bias=False,\n ),\n LayerNorm2d(out_chans),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n 
\"\"\"Processes input through patch embedding, applies positional embedding if present, and passes through blocks\n and neck.\n \"\"\"\n x = self.patch_embed(x)\n if self.pos_embed is not None:\n x = x + self.pos_embed\n for blk in self.blocks:\n x = blk(x)\n return self.neck(x.permute(0, 3, 1, 2))"
},
{
"identifier": "PromptEncoder",
"path": "ultralytics/models/sam/modules/encoders.py",
"snippet": "class PromptEncoder(nn.Module):\n \"\"\"\n Encodes different types of prompts, including points, boxes, and masks, for input to SAM's mask decoder. The encoder\n produces both sparse and dense embeddings for the input prompts.\n\n Attributes:\n embed_dim (int): Dimension of the embeddings.\n input_image_size (Tuple[int, int]): Size of the input image as (H, W).\n image_embedding_size (Tuple[int, int]): Spatial size of the image embedding as (H, W).\n pe_layer (PositionEmbeddingRandom): Module for random position embedding.\n num_point_embeddings (int): Number of point embeddings for different types of points.\n point_embeddings (nn.ModuleList): List of point embeddings.\n not_a_point_embed (nn.Embedding): Embedding for points that are not a part of any label.\n mask_input_size (Tuple[int, int]): Size of the input mask.\n mask_downscaling (nn.Sequential): Neural network for downscaling the mask.\n no_mask_embed (nn.Embedding): Embedding for cases where no mask is provided.\n \"\"\"\n\n def __init__(\n self,\n embed_dim: int,\n image_embedding_size: Tuple[int, int],\n input_image_size: Tuple[int, int],\n mask_in_chans: int,\n activation: Type[nn.Module] = nn.GELU,\n ) -> None:\n \"\"\"\n Encodes prompts for input to SAM's mask decoder.\n\n Args:\n embed_dim (int): The prompts' embedding dimension\n image_embedding_size (tuple(int, int)): The spatial size of the\n image embedding, as (H, W).\n input_image_size (int): The padded size of the image as input\n to the image encoder, as (H, W).\n mask_in_chans (int): The number of hidden channels used for\n encoding input masks.\n activation (nn.Module): The activation to use when encoding\n input masks.\n \"\"\"\n super().__init__()\n self.embed_dim = embed_dim\n self.input_image_size = input_image_size\n self.image_embedding_size = image_embedding_size\n self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)\n\n self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners\n point_embeddings = [nn.Embedding(1, embed_dim) for _ in range(self.num_point_embeddings)]\n self.point_embeddings = nn.ModuleList(point_embeddings)\n self.not_a_point_embed = nn.Embedding(1, embed_dim)\n\n self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])\n self.mask_downscaling = nn.Sequential(\n nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),\n LayerNorm2d(mask_in_chans // 4),\n activation(),\n nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),\n LayerNorm2d(mask_in_chans),\n activation(),\n nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),\n )\n self.no_mask_embed = nn.Embedding(1, embed_dim)\n\n def get_dense_pe(self) -> torch.Tensor:\n \"\"\"\n Returns the positional encoding used to encode point prompts, applied to a dense set of points the shape of the\n image encoding.\n\n Returns:\n torch.Tensor: Positional encoding with shape 1x(embed_dim)x(embedding_h)x(embedding_w)\n \"\"\"\n return self.pe_layer(self.image_embedding_size).unsqueeze(0)\n\n def _embed_points(\n self,\n points: torch.Tensor,\n labels: torch.Tensor,\n pad: bool,\n ) -> torch.Tensor:\n \"\"\"Embeds point prompts.\"\"\"\n points = points + 0.5 # Shift to center of pixel\n if pad:\n padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)\n padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)\n points = torch.cat([points, padding_point], dim=1)\n labels = torch.cat([labels, padding_label], dim=1)\n point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)\n 
point_embedding[labels == -1] = 0.0\n point_embedding[labels == -1] += self.not_a_point_embed.weight\n point_embedding[labels == 0] += self.point_embeddings[0].weight\n point_embedding[labels == 1] += self.point_embeddings[1].weight\n return point_embedding\n\n def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds box prompts.\"\"\"\n boxes = boxes + 0.5 # Shift to center of pixel\n coords = boxes.reshape(-1, 2, 2)\n corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)\n corner_embedding[:, 0, :] += self.point_embeddings[2].weight\n corner_embedding[:, 1, :] += self.point_embeddings[3].weight\n return corner_embedding\n\n def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds mask inputs.\"\"\"\n return self.mask_downscaling(masks)\n\n def _get_batch_size(\n self,\n points: Optional[Tuple[torch.Tensor, torch.Tensor]],\n boxes: Optional[torch.Tensor],\n masks: Optional[torch.Tensor],\n ) -> int:\n \"\"\"Gets the batch size of the output given the batch size of the input prompts.\"\"\"\n if points is not None:\n return points[0].shape[0]\n elif boxes is not None:\n return boxes.shape[0]\n elif masks is not None:\n return masks.shape[0]\n else:\n return 1\n\n def _get_device(self) -> torch.device:\n \"\"\"Returns the device of the first point embedding's weight tensor.\"\"\"\n return self.point_embeddings[0].weight.device\n\n def forward(\n self,\n points: Optional[Tuple[torch.Tensor, torch.Tensor]],\n boxes: Optional[torch.Tensor],\n masks: Optional[torch.Tensor],\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Embeds different types of prompts, returning both sparse and dense embeddings.\n\n Args:\n points (tuple(torch.Tensor, torch.Tensor), None): point coordinates and labels to embed.\n boxes (torch.Tensor, None): boxes to embed\n masks (torch.Tensor, None): masks to embed\n\n Returns:\n torch.Tensor: sparse embeddings for the points and boxes, with shape BxNx(embed_dim), where N is determined\n by the number of input points and boxes.\n torch.Tensor: dense embeddings for the masks, in the shape Bx(embed_dim)x(embed_H)x(embed_W)\n \"\"\"\n bs = self._get_batch_size(points, boxes, masks)\n sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())\n if points is not None:\n coords, labels = points\n point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))\n sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)\n if boxes is not None:\n box_embeddings = self._embed_boxes(boxes)\n sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)\n\n if masks is not None:\n dense_embeddings = self._embed_masks(masks)\n else:\n dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1,\n 1).expand(bs, -1, self.image_embedding_size[0],\n self.image_embedding_size[1])\n\n return sparse_embeddings, dense_embeddings"
},
{
"identifier": "Sam",
"path": "ultralytics/models/sam/modules/sam.py",
"snippet": "class Sam(nn.Module):\n \"\"\"\n Sam (Segment Anything Model) is designed for object segmentation tasks. It uses image encoders to generate image\n embeddings, and prompt encoders to encode various types of input prompts. These embeddings are then used by the mask\n decoder to predict object masks.\n\n Attributes:\n mask_threshold (float): Threshold value for mask prediction.\n image_format (str): Format of the input image, default is 'RGB'.\n image_encoder (ImageEncoderViT): The backbone used to encode the image into embeddings.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts object masks from the image and prompt embeddings.\n pixel_mean (List[float]): Mean pixel values for image normalization.\n pixel_std (List[float]): Standard deviation values for image normalization.\n \"\"\"\n mask_threshold: float = 0.0\n image_format: str = 'RGB'\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = (123.675, 116.28, 103.53),\n pixel_std: List[float] = (58.395, 57.12, 57.375)\n ) -> None:\n \"\"\"\n Initialize the Sam class to predict object masks from an image and input prompts.\n\n Note:\n All forward() operations moved to SAMPredictor.\n\n Args:\n image_encoder (ImageEncoderViT): The backbone used to encode the image into image embeddings.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings and encoded prompts.\n pixel_mean (List[float], optional): Mean values for normalizing pixels in the input image. Defaults to\n (123.675, 116.28, 103.53).\n pixel_std (List[float], optional): Std values for normalizing pixels in the input image. Defaults to\n (58.395, 57.12, 57.375).\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer('pixel_mean', torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer('pixel_std', torch.Tensor(pixel_std).view(-1, 1, 1), False)"
},
{
"identifier": "TinyViT",
"path": "ultralytics/models/sam/modules/tiny_encoder.py",
"snippet": "class TinyViT(nn.Module):\n \"\"\"\n The TinyViT architecture for vision tasks.\n\n Attributes:\n img_size (int): Input image size.\n in_chans (int): Number of input channels.\n num_classes (int): Number of classification classes.\n embed_dims (List[int]): List of embedding dimensions for each layer.\n depths (List[int]): List of depths for each layer.\n num_heads (List[int]): List of number of attention heads for each layer.\n window_sizes (List[int]): List of window sizes for each layer.\n mlp_ratio (float): Ratio of MLP hidden dimension to embedding dimension.\n drop_rate (float): Dropout rate for drop layers.\n drop_path_rate (float): Drop path rate for stochastic depth.\n use_checkpoint (bool): Use checkpointing for efficient memory usage.\n mbconv_expand_ratio (float): Expansion ratio for MBConv layer.\n local_conv_size (int): Local convolution kernel size.\n layer_lr_decay (float): Layer-wise learning rate decay.\n\n Note:\n This implementation is generalized to accept a list of depths, attention heads,\n embedding dimensions and window sizes, which allows you to create a\n \"stack\" of TinyViT models of varying configurations.\n \"\"\"\n\n def __init__(\n self,\n img_size=224,\n in_chans=3,\n num_classes=1000,\n embed_dims=[96, 192, 384, 768],\n depths=[2, 2, 6, 2],\n num_heads=[3, 6, 12, 24],\n window_sizes=[7, 7, 14, 7],\n mlp_ratio=4.,\n drop_rate=0.,\n drop_path_rate=0.1,\n use_checkpoint=False,\n mbconv_expand_ratio=4.0,\n local_conv_size=3,\n layer_lr_decay=1.0,\n ):\n \"\"\"\n Initializes the TinyViT model.\n\n Args:\n img_size (int, optional): The input image size. Defaults to 224.\n in_chans (int, optional): Number of input channels. Defaults to 3.\n num_classes (int, optional): Number of classification classes. Defaults to 1000.\n embed_dims (List[int], optional): List of embedding dimensions for each layer. Defaults to [96, 192, 384, 768].\n depths (List[int], optional): List of depths for each layer. Defaults to [2, 2, 6, 2].\n num_heads (List[int], optional): List of number of attention heads for each layer. Defaults to [3, 6, 12, 24].\n window_sizes (List[int], optional): List of window sizes for each layer. Defaults to [7, 7, 14, 7].\n mlp_ratio (float, optional): Ratio of MLP hidden dimension to embedding dimension. Defaults to 4.\n drop_rate (float, optional): Dropout rate. Defaults to 0.\n drop_path_rate (float, optional): Drop path rate for stochastic depth. Defaults to 0.1.\n use_checkpoint (bool, optional): Whether to use checkpointing for efficient memory usage. Defaults to False.\n mbconv_expand_ratio (float, optional): Expansion ratio for MBConv layer. Defaults to 4.0.\n local_conv_size (int, optional): Local convolution kernel size. Defaults to 3.\n layer_lr_decay (float, optional): Layer-wise learning rate decay. 
Defaults to 1.0.\n \"\"\"\n super().__init__()\n self.img_size = img_size\n self.num_classes = num_classes\n self.depths = depths\n self.num_layers = len(depths)\n self.mlp_ratio = mlp_ratio\n\n activation = nn.GELU\n\n self.patch_embed = PatchEmbed(in_chans=in_chans,\n embed_dim=embed_dims[0],\n resolution=img_size,\n activation=activation)\n\n patches_resolution = self.patch_embed.patches_resolution\n self.patches_resolution = patches_resolution\n\n # Stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n\n # Build layers\n self.layers = nn.ModuleList()\n for i_layer in range(self.num_layers):\n kwargs = dict(\n dim=embed_dims[i_layer],\n input_resolution=(patches_resolution[0] // (2 ** (i_layer - 1 if i_layer == 3 else i_layer)),\n patches_resolution[1] // (2 ** (i_layer - 1 if i_layer == 3 else i_layer))),\n # input_resolution=(patches_resolution[0] // (2 ** i_layer),\n # patches_resolution[1] // (2 ** i_layer)),\n depth=depths[i_layer],\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,\n use_checkpoint=use_checkpoint,\n out_dim=embed_dims[min(i_layer + 1,\n len(embed_dims) - 1)],\n activation=activation,\n )\n if i_layer == 0:\n layer = ConvLayer(conv_expand_ratio=mbconv_expand_ratio, **kwargs)\n else:\n layer = BasicLayer(num_heads=num_heads[i_layer],\n window_size=window_sizes[i_layer],\n mlp_ratio=self.mlp_ratio,\n drop=drop_rate,\n local_conv_size=local_conv_size,\n **kwargs)\n self.layers.append(layer)\n\n # Classifier head\n self.norm_head = nn.LayerNorm(embed_dims[-1])\n self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else torch.nn.Identity()\n\n # Init weights\n self.apply(self._init_weights)\n self.set_layer_lr_decay(layer_lr_decay)\n self.neck = nn.Sequential(\n nn.Conv2d(\n embed_dims[-1],\n 256,\n kernel_size=1,\n bias=False,\n ),\n LayerNorm2d(256),\n nn.Conv2d(\n 256,\n 256,\n kernel_size=3,\n padding=1,\n bias=False,\n ),\n LayerNorm2d(256),\n )\n\n def set_layer_lr_decay(self, layer_lr_decay):\n \"\"\"Sets the learning rate decay for each layer in the TinyViT model.\"\"\"\n decay_rate = layer_lr_decay\n\n # Layers -> blocks (depth)\n depth = sum(self.depths)\n lr_scales = [decay_rate ** (depth - i - 1) for i in range(depth)]\n\n def _set_lr_scale(m, scale):\n \"\"\"Sets the learning rate scale for each layer in the model based on the layer's depth.\"\"\"\n for p in m.parameters():\n p.lr_scale = scale\n\n self.patch_embed.apply(lambda x: _set_lr_scale(x, lr_scales[0]))\n i = 0\n for layer in self.layers:\n for block in layer.blocks:\n block.apply(lambda x: _set_lr_scale(x, lr_scales[i]))\n i += 1\n if layer.downsample is not None:\n layer.downsample.apply(lambda x: _set_lr_scale(x, lr_scales[i - 1]))\n assert i == depth\n for m in [self.norm_head, self.head]:\n m.apply(lambda x: _set_lr_scale(x, lr_scales[-1]))\n\n for k, p in self.named_parameters():\n p.param_name = k\n\n def _check_lr_scale(m):\n \"\"\"Checks if the learning rate scale attribute is present in module's parameters.\"\"\"\n for p in m.parameters():\n assert hasattr(p, 'lr_scale'), p.param_name\n\n self.apply(_check_lr_scale)\n\n def _init_weights(self, m):\n \"\"\"Initializes weights for linear layers and layer normalization in the given module.\"\"\"\n if isinstance(m, nn.Linear):\n # NOTE: This initialization is needed only for training.\n # trunc_normal_(m.weight, std=.02)\n if m.bias is not None:\n 
nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay_keywords(self):\n \"\"\"Returns a dictionary of parameter names where weight decay should not be applied.\"\"\"\n return {'attention_biases'}\n\n def forward_features(self, x):\n \"\"\"Runs the input through the model layers and returns the transformed output.\"\"\"\n x = self.patch_embed(x) # x input is (N, C, H, W)\n\n x = self.layers[0](x)\n start_i = 1\n\n for i in range(start_i, len(self.layers)):\n layer = self.layers[i]\n x = layer(x)\n B, _, C = x.size()\n x = x.view(B, 64, 64, C)\n x = x.permute(0, 3, 1, 2)\n return self.neck(x)\n\n def forward(self, x):\n \"\"\"Executes a forward pass on the input tensor through the constructed model layers.\"\"\"\n return self.forward_features(x)"
},
{
"identifier": "TwoWayTransformer",
"path": "ultralytics/models/sam/modules/transformer.py",
"snippet": "class TwoWayTransformer(nn.Module):\n \"\"\"\n A Two-Way Transformer module that enables the simultaneous attention to both image and query points. This class\n serves as a specialized transformer decoder that attends to an input image using queries whose positional embedding\n is supplied. This is particularly useful for tasks like object detection, image segmentation, and point cloud\n processing.\n\n Attributes:\n depth (int): The number of layers in the transformer.\n embedding_dim (int): The channel dimension for the input embeddings.\n num_heads (int): The number of heads for multihead attention.\n mlp_dim (int): The internal channel dimension for the MLP block.\n layers (nn.ModuleList): The list of TwoWayAttentionBlock layers that make up the transformer.\n final_attn_token_to_image (Attention): The final attention layer applied from the queries to the image.\n norm_final_attn (nn.LayerNorm): The layer normalization applied to the final queries.\n \"\"\"\n\n def __init__(\n self,\n depth: int,\n embedding_dim: int,\n num_heads: int,\n mlp_dim: int,\n activation: Type[nn.Module] = nn.ReLU,\n attention_downsample_rate: int = 2,\n ) -> None:\n \"\"\"\n A transformer decoder that attends to an input image using queries whose positional embedding is supplied.\n\n Args:\n depth (int): number of layers in the transformer\n embedding_dim (int): the channel dimension for the input embeddings\n num_heads (int): the number of heads for multihead attention. Must\n divide embedding_dim\n mlp_dim (int): the channel dimension internal to the MLP block\n activation (nn.Module): the activation to use in the MLP block\n \"\"\"\n super().__init__()\n self.depth = depth\n self.embedding_dim = embedding_dim\n self.num_heads = num_heads\n self.mlp_dim = mlp_dim\n self.layers = nn.ModuleList()\n\n for i in range(depth):\n self.layers.append(\n TwoWayAttentionBlock(\n embedding_dim=embedding_dim,\n num_heads=num_heads,\n mlp_dim=mlp_dim,\n activation=activation,\n attention_downsample_rate=attention_downsample_rate,\n skip_first_layer_pe=(i == 0),\n ))\n\n self.final_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)\n self.norm_final_attn = nn.LayerNorm(embedding_dim)\n\n def forward(\n self,\n image_embedding: Tensor,\n image_pe: Tensor,\n point_embedding: Tensor,\n ) -> Tuple[Tensor, Tensor]:\n \"\"\"\n Args:\n image_embedding (torch.Tensor): image to attend to. Should be shape B x embedding_dim x h x w for any h and w.\n image_pe (torch.Tensor): the positional encoding to add to the image. 
Must have same shape as image_embedding.\n point_embedding (torch.Tensor): the embedding to add to the query points.\n Must have shape B x N_points x embedding_dim for any N_points.\n\n Returns:\n (torch.Tensor): the processed point_embedding\n (torch.Tensor): the processed image_embedding\n \"\"\"\n # BxCxHxW -> BxHWxC == B x N_image_tokens x C\n bs, c, h, w = image_embedding.shape\n image_embedding = image_embedding.flatten(2).permute(0, 2, 1)\n image_pe = image_pe.flatten(2).permute(0, 2, 1)\n\n # Prepare queries\n queries = point_embedding\n keys = image_embedding\n\n # Apply transformer blocks and final layernorm\n for layer in self.layers:\n queries, keys = layer(\n queries=queries,\n keys=keys,\n query_pe=point_embedding,\n key_pe=image_pe,\n )\n\n # Apply the final attention layer from the points to the image\n q = queries + point_embedding\n k = keys + image_pe\n attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)\n queries = queries + attn_out\n queries = self.norm_final_attn(queries)\n\n return queries, keys"
}
] | from functools import partial
from ultralytics.utils.downloads import attempt_download_asset
from .modules.decoders import MaskDecoder
from .modules.encoders import ImageEncoderViT, PromptEncoder
from .modules.sam import Sam
from .modules.tiny_encoder import TinyViT
from .modules.transformer import TwoWayTransformer
import torch | 9,754 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def build_sam_vit_h(checkpoint=None):
"""Build and return a Segment Anything Model (SAM) h-size model."""
return _build_sam(
encoder_embed_dim=1280,
encoder_depth=32,
encoder_num_heads=16,
encoder_global_attn_indexes=[7, 15, 23, 31],
checkpoint=checkpoint,
)
def build_sam_vit_l(checkpoint=None):
"""Build and return a Segment Anything Model (SAM) l-size model."""
return _build_sam(
encoder_embed_dim=1024,
encoder_depth=24,
encoder_num_heads=16,
encoder_global_attn_indexes=[5, 11, 17, 23],
checkpoint=checkpoint,
)
def build_sam_vit_b(checkpoint=None):
"""Build and return a Segment Anything Model (SAM) b-size model."""
return _build_sam(
encoder_embed_dim=768,
encoder_depth=12,
encoder_num_heads=12,
encoder_global_attn_indexes=[2, 5, 8, 11],
checkpoint=checkpoint,
)
def build_mobile_sam(checkpoint=None):
"""Build and return Mobile Segment Anything Model (Mobile-SAM)."""
return _build_sam(
encoder_embed_dim=[64, 128, 160, 320],
encoder_depth=[2, 2, 6, 2],
encoder_num_heads=[2, 4, 5, 10],
encoder_global_attn_indexes=None,
mobile_sam=True,
checkpoint=checkpoint,
)
def _build_sam(encoder_embed_dim,
encoder_depth,
encoder_num_heads,
encoder_global_attn_indexes,
checkpoint=None,
mobile_sam=False):
"""Builds the selected SAM model architecture."""
prompt_embed_dim = 256
image_size = 1024
vit_patch_size = 16
image_embedding_size = image_size // vit_patch_size
image_encoder = (TinyViT(
img_size=1024,
in_chans=3,
num_classes=1000,
embed_dims=encoder_embed_dim,
depths=encoder_depth,
num_heads=encoder_num_heads,
window_sizes=[7, 7, 14, 7],
mlp_ratio=4.0,
drop_rate=0.0,
drop_path_rate=0.0,
use_checkpoint=False,
mbconv_expand_ratio=4.0,
local_conv_size=3,
layer_lr_decay=0.8,
) if mobile_sam else ImageEncoderViT(
depth=encoder_depth,
embed_dim=encoder_embed_dim,
img_size=image_size,
mlp_ratio=4,
norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
num_heads=encoder_num_heads,
patch_size=vit_patch_size,
qkv_bias=True,
use_rel_pos=True,
global_attn_indexes=encoder_global_attn_indexes,
window_size=14,
out_chans=prompt_embed_dim,
))
sam = Sam(
image_encoder=image_encoder,
prompt_encoder=PromptEncoder(
embed_dim=prompt_embed_dim,
image_embedding_size=(image_embedding_size, image_embedding_size),
input_image_size=(image_size, image_size),
mask_in_chans=16,
),
mask_decoder=MaskDecoder(
num_multimask_outputs=3,
transformer=TwoWayTransformer(
depth=2,
embedding_dim=prompt_embed_dim,
mlp_dim=2048,
num_heads=8,
),
transformer_dim=prompt_embed_dim,
iou_head_depth=3,
iou_head_hidden_dim=256,
),
pixel_mean=[123.675, 116.28, 103.53],
pixel_std=[58.395, 57.12, 57.375],
)
if checkpoint is not None:
| # Ultralytics YOLO 🚀, AGPL-3.0 license
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def build_sam_vit_h(checkpoint=None):
"""Build and return a Segment Anything Model (SAM) h-size model."""
return _build_sam(
encoder_embed_dim=1280,
encoder_depth=32,
encoder_num_heads=16,
encoder_global_attn_indexes=[7, 15, 23, 31],
checkpoint=checkpoint,
)
def build_sam_vit_l(checkpoint=None):
"""Build and return a Segment Anything Model (SAM) l-size model."""
return _build_sam(
encoder_embed_dim=1024,
encoder_depth=24,
encoder_num_heads=16,
encoder_global_attn_indexes=[5, 11, 17, 23],
checkpoint=checkpoint,
)
def build_sam_vit_b(checkpoint=None):
"""Build and return a Segment Anything Model (SAM) b-size model."""
return _build_sam(
encoder_embed_dim=768,
encoder_depth=12,
encoder_num_heads=12,
encoder_global_attn_indexes=[2, 5, 8, 11],
checkpoint=checkpoint,
)
def build_mobile_sam(checkpoint=None):
"""Build and return Mobile Segment Anything Model (Mobile-SAM)."""
return _build_sam(
encoder_embed_dim=[64, 128, 160, 320],
encoder_depth=[2, 2, 6, 2],
encoder_num_heads=[2, 4, 5, 10],
encoder_global_attn_indexes=None,
mobile_sam=True,
checkpoint=checkpoint,
)
def _build_sam(encoder_embed_dim,
encoder_depth,
encoder_num_heads,
encoder_global_attn_indexes,
checkpoint=None,
mobile_sam=False):
"""Builds the selected SAM model architecture."""
prompt_embed_dim = 256
image_size = 1024
vit_patch_size = 16
image_embedding_size = image_size // vit_patch_size
image_encoder = (TinyViT(
img_size=1024,
in_chans=3,
num_classes=1000,
embed_dims=encoder_embed_dim,
depths=encoder_depth,
num_heads=encoder_num_heads,
window_sizes=[7, 7, 14, 7],
mlp_ratio=4.0,
drop_rate=0.0,
drop_path_rate=0.0,
use_checkpoint=False,
mbconv_expand_ratio=4.0,
local_conv_size=3,
layer_lr_decay=0.8,
) if mobile_sam else ImageEncoderViT(
depth=encoder_depth,
embed_dim=encoder_embed_dim,
img_size=image_size,
mlp_ratio=4,
norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
num_heads=encoder_num_heads,
patch_size=vit_patch_size,
qkv_bias=True,
use_rel_pos=True,
global_attn_indexes=encoder_global_attn_indexes,
window_size=14,
out_chans=prompt_embed_dim,
))
sam = Sam(
image_encoder=image_encoder,
prompt_encoder=PromptEncoder(
embed_dim=prompt_embed_dim,
image_embedding_size=(image_embedding_size, image_embedding_size),
input_image_size=(image_size, image_size),
mask_in_chans=16,
),
mask_decoder=MaskDecoder(
num_multimask_outputs=3,
transformer=TwoWayTransformer(
depth=2,
embedding_dim=prompt_embed_dim,
mlp_dim=2048,
num_heads=8,
),
transformer_dim=prompt_embed_dim,
iou_head_depth=3,
iou_head_hidden_dim=256,
),
pixel_mean=[123.675, 116.28, 103.53],
pixel_std=[58.395, 57.12, 57.375],
)
if checkpoint is not None: | checkpoint = attempt_download_asset(checkpoint) | 0 | 2023-11-16 12:49:59+00:00 | 12k |
i-super/Saleor | saleor/graphql/account/tests/mutations/staff/test_customer_delete.py | [
{
"identifier": "WebhookEventAsyncType",
"path": "saleor/webhook/event_types.py",
"snippet": "class WebhookEventAsyncType:\n ANY = \"any_events\"\n\n ACCOUNT_CONFIRMATION_REQUESTED = \"account_confirmation_requested\"\n ACCOUNT_EMAIL_CHANGED = \"account_email_changed\"\n ACCOUNT_CHANGE_EMAIL_REQUESTED = \"account_change_email_requested\"\n ACCOUNT_SET_PASSWORD_REQUESTED = \"account_set_password_requested\"\n ACCOUNT_CONFIRMED = \"account_confirmed\"\n ACCOUNT_DELETE_REQUESTED = \"account_delete_requested\"\n ACCOUNT_DELETED = \"account_deleted\"\n\n ADDRESS_CREATED = \"address_created\"\n ADDRESS_UPDATED = \"address_updated\"\n ADDRESS_DELETED = \"address_deleted\"\n\n APP_INSTALLED = \"app_installed\"\n APP_UPDATED = \"app_updated\"\n APP_DELETED = \"app_deleted\"\n APP_STATUS_CHANGED = \"app_status_changed\"\n\n ATTRIBUTE_CREATED = \"attribute_created\"\n ATTRIBUTE_UPDATED = \"attribute_updated\"\n ATTRIBUTE_DELETED = \"attribute_deleted\"\n\n ATTRIBUTE_VALUE_CREATED = \"attribute_value_created\"\n ATTRIBUTE_VALUE_UPDATED = \"attribute_value_updated\"\n ATTRIBUTE_VALUE_DELETED = \"attribute_value_deleted\"\n\n CATEGORY_CREATED = \"category_created\"\n CATEGORY_UPDATED = \"category_updated\"\n CATEGORY_DELETED = \"category_deleted\"\n\n CHANNEL_CREATED = \"channel_created\"\n CHANNEL_UPDATED = \"channel_updated\"\n CHANNEL_DELETED = \"channel_deleted\"\n CHANNEL_STATUS_CHANGED = \"channel_status_changed\"\n CHANNEL_METADATA_UPDATED = \"channel_metadata_updated\"\n\n GIFT_CARD_CREATED = \"gift_card_created\"\n GIFT_CARD_UPDATED = \"gift_card_updated\"\n GIFT_CARD_DELETED = \"gift_card_deleted\"\n GIFT_CARD_SENT = \"gift_card_sent\"\n GIFT_CARD_STATUS_CHANGED = \"gift_card_status_changed\"\n GIFT_CARD_METADATA_UPDATED = \"gift_card_metadata_updated\"\n GIFT_CARD_EXPORT_COMPLETED = \"gift_card_export_completed\"\n\n MENU_CREATED = \"menu_created\"\n MENU_UPDATED = \"menu_updated\"\n MENU_DELETED = \"menu_deleted\"\n MENU_ITEM_CREATED = \"menu_item_created\"\n MENU_ITEM_UPDATED = \"menu_item_updated\"\n MENU_ITEM_DELETED = \"menu_item_deleted\"\n\n ORDER_CREATED = \"order_created\"\n ORDER_CONFIRMED = \"order_confirmed\"\n ORDER_PAID = \"order_paid\"\n ORDER_FULLY_PAID = \"order_fully_paid\"\n ORDER_REFUNDED = \"order_refunded\"\n ORDER_FULLY_REFUNDED = \"order_fully_refunded\"\n ORDER_UPDATED = \"order_updated\"\n ORDER_CANCELLED = \"order_cancelled\"\n ORDER_EXPIRED = \"order_expired\"\n ORDER_FULFILLED = \"order_fulfilled\"\n ORDER_METADATA_UPDATED = \"order_metadata_updated\"\n ORDER_BULK_CREATED = \"order_bulk_created\"\n\n FULFILLMENT_CREATED = \"fulfillment_created\"\n FULFILLMENT_CANCELED = \"fulfillment_canceled\"\n FULFILLMENT_APPROVED = \"fulfillment_approved\"\n FULFILLMENT_METADATA_UPDATED = \"fulfillment_metadata_updated\"\n FULFILLMENT_TRACKING_NUMBER_UPDATED = \"fulfillment_tracking_number_updated\"\n\n DRAFT_ORDER_CREATED = \"draft_order_created\"\n DRAFT_ORDER_UPDATED = \"draft_order_updated\"\n DRAFT_ORDER_DELETED = \"draft_order_deleted\"\n\n SALE_CREATED = \"sale_created\"\n SALE_UPDATED = \"sale_updated\"\n SALE_DELETED = \"sale_deleted\"\n SALE_TOGGLE = \"sale_toggle\"\n\n PROMOTION_CREATED = \"promotion_created\"\n PROMOTION_UPDATED = \"promotion_updated\"\n PROMOTION_DELETED = \"promotion_deleted\"\n PROMOTION_STARTED = \"promotion_started\"\n PROMOTION_ENDED = \"promotion_ended\"\n\n PROMOTION_RULE_CREATED = \"promotion_rule_created\"\n PROMOTION_RULE_UPDATED = \"promotion_rule_updated\"\n PROMOTION_RULE_DELETED = \"promotion_rule_deleted\"\n\n INVOICE_REQUESTED = \"invoice_requested\"\n INVOICE_DELETED = \"invoice_deleted\"\n INVOICE_SENT = 
\"invoice_sent\"\n\n CUSTOMER_CREATED = \"customer_created\"\n CUSTOMER_UPDATED = \"customer_updated\"\n CUSTOMER_DELETED = \"customer_deleted\"\n CUSTOMER_METADATA_UPDATED = \"customer_metadata_updated\"\n\n COLLECTION_CREATED = \"collection_created\"\n COLLECTION_UPDATED = \"collection_updated\"\n COLLECTION_DELETED = \"collection_deleted\"\n COLLECTION_METADATA_UPDATED = \"collection_metadata_updated\"\n\n PRODUCT_CREATED = \"product_created\"\n PRODUCT_UPDATED = \"product_updated\"\n PRODUCT_DELETED = \"product_deleted\"\n PRODUCT_METADATA_UPDATED = \"product_metadata_updated\"\n PRODUCT_EXPORT_COMPLETED = \"product_export_completed\"\n\n PRODUCT_MEDIA_CREATED = \"product_media_created\"\n PRODUCT_MEDIA_UPDATED = \"product_media_updated\"\n PRODUCT_MEDIA_DELETED = \"product_media_deleted\"\n\n PRODUCT_VARIANT_CREATED = \"product_variant_created\"\n PRODUCT_VARIANT_UPDATED = \"product_variant_updated\"\n PRODUCT_VARIANT_DELETED = \"product_variant_deleted\"\n PRODUCT_VARIANT_METADATA_UPDATED = \"product_variant_metadata_updated\"\n\n PRODUCT_VARIANT_OUT_OF_STOCK = \"product_variant_out_of_stock\"\n PRODUCT_VARIANT_BACK_IN_STOCK = \"product_variant_back_in_stock\"\n PRODUCT_VARIANT_STOCK_UPDATED = \"product_variant_stock_updated\"\n\n CHECKOUT_CREATED = \"checkout_created\"\n CHECKOUT_UPDATED = \"checkout_updated\"\n CHECKOUT_FULLY_PAID = \"checkout_fully_paid\"\n CHECKOUT_METADATA_UPDATED = \"checkout_metadata_updated\"\n\n NOTIFY_USER = \"notify_user\" # deprecated\n\n PAGE_CREATED = \"page_created\"\n PAGE_UPDATED = \"page_updated\"\n PAGE_DELETED = \"page_deleted\"\n\n PAGE_TYPE_CREATED = \"page_type_created\"\n PAGE_TYPE_UPDATED = \"page_type_updated\"\n PAGE_TYPE_DELETED = \"page_type_deleted\"\n\n PERMISSION_GROUP_CREATED = \"permission_group_created\"\n PERMISSION_GROUP_UPDATED = \"permission_group_updated\"\n PERMISSION_GROUP_DELETED = \"permission_group_deleted\"\n\n SHIPPING_PRICE_CREATED = \"shipping_price_created\"\n SHIPPING_PRICE_UPDATED = \"shipping_price_updated\"\n SHIPPING_PRICE_DELETED = \"shipping_price_deleted\"\n\n SHIPPING_ZONE_CREATED = \"shipping_zone_created\"\n SHIPPING_ZONE_UPDATED = \"shipping_zone_updated\"\n SHIPPING_ZONE_DELETED = \"shipping_zone_deleted\"\n SHIPPING_ZONE_METADATA_UPDATED = \"shipping_zone_metadata_updated\"\n\n STAFF_CREATED = \"staff_created\"\n STAFF_UPDATED = \"staff_updated\"\n STAFF_DELETED = \"staff_deleted\"\n STAFF_SET_PASSWORD_REQUESTED = \"staff_set_password_requested\"\n\n TRANSACTION_ITEM_METADATA_UPDATED = \"transaction_item_metadata_updated\"\n\n TRANSLATION_CREATED = \"translation_created\"\n TRANSLATION_UPDATED = \"translation_updated\"\n\n WAREHOUSE_CREATED = \"warehouse_created\"\n WAREHOUSE_UPDATED = \"warehouse_updated\"\n WAREHOUSE_DELETED = \"warehouse_deleted\"\n WAREHOUSE_METADATA_UPDATED = \"warehouse_metadata_updated\"\n\n VOUCHER_CREATED = \"voucher_created\"\n VOUCHER_UPDATED = \"voucher_updated\"\n VOUCHER_DELETED = \"voucher_deleted\"\n VOUCHER_METADATA_UPDATED = \"voucher_metadata_updated\"\n VOUCHER_CODE_EXPORT_COMPLETED = \"voucher_code_export_completed\"\n\n OBSERVABILITY = \"observability\"\n\n THUMBNAIL_CREATED = \"thumbnail_created\"\n\n SHOP_METADATA_UPDATED = \"shop_metadata_updated\"\n\n EVENT_MAP: dict[str, dict[str, Any]] = {\n ACCOUNT_CONFIRMATION_REQUESTED: {\n \"name\": \"Account confirmation requested\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n ACCOUNT_CHANGE_EMAIL_REQUESTED: {\n \"name\": \"Account change email requested\",\n \"permission\": 
AccountPermissions.MANAGE_USERS,\n },\n ACCOUNT_EMAIL_CHANGED: {\n \"name\": \"Account email changed\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n ACCOUNT_SET_PASSWORD_REQUESTED: {\n \"name\": \"Account set password requested\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n ACCOUNT_CONFIRMED: {\n \"name\": \"Account confirmed\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n ACCOUNT_DELETE_REQUESTED: {\n \"name\": \"Account delete requested\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n ACCOUNT_DELETED: {\n \"name\": \"Account delete confirmed\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n ADDRESS_CREATED: {\n \"name\": \"Address created\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n ADDRESS_UPDATED: {\n \"name\": \"Address updated\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n ADDRESS_DELETED: {\n \"name\": \"Address deleted\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n APP_INSTALLED: {\n \"name\": \"App created\",\n \"permission\": AppPermission.MANAGE_APPS,\n },\n APP_UPDATED: {\n \"name\": \"App updated\",\n \"permission\": AppPermission.MANAGE_APPS,\n },\n APP_DELETED: {\n \"name\": \"App deleted\",\n \"permission\": AppPermission.MANAGE_APPS,\n },\n APP_STATUS_CHANGED: {\n \"name\": \"App status changed\",\n \"permission\": AppPermission.MANAGE_APPS,\n },\n ATTRIBUTE_CREATED: {\n \"name\": \"Attribute created\",\n \"permission\": None,\n },\n ATTRIBUTE_UPDATED: {\n \"name\": \"Attribute updated\",\n \"permission\": None,\n },\n ATTRIBUTE_DELETED: {\n \"name\": \"Attribute deleted\",\n \"permission\": None,\n },\n ATTRIBUTE_VALUE_CREATED: {\n \"name\": \"Attribute value created\",\n \"permission\": None,\n },\n ATTRIBUTE_VALUE_UPDATED: {\n \"name\": \"Attribute value updated\",\n \"permission\": None,\n },\n ATTRIBUTE_VALUE_DELETED: {\n \"name\": \"Attribute value deleted\",\n \"permission\": None,\n },\n CATEGORY_CREATED: {\n \"name\": \"Category created\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n CATEGORY_UPDATED: {\n \"name\": \"Category updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n CATEGORY_DELETED: {\n \"name\": \"Category deleted\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n CHANNEL_CREATED: {\n \"name\": \"Channel created\",\n \"permission\": ChannelPermissions.MANAGE_CHANNELS,\n },\n CHANNEL_UPDATED: {\n \"name\": \"Channel updated\",\n \"permission\": ChannelPermissions.MANAGE_CHANNELS,\n },\n CHANNEL_DELETED: {\n \"name\": \"Channel deleted\",\n \"permission\": ChannelPermissions.MANAGE_CHANNELS,\n },\n CHANNEL_STATUS_CHANGED: {\n \"name\": \"Channel status changed\",\n \"permission\": ChannelPermissions.MANAGE_CHANNELS,\n },\n CHANNEL_METADATA_UPDATED: {\n \"name\": \"Channel metadata updated\",\n \"permission\": ChannelPermissions.MANAGE_CHANNELS,\n },\n GIFT_CARD_CREATED: {\n \"name\": \"Gift card created\",\n \"permission\": GiftcardPermissions.MANAGE_GIFT_CARD,\n },\n GIFT_CARD_UPDATED: {\n \"name\": \"Gift card updated\",\n \"permission\": GiftcardPermissions.MANAGE_GIFT_CARD,\n },\n GIFT_CARD_DELETED: {\n \"name\": \"Gift card deleted\",\n \"permission\": GiftcardPermissions.MANAGE_GIFT_CARD,\n },\n GIFT_CARD_SENT: {\n \"name\": \"Gift card sent\",\n \"permission\": GiftcardPermissions.MANAGE_GIFT_CARD,\n },\n GIFT_CARD_STATUS_CHANGED: {\n \"name\": \"Gift card status changed\",\n \"permission\": GiftcardPermissions.MANAGE_GIFT_CARD,\n },\n GIFT_CARD_METADATA_UPDATED: {\n \"name\": \"Gift card 
metadata updated\",\n \"permission\": GiftcardPermissions.MANAGE_GIFT_CARD,\n },\n GIFT_CARD_EXPORT_COMPLETED: {\n \"name\": \"Gift card export completed\",\n \"permission\": GiftcardPermissions.MANAGE_GIFT_CARD,\n },\n MENU_CREATED: {\n \"name\": \"Menu created\",\n \"permission\": MenuPermissions.MANAGE_MENUS,\n },\n MENU_UPDATED: {\n \"name\": \"Menu updated\",\n \"permission\": MenuPermissions.MANAGE_MENUS,\n },\n MENU_DELETED: {\n \"name\": \"Menu deleted\",\n \"permission\": MenuPermissions.MANAGE_MENUS,\n },\n MENU_ITEM_CREATED: {\n \"name\": \"Menu item created\",\n \"permission\": MenuPermissions.MANAGE_MENUS,\n },\n MENU_ITEM_UPDATED: {\n \"name\": \"Menu item updated\",\n \"permission\": MenuPermissions.MANAGE_MENUS,\n },\n MENU_ITEM_DELETED: {\n \"name\": \"Menu item deleted\",\n \"permission\": MenuPermissions.MANAGE_MENUS,\n },\n ORDER_CREATED: {\n \"name\": \"Order created\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_CONFIRMED: {\n \"name\": \"Order confirmed\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_PAID: {\n \"name\": \"Order paid\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_FULLY_PAID: {\n \"name\": \"Order fully paid\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_REFUNDED: {\n \"name\": \"Order refunded\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_FULLY_REFUNDED: {\n \"name\": \"Order fully refunded\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_UPDATED: {\n \"name\": \"Order updated\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_CANCELLED: {\n \"name\": \"Order cancelled\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_EXPIRED: {\n \"name\": \"Order expired\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_FULFILLED: {\n \"name\": \"Order fulfilled\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_METADATA_UPDATED: {\n \"name\": \"Order metadata updated\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_BULK_CREATED: {\n \"name\": \"Order bulk created\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n FULFILLMENT_CREATED: {\n \"name\": \"Fulfillment created\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n FULFILLMENT_CANCELED: {\n \"name\": \"Fulfillment cancelled\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n FULFILLMENT_APPROVED: {\n \"name\": \"Fulfillment approved\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n FULFILLMENT_METADATA_UPDATED: {\n \"name\": \"Fulfillment metadata updated\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n FULFILLMENT_TRACKING_NUMBER_UPDATED: {\n \"name\": \"Fulfillment tracking number updated.\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n DRAFT_ORDER_CREATED: {\n \"name\": \"Draft order created\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n DRAFT_ORDER_UPDATED: {\n \"name\": \"Draft order updated\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n DRAFT_ORDER_DELETED: {\n \"name\": \"Draft order deleted\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n SALE_CREATED: {\n \"name\": \"Sale created\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n SALE_UPDATED: {\n \"name\": \"Sale updated\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n SALE_DELETED: {\n \"name\": \"Sale deleted\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n SALE_TOGGLE: {\n \"name\": \"Sale toggle\",\n \"permission\": 
DiscountPermissions.MANAGE_DISCOUNTS,\n },\n PROMOTION_CREATED: {\n \"name\": \"Promotion created\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n PROMOTION_UPDATED: {\n \"name\": \"Promotion updated\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n PROMOTION_DELETED: {\n \"name\": \"Promotion deleted\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n PROMOTION_STARTED: {\n \"name\": \"Promotion started\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n PROMOTION_ENDED: {\n \"name\": \"Promotion ended\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n PROMOTION_RULE_CREATED: {\n \"name\": \"Promotion rule created\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n PROMOTION_RULE_UPDATED: {\n \"name\": \"Promotion rule updated\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n PROMOTION_RULE_DELETED: {\n \"name\": \"Promotion rule deleted\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n INVOICE_REQUESTED: {\n \"name\": \"Invoice requested\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n INVOICE_DELETED: {\n \"name\": \"Invoice deleted\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n INVOICE_SENT: {\n \"name\": \"Invoice sent\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n CUSTOMER_CREATED: {\n \"name\": \"Customer created\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n CUSTOMER_UPDATED: {\n \"name\": \"Customer updated\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n CUSTOMER_DELETED: {\n \"name\": \"Customer deleted\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n CUSTOMER_METADATA_UPDATED: {\n \"name\": \"Customer metadata updated\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n COLLECTION_CREATED: {\n \"name\": \"Collection created\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n COLLECTION_UPDATED: {\n \"name\": \"Collection updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n COLLECTION_DELETED: {\n \"name\": \"Collection deleted\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n COLLECTION_METADATA_UPDATED: {\n \"name\": \"Collection metadata updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_CREATED: {\n \"name\": \"Product created\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_UPDATED: {\n \"name\": \"Product updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_DELETED: {\n \"name\": \"Product deleted\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_METADATA_UPDATED: {\n \"name\": \"Product metadata updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_EXPORT_COMPLETED: {\n \"name\": \"Product export completed\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_MEDIA_CREATED: {\n \"name\": \"Product media created\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_MEDIA_UPDATED: {\n \"name\": \"Product media updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_MEDIA_DELETED: {\n \"name\": \"Product media deleted\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_VARIANT_CREATED: {\n \"name\": \"Product variant created\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_VARIANT_UPDATED: {\n \"name\": \"Product variant updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n 
PRODUCT_VARIANT_DELETED: {\n \"name\": \"Product variant deleted\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_VARIANT_METADATA_UPDATED: {\n \"name\": \"Product variant metadata updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_VARIANT_OUT_OF_STOCK: {\n \"name\": \"Product variant stock changed\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_VARIANT_BACK_IN_STOCK: {\n \"name\": \"Product variant back in stock\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_VARIANT_STOCK_UPDATED: {\n \"name\": \"Product variant stock updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n CHECKOUT_CREATED: {\n \"name\": \"Checkout created\",\n \"permission\": CheckoutPermissions.MANAGE_CHECKOUTS,\n },\n CHECKOUT_UPDATED: {\n \"name\": \"Checkout updated\",\n \"permission\": CheckoutPermissions.MANAGE_CHECKOUTS,\n },\n CHECKOUT_FULLY_PAID: {\n \"name\": \"Checkout fully paid\",\n \"permission\": CheckoutPermissions.MANAGE_CHECKOUTS,\n },\n CHECKOUT_METADATA_UPDATED: {\n \"name\": \"Checkout metadata updated\",\n \"permission\": CheckoutPermissions.MANAGE_CHECKOUTS,\n },\n NOTIFY_USER: {\n \"name\": \"Notify user\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n PAGE_CREATED: {\n \"name\": \"Page created\",\n \"permission\": PagePermissions.MANAGE_PAGES,\n },\n PAGE_UPDATED: {\n \"name\": \"Page updated\",\n \"permission\": PagePermissions.MANAGE_PAGES,\n },\n PAGE_DELETED: {\n \"name\": \"Page deleted\",\n \"permission\": PagePermissions.MANAGE_PAGES,\n },\n PAGE_TYPE_CREATED: {\n \"name\": \"Page type created\",\n \"permission\": PageTypePermissions.MANAGE_PAGE_TYPES_AND_ATTRIBUTES,\n },\n PAGE_TYPE_UPDATED: {\n \"name\": \"Page type updated\",\n \"permission\": PageTypePermissions.MANAGE_PAGE_TYPES_AND_ATTRIBUTES,\n },\n PAGE_TYPE_DELETED: {\n \"name\": \"Page type deleted\",\n \"permission\": PageTypePermissions.MANAGE_PAGE_TYPES_AND_ATTRIBUTES,\n },\n PERMISSION_GROUP_CREATED: {\n \"name\": \"Permission group created\",\n \"permission\": AccountPermissions.MANAGE_STAFF,\n },\n PERMISSION_GROUP_UPDATED: {\n \"name\": \"Permission group updated\",\n \"permission\": AccountPermissions.MANAGE_STAFF,\n },\n PERMISSION_GROUP_DELETED: {\n \"name\": \"Permission group deleted\",\n \"permission\": AccountPermissions.MANAGE_STAFF,\n },\n SHIPPING_PRICE_CREATED: {\n \"name\": \"Shipping price created\",\n \"permission\": ShippingPermissions.MANAGE_SHIPPING,\n },\n SHIPPING_PRICE_UPDATED: {\n \"name\": \"Shipping price updated\",\n \"permission\": ShippingPermissions.MANAGE_SHIPPING,\n },\n SHIPPING_PRICE_DELETED: {\n \"name\": \"Shipping price deleted\",\n \"permission\": ShippingPermissions.MANAGE_SHIPPING,\n },\n SHIPPING_ZONE_CREATED: {\n \"name\": \"Shipping zone created\",\n \"permission\": ShippingPermissions.MANAGE_SHIPPING,\n },\n SHIPPING_ZONE_UPDATED: {\n \"name\": \"Shipping zone updated\",\n \"permission\": ShippingPermissions.MANAGE_SHIPPING,\n },\n SHIPPING_ZONE_DELETED: {\n \"name\": \"Shipping zone deleted\",\n \"permission\": ShippingPermissions.MANAGE_SHIPPING,\n },\n SHIPPING_ZONE_METADATA_UPDATED: {\n \"name\": \"Shipping zone metadata updated\",\n \"permission\": ShippingPermissions.MANAGE_SHIPPING,\n },\n STAFF_CREATED: {\n \"name\": \"Staff created\",\n \"permission\": AccountPermissions.MANAGE_STAFF,\n },\n STAFF_UPDATED: {\n \"name\": \"Staff updated\",\n \"permission\": AccountPermissions.MANAGE_STAFF,\n },\n STAFF_DELETED: {\n \"name\": \"Staff deleted\",\n 
\"permission\": AccountPermissions.MANAGE_STAFF,\n },\n STAFF_SET_PASSWORD_REQUESTED: {\n \"name\": \"Setting a password for a staff is requested\",\n \"permission\": AccountPermissions.MANAGE_STAFF,\n },\n TRANSACTION_ITEM_METADATA_UPDATED: {\n \"name\": \"Transaction item metadata updated\",\n \"permission\": PaymentPermissions.HANDLE_PAYMENTS,\n },\n TRANSLATION_CREATED: {\n \"name\": \"Translation created\",\n \"permission\": SitePermissions.MANAGE_TRANSLATIONS,\n },\n TRANSLATION_UPDATED: {\n \"name\": \"Translation updated\",\n \"permission\": SitePermissions.MANAGE_TRANSLATIONS,\n },\n WAREHOUSE_CREATED: {\n \"name\": \"Warehouse created\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n WAREHOUSE_UPDATED: {\n \"name\": \"Warehouse updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n WAREHOUSE_DELETED: {\n \"name\": \"Warehouse deleted\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n WAREHOUSE_METADATA_UPDATED: {\n \"name\": \"Warehouse metadata updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n VOUCHER_CREATED: {\n \"name\": \"Voucher created\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n VOUCHER_UPDATED: {\n \"name\": \"Voucher updated\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n VOUCHER_DELETED: {\n \"name\": \"Voucher deleted\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n VOUCHER_METADATA_UPDATED: {\n \"name\": \"Voucher metadata updated\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n VOUCHER_CODE_EXPORT_COMPLETED: {\n \"name\": \"Voucher code export completed\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n OBSERVABILITY: {\n \"name\": \"Observability\",\n \"permission\": AppPermission.MANAGE_OBSERVABILITY,\n },\n THUMBNAIL_CREATED: {\n \"name\": \"Thumbnail created\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n SHOP_METADATA_UPDATED: {\n \"name\": \"Shop metadata updated\",\n \"permission\": SitePermissions.MANAGE_SETTINGS,\n },\n }\n\n CHOICES = [\n (ANY, \"Any events\"),\n ] + [\n (event_name, event_data[\"name\"]) for event_name, event_data in EVENT_MAP.items()\n ]\n PERMISSIONS: dict[str, Optional[BasePermissionEnum]] = {\n event_name: event_data[\"permission\"]\n for event_name, event_data in EVENT_MAP.items()\n }\n\n ALL = [event[0] for event in CHOICES]"
},
{
"identifier": "get_graphql_content",
"path": "saleor/graphql/tests/utils.py",
"snippet": "def get_graphql_content(response, *, ignore_errors: bool = False):\n \"\"\"Extract GraphQL content from the API response.\n\n Optionally ignore protocol-level errors, eg. schema errors or lack of\n permissions.\n \"\"\"\n content = get_graphql_content_from_response(response)\n if not ignore_errors:\n assert \"errors\" not in content, content[\"errors\"]\n return content"
},
{
"identifier": "CustomerDelete",
"path": "saleor/graphql/account/mutations/staff/customer_delete.py",
"snippet": "class CustomerDelete(CustomerDeleteMixin, UserDelete):\n class Meta:\n description = \"Deletes a customer.\"\n doc_category = DOC_CATEGORY_USERS\n model = models.User\n object_type = User\n permissions = (AccountPermissions.MANAGE_USERS,)\n error_type_class = AccountError\n error_type_field = \"account_errors\"\n webhook_events_info = [\n WebhookEventInfo(\n type=WebhookEventAsyncType.CUSTOMER_DELETED,\n description=\"A customer account was deleted.\",\n )\n ]\n\n class Arguments:\n id = graphene.ID(required=False, description=\"ID of a customer to delete.\")\n external_reference = graphene.String(\n required=False,\n description=f\"External ID of a customer to update. {ADDED_IN_310}\",\n )\n\n @classmethod\n def perform_mutation(cls, root, info: ResolveInfo, /, **data):\n results = super().perform_mutation(root, info, **data)\n cls.post_process(info)\n return results\n\n @classmethod\n def post_save_action(cls, info: ResolveInfo, instance, cleaned_input):\n manager = get_plugin_manager_promise(info.context).get()\n cls.call_event(manager.customer_deleted, instance)"
}
] | from functools import partial
from unittest.mock import ANY, Mock, patch
from django.core.exceptions import ValidationError
from django.utils.functional import SimpleLazyObject
from freezegun import freeze_time
from ......webhook.event_types import WebhookEventAsyncType
from .....tests.utils import get_graphql_content
from ....mutations.staff import CustomerDelete
import graphene
import pytest | 7,788 | externalReference
            }
        }
    }
"""
@patch("saleor.account.signals.delete_from_storage_task.delay")
@patch("saleor.graphql.account.mutations.base.account_events.customer_deleted_event")
def test_customer_delete(
mocked_deletion_event,
delete_from_storage_task_mock,
staff_api_client,
staff_user,
customer_user,
image,
permission_manage_users,
media_root,
):
query = CUSTOMER_DELETE_MUTATION
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
customer_user.avatar = image
customer_user.save(update_fields=["avatar"])
variables = {"id": customer_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_users]
)
content = get_graphql_content(response)
data = content["data"]["customerDelete"]
assert data["errors"] == []
assert data["user"]["id"] == customer_id
# Ensure the customer was properly deleted
# and any related event was properly triggered
mocked_deletion_event.assert_called_once_with(
staff_user=staff_user, app=None, deleted_count=1
)
delete_from_storage_task_mock.assert_called_once_with(customer_user.avatar.name)
@freeze_time("2018-05-31 12:00:01")
@patch("saleor.plugins.webhook.plugin.get_webhooks_for_event")
@patch("saleor.plugins.webhook.plugin.trigger_webhooks_async")
def test_customer_delete_trigger_webhook(
mocked_webhook_trigger,
mocked_get_webhooks_for_event,
any_webhook,
staff_api_client,
customer_user,
permission_manage_users,
settings,
):
# given
mocked_get_webhooks_for_event.return_value = [any_webhook]
settings.PLUGINS = ["saleor.plugins.webhook.plugin.WebhookPlugin"]
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
variables = {"id": customer_id}
# when
response = staff_api_client.post_graphql(
CUSTOMER_DELETE_MUTATION, variables, permissions=[permission_manage_users]
)
content = get_graphql_content(response)
data = content["data"]["customerDelete"]
# then
assert data["errors"] == []
assert data["user"]["id"] == customer_id
mocked_webhook_trigger.assert_called_once_with(
None,
WebhookEventAsyncType.CUSTOMER_DELETED,
[any_webhook],
customer_user,
SimpleLazyObject(lambda: staff_api_client.user),
legacy_data_generator=ANY,
)
assert isinstance(
mocked_webhook_trigger.call_args.kwargs["legacy_data_generator"], partial
)
@patch("saleor.account.signals.delete_from_storage_task.delay")
@patch("saleor.graphql.account.mutations.base.account_events.customer_deleted_event")
def test_customer_delete_by_app(
mocked_deletion_event,
delete_from_storage_task_mock,
app_api_client,
app,
customer_user,
image,
permission_manage_users,
media_root,
):
query = CUSTOMER_DELETE_MUTATION
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
customer_user.avatar = image
customer_user.save(update_fields=["avatar"])
variables = {"id": customer_id}
response = app_api_client.post_graphql(
query, variables, permissions=[permission_manage_users]
)
content = get_graphql_content(response)
data = content["data"]["customerDelete"]
assert data["errors"] == []
assert data["user"]["id"] == customer_id
# Ensure the customer was properly deleted
# and any related event was properly triggered
assert mocked_deletion_event.call_count == 1
args, kwargs = mocked_deletion_event.call_args
assert kwargs["deleted_count"] == 1
assert kwargs["staff_user"] is None
assert kwargs["app"] == app
delete_from_storage_task_mock.assert_called_once_with(customer_user.avatar.name)
def test_customer_delete_errors(customer_user, admin_user, staff_user):
info = Mock(context=Mock(user=admin_user))
with pytest.raises(ValidationError) as e:
|
CUSTOMER_DELETE_MUTATION = """
mutation CustomerDelete($id: ID, $externalReference: String) {
customerDelete(id: $id, externalReference: $externalReference) {
errors {
field
message
}
user {
id
externalReference
}
}
}
"""
@patch("saleor.account.signals.delete_from_storage_task.delay")
@patch("saleor.graphql.account.mutations.base.account_events.customer_deleted_event")
def test_customer_delete(
mocked_deletion_event,
delete_from_storage_task_mock,
staff_api_client,
staff_user,
customer_user,
image,
permission_manage_users,
media_root,
):
query = CUSTOMER_DELETE_MUTATION
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
customer_user.avatar = image
customer_user.save(update_fields=["avatar"])
variables = {"id": customer_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_users]
)
content = get_graphql_content(response)
data = content["data"]["customerDelete"]
assert data["errors"] == []
assert data["user"]["id"] == customer_id
# Ensure the customer was properly deleted
# and any related event was properly triggered
mocked_deletion_event.assert_called_once_with(
staff_user=staff_user, app=None, deleted_count=1
)
delete_from_storage_task_mock.assert_called_once_with(customer_user.avatar.name)
@freeze_time("2018-05-31 12:00:01")
@patch("saleor.plugins.webhook.plugin.get_webhooks_for_event")
@patch("saleor.plugins.webhook.plugin.trigger_webhooks_async")
def test_customer_delete_trigger_webhook(
mocked_webhook_trigger,
mocked_get_webhooks_for_event,
any_webhook,
staff_api_client,
customer_user,
permission_manage_users,
settings,
):
# given
mocked_get_webhooks_for_event.return_value = [any_webhook]
settings.PLUGINS = ["saleor.plugins.webhook.plugin.WebhookPlugin"]
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
variables = {"id": customer_id}
# when
response = staff_api_client.post_graphql(
CUSTOMER_DELETE_MUTATION, variables, permissions=[permission_manage_users]
)
content = get_graphql_content(response)
data = content["data"]["customerDelete"]
# then
assert data["errors"] == []
assert data["user"]["id"] == customer_id
mocked_webhook_trigger.assert_called_once_with(
None,
WebhookEventAsyncType.CUSTOMER_DELETED,
[any_webhook],
customer_user,
SimpleLazyObject(lambda: staff_api_client.user),
legacy_data_generator=ANY,
)
assert isinstance(
mocked_webhook_trigger.call_args.kwargs["legacy_data_generator"], partial
)
@patch("saleor.account.signals.delete_from_storage_task.delay")
@patch("saleor.graphql.account.mutations.base.account_events.customer_deleted_event")
def test_customer_delete_by_app(
mocked_deletion_event,
delete_from_storage_task_mock,
app_api_client,
app,
customer_user,
image,
permission_manage_users,
media_root,
):
query = CUSTOMER_DELETE_MUTATION
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
customer_user.avatar = image
customer_user.save(update_fields=["avatar"])
variables = {"id": customer_id}
response = app_api_client.post_graphql(
query, variables, permissions=[permission_manage_users]
)
content = get_graphql_content(response)
data = content["data"]["customerDelete"]
assert data["errors"] == []
assert data["user"]["id"] == customer_id
# Ensure the customer was properly deleted
# and any related event was properly triggered
assert mocked_deletion_event.call_count == 1
args, kwargs = mocked_deletion_event.call_args
assert kwargs["deleted_count"] == 1
assert kwargs["staff_user"] is None
assert kwargs["app"] == app
delete_from_storage_task_mock.assert_called_once_with(customer_user.avatar.name)
def test_customer_delete_errors(customer_user, admin_user, staff_user):
info = Mock(context=Mock(user=admin_user))
with pytest.raises(ValidationError) as e: | CustomerDelete.clean_instance(info, staff_user) | 2 | 2023-11-13 05:00:35+00:00 | 12k |
Aues6uen11Z/Zafkiel | zafkiel/ui/ui.py | [
{
"identifier": "ImageTemplate",
"path": "zafkiel/device/template.py",
"snippet": "class ImageTemplate(Template):\n def __init__(\n self,\n filename: str,\n record_pos: tuple = None,\n keyword: Keyword = None,\n threshold: float = None,\n target_pos: int = TargetPos.MID,\n resolution: tuple = (1280, 720),\n rgb: bool = False,\n scale_max: int = 800,\n scale_step: float = 0.005,\n template_path: str = 'templates'\n ):\n\n super().__init__(filename, threshold, target_pos, record_pos, resolution, rgb, scale_max, scale_step)\n\n self.template_path = template_path # under root path\n self.keyword = keyword\n if self.keyword is not None and self.keyword.name == '':\n \"\"\"\n Please note that due to the __post_init__ method of the Keyword class running before this 'name' assignment, \n its 'instances' dictionary will get a dictionary item with an empty string key.\n This means that each instance of the Keyword class that omits the 'name' parameter will be constantly \n overwritten. If you want to use Keyword().instances for special purposes, you must initialize 'name'.\n \"\"\"\n self.keyword.name = self.name\n\n @cached_property\n def filepath(self) -> str:\n if self._filepath:\n return self._filepath\n for dir_name in G.BASEDIR:\n filepath = os.path.join(dir_name, self.template_path, self.filename)\n if os.path.isfile(filepath):\n self._filepath = filepath\n return self._filepath\n return self.filename\n\n @cached_property\n def name(self) -> str:\n return Path(self.filename).stem\n\n @cached_property\n def image(self) -> ndarray:\n return self._imread()\n\n @cached_property\n def height(self) -> int:\n return self.image.shape[0]\n\n @cached_property\n def width(self) -> int:\n return self.image.shape[1]\n\n def _has_border(self) -> bool:\n \"\"\"\n If game running in a bordered process, coordinates need to be corrected.\n\n Returns:\n Whether the game running in a bordered process.\n \"\"\"\n actual_ratio = G.DEVICE.get_current_resolution()[0] / G.DEVICE.get_current_resolution()[1]\n template_ratio = self.resolution[0] / self.resolution[1]\n return actual_ratio != template_ratio\n\n def ratio(self, screen_height: float = None) -> float:\n \"\"\"\n Calculate the ratio of the current screen to the template image.\n \"\"\"\n if screen_height is None:\n if self._has_border():\n border = Config.BORDER[0] + Config.BORDER[2]\n else:\n border = 0\n screen_height = G.DEVICE.get_current_resolution()[1] - border\n\n return screen_height / self.resolution[1]\n\n @cached_property\n def area(self) -> tuple:\n \"\"\"\n Calculate the area of the template image on the current screen.\n\n Returns:\n Upper left and lower right corner coordinate.\n \"\"\"\n screen_resolution = G.DEVICE.get_current_resolution()\n\n if self._has_border():\n border = Config.BORDER\n else:\n border = (0, 0, 0)\n\n screen_width = screen_resolution[0] - border[1] * 2\n screen_height = screen_resolution[1] - border[0] - border[2]\n\n ratio = self.ratio(screen_height)\n x1 = screen_width / 2 + self.record_pos[0] * screen_width - self.width / 2 * ratio + border[1]\n y1 = screen_height / 2 + self.record_pos[1] * screen_width - self.height / 2 * ratio + border[0]\n x2 = screen_width / 2 + self.record_pos[0] * screen_width + self.width / 2 * ratio + border[1]\n y2 = screen_height / 2 + self.record_pos[1] * screen_width + self.height / 2 * ratio + border[0]\n return x1, y1, x2, y2"
},
{
"identifier": "logger",
"path": "zafkiel/logger.py",
"snippet": ""
},
{
"identifier": "API",
"path": "zafkiel/device/api.py",
"snippet": "class API:\n \"\"\"\n Device Setup APIs\n \"\"\"\n\n @staticmethod\n def init_device(platform=\"Android\", uuid=None, **kwargs):\n return init_device(platform, uuid, **kwargs)\n\n @staticmethod\n def connect_device(uri):\n return connect_device(uri)\n\n @staticmethod\n def device():\n return device()\n\n @staticmethod\n def set_current(idx):\n set_current(idx)\n\n @staticmethod\n def auto_setup(\n basedir: str = None,\n devices: list = None,\n firing_time: int = 30,\n logdir: bool = None,\n project_root: str = None,\n compress: int = None\n ):\n \"\"\"\n Auto setup running env and try to connect device if no device is connected.\n\n Args:\n basedir: basedir of script, __file__ is also acceptable.\n devices: connect_device uri in list.\n firing_time: Game starts taking time, this value should be set larger in old machine.\n logdir: log dir for script report, default is None for no log, set to ``True`` for ``<basedir>/log``.\n project_root: Project root dir for `using` api.\n compress: The compression rate of the screenshot image, integer in range [1, 99], default is 10\n\n Examples:\n auto_setup(__file__)\n auto_setup(__file__, devices=[\"Android://127.0.0.1:5037/SJE5T17B17\"],\n ... logdir=True, project_root=r\"D:\\\\test\\\\logs\", compress=90)\n \"\"\"\n if basedir:\n if os.path.isfile(basedir):\n basedir = os.path.dirname(basedir)\n if basedir not in G.BASEDIR:\n G.BASEDIR.append(basedir)\n if devices:\n startup_time = Timer(firing_time).start()\n for dev in devices:\n while not startup_time.reached():\n try:\n connect_device(dev)\n break\n except ElementNotFoundError:\n time.sleep(3)\n if startup_time.reached():\n raise NotRunningError(dev)\n if logdir:\n logdir = script_log_dir(basedir, logdir)\n set_logdir(logdir)\n if project_root:\n ST.PROJECT_ROOT = project_root\n if compress:\n ST.SNAPSHOT_QUALITY = compress\n\n \"\"\"\n Device Operations\n \"\"\"\n\n @staticmethod\n def app_is_running() -> bool:\n \"\"\"\n Platforms:\n Windows\n\n Returns:\n Whether app is running\n \"\"\"\n return G.DEVICE.app_is_running()\n\n @staticmethod\n def stop_app(package=None):\n \"\"\"\n Stop the target application on device\n\n Return:\n Has the Windows application stopped, on Android and iOS no return.\n\n Platforms:\n Android, iOS, Windows\n\n Example:\n stop_app(\"com.netease.cloudmusic\")\n stop_app() # only test on Windows\n \"\"\"\n return G.DEVICE.stop_app(package)\n\n @staticmethod\n @logwrap\n def touch(\n v: Template or tuple,\n times: int = 1,\n blind: bool = False,\n interval: float = 0.05,\n ocr_mode: int = 0,\n cls: Type[Ocr] = Ocr,\n **kwargs\n ) -> tuple:\n \"\"\"\n Perform the touch action on the device screen\n\n Args:\n v: Target to touch, either a ``ImageTemplate`` instance or absolute coordinates (x, y).\n times: How many touches to be performed\n blind: Whether to recognize Template, sometimes we only need to click without caring about the image.\n interval: Time interval between two touches.\n ocr_mode: Ocr match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n cls: \"Ocr\" class or its subclass\n **kwargs: Platform specific `kwargs`, please refer to corresponding docs.\n\n Returns:\n Final position to be clicked, e.g. 
(100, 100)\n\n Platforms:\n Android, Windows, iOS\n\n Examples:\n Click absolute coordinates:\n touch((100, 100))\n Click 2 times:\n touch((100, 100), times=2)\n Under Android and Windows platforms, you can set the click duration:\n touch((100, 100), duration=2)\n Right click(Windows):\n touch((100, 100), right_click=True)\n \"\"\"\n if isinstance(v, Template):\n if blind:\n center_pos = (v.area[2] + v.area[0]) / 2, (v.area[3] + v.area[1]) / 2\n else:\n center_pos = loop_find(v, timeout=ST.FIND_TIMEOUT, cls=cls, ocr_mode=ocr_mode)\n\n h = v.height * v.ratio()\n w = v.width * v.ratio() # actual height and width of target in screen\n pos = random_rectangle_point(center_pos, h, w)\n else:\n try_log_screen()\n pos = v\n for _ in range(times):\n G.DEVICE.touch(pos, **kwargs)\n time.sleep(interval)\n delay_after_operation()\n return pos\n\n @logwrap\n def find_click(\n self,\n rec_template: Template,\n touch_template: Template = None,\n times: int = 1,\n timeout: float = 1,\n blind: bool = False,\n ocr_mode: int = 0,\n cls: Type[Ocr] = Ocr\n ) -> bool:\n \"\"\"\n Find the template image and click it or another image area.\n\n Args:\n rec_template: \"Template\" instance to be found.\n touch_template: \"ImageTemplate\" instance to be clicked, defaults to None which means click rec_template.\n times: How many touches to be performed.\n timeout: Time interval to wait for the match.\n blind: Whether to recognize Template, same as parameter of touch().\n ocr_mode: Ocr match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n cls: \"Ocr\" class or its subclass\n\n Returns:\n bool: Whether the target image appear and click it.\n \"\"\"\n try:\n pos = self.wait(rec_template, timeout=timeout, ocr_mode=ocr_mode, cls=cls)\n h = rec_template.height * rec_template.ratio()\n w = rec_template.width * rec_template.ratio() # actual height and width of target in screen\n pos = random_rectangle_point(pos, h, w)\n except TargetNotFoundError:\n return False\n\n if touch_template:\n self.touch(touch_template, times, blind, ocr_mode=ocr_mode, cls=cls)\n logger.info((f\"Click{pos} {times} times\" if times > 1 else f\"Click{pos}\") + f\" @{touch_template.name}\")\n else:\n self.touch(pos, times)\n logger.info((f\"Click{pos} {times} times\" if times > 1 else f\"Click{pos}\") + f\" @{rec_template.name}\")\n return True\n\n @staticmethod\n @logwrap\n def exists(v: Template, timeout: float = 0, ocr_mode: int = 0, cls: Type[Ocr] = Ocr) -> bool or tuple:\n \"\"\"\n Check whether given target exists on device screen\n\n Args:\n v: target to be checked\n timeout: time limit, default is 0 which means loop_find will only search once\n ocr_mode: Ocr match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n cls: \"Ocr\" class or its subclass\n\n Returns:\n False if target is not found, otherwise returns the coordinates of the target\n\n Platforms:\n Android, Windows, iOS\n\n Examples:\n if exists(ImageTemplate(r\"tpl1606822430589.png\")):\n touch(ImageTemplate(r\"tpl1606822430589.png\"))\n\n Since ``exists()`` will return the coordinates,\n we can directly click on this return value to reduce one image search:\n\n pos = exists(ImageTemplate(r\"tpl1606822430589.png\"))\n if pos:\n touch(pos)\n \"\"\"\n try:\n pos = loop_find(v, timeout=timeout, ocr_mode=ocr_mode, cls=cls)\n except TargetNotFoundError:\n return False\n else:\n return pos\n\n @staticmethod\n @logwrap\n def wait(\n v: Template,\n timeout: float = None,\n interval: float = 0.5,\n interval_func: Callable = None,\n ocr_mode: int = 0,\n cls: Type[Ocr] = 
Ocr\n ) -> tuple:\n \"\"\"\n Wait to match the Template on the device screen\n\n Args:\n v: target object to wait for, Template instance\n timeout: time interval to wait for the match, default is None which is ``ST.FIND_TIMEOUT``\n interval: time interval in seconds to attempt to find a match\n interval_func: called after each unsuccessful attempt to find the corresponding match\n ocr_mode: Ocr match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n cls: \"Ocr\" class or its subclass\n\n Raises:\n TargetNotFoundError: raised if target is not found after the time limit expired\n\n Returns:\n coordinates of the matched target\n\n Platforms:\n Android, Windows, iOS\n\n Examples:\n wait(Template(r\"tpl1606821804906.png\")) # timeout after ST.FIND_TIMEOUT\n # find Template every 3 seconds, timeout after 120 seconds\n wait(Template(r\"tpl1606821804906.png\"), timeout=120, interval=3)\n\n You can specify a callback function every time the search target fails::\n\n def notfound():\n print(\"No target found\")\n wait(Template(r\"tpl1607510661400.png\"), interval_func=notfound)\n \"\"\"\n if timeout is None:\n timeout = ST.FIND_TIMEOUT\n pos = loop_find(v, timeout=timeout, interval=interval, interval_func=interval_func, ocr_mode=ocr_mode, cls=cls)\n\n return pos\n\n @staticmethod\n def swipe(\n v1: Template or tuple,\n v2: Template or tuple = None,\n vector: tuple = None,\n blind1: bool = False,\n blind2: bool = False,\n **kwargs\n ) -> tuple:\n \"\"\"\n Perform the swipe action on the device screen.\n\n There are two ways of assigning the parameters\n * ``swipe(v1, v2=Template(...))`` # swipe from v1 to v2\n * ``swipe(v1, vector=(x, y))`` # swipe starts at v1 and moves along the vector.\n\n Args:\n v1: the start point of swipe, either a Template instance or absolute coordinates (x, y)\n v2: the end point of swipe, either a Template instance or absolute coordinates (x, y)\n vector: a vector coordinates of swipe action, either absolute coordinates (x, y) or percentage of\n screen e.g.(0.5, 0.5)\n blind1: Whether to recognize Template1, same as parameter of touch().\n blind2: Whether to recognize Template2, same as parameter of touch().\n **kwargs: platform specific `kwargs`, please refer to corresponding docs\n\n Raises:\n general exception when not enough parameters to perform swap action have been provided\n\n Returns:\n Origin position and target position\n\n Platforms:\n Android, Windows, iOS\n\n Examples:\n swipe(Template(r\"tpl1606814865574.png\"), vector=[-0.0316, -0.3311])\n swipe((100, 100), (200, 200))\n\n Custom swiping duration and number of steps(Android and iOS)::\n\n # swiping lasts for 1 second, divided into 6 steps\n swipe((100, 100), (200, 200), duration=1, steps=6)\n \"\"\"\n if isinstance(v1, Template):\n if blind1:\n pos1 = (v1.area[2] + v1.area[0]) / 2, (v1.area[3] + v1.area[1]) / 2\n else:\n pos1 = loop_find(v1, timeout=ST.FIND_TIMEOUT)\n else:\n try_log_screen()\n pos1 = v1\n\n if v2:\n if isinstance(v2, Template):\n if blind2:\n pos2 = (v2.area[2] + v2.area[0]) / 2, (v2.area[3] + v2.area[1]) / 2\n else:\n pos2 = loop_find(v2, timeout=ST.FIND_TIMEOUT_TMP)\n else:\n pos2 = v2\n elif vector:\n if vector[0] <= 1 and vector[1] <= 1:\n w, h = G.DEVICE.get_current_resolution()\n vector = (int(vector[0] * w), int(vector[1] * h))\n pos2 = (pos1[0] + vector[0], pos1[1] + vector[1])\n else:\n raise ScriptError(\"no enough params for swipe\")\n\n G.DEVICE.swipe(pos1, pos2, **kwargs)\n delay_after_operation()\n logger.info(f\"Swipe {pos1} -> {pos2}\")\n return pos1, pos2\n\n 
@staticmethod\n def screenshot():\n \"\"\"\n Returns:\n Screenshot image\n \"\"\"\n return G.DEVICE.snapshot(filename=None, quality=ST.SNAPSHOT_QUALITY)\n\n @staticmethod\n def snapshot(filename=None, msg=\"\", quality=None, max_size=None):\n \"\"\"\n Returns:\n {\"screen\": filename, \"resolution\": resolution of the screen} or None\n \"\"\"\n return snapshot(filename, msg, quality, max_size)\n\n @staticmethod\n def shell(cmd):\n return shell(cmd)\n\n @staticmethod\n def start_app(package, activity=None):\n start_app(package, activity)\n\n @staticmethod\n def clear_app(package):\n clear_app(package)\n\n @staticmethod\n def install(filepath, **kwargs):\n return install(filepath, **kwargs)\n\n @staticmethod\n def uninstall(package):\n return uninstall(package)\n\n @staticmethod\n def wake():\n wake()\n\n @staticmethod\n def home():\n home()\n\n @staticmethod\n def double_click(v):\n return double_click(v)\n\n @staticmethod\n def pinch(in_or_out='in', center=None, percent=0.5):\n pinch(in_or_out, center, percent)\n\n @staticmethod\n def key_event(keyname, **kwargs):\n keyevent(keyname, **kwargs)\n\n @staticmethod\n def text(txt, enter=True, **kwargs):\n text(txt, enter, **kwargs)\n\n @staticmethod\n def sleep(secs=1.0):\n sleep(secs)\n\n @staticmethod\n def find_all(v):\n return find_all(v)\n\n @staticmethod\n def get_clipboard(*args, **kwargs):\n return get_clipboard(*args, **kwargs)\n\n @staticmethod\n def set_clipboard(content, *args, **kwargs):\n set_clipboard(content, *args, **kwargs)"
},
{
"identifier": "Ocr",
"path": "zafkiel/ocr/ocr.py",
"snippet": "class Ocr:\n # Merge results with box distance <= thres\n merge_thres_x = 0\n merge_thres_y = 0\n\n def __init__(self, button: ImageTemplate, lang=None, name=None):\n \"\"\"\n Args:\n button:\n lang: If None, use in-game language\n name: If None, use button.name\n \"\"\"\n if lang is None:\n lang = Config.SERVER_LANG\n if name is None:\n name = button.name\n\n self.button: ImageTemplate = button\n self.lang: str = lang\n self.name: str = name\n\n @cached_property\n def model(self) -> TextSystem:\n return OCR_MODEL.get_by_lang(self.lang)\n\n @staticmethod\n def pre_process(image):\n \"\"\"\n To be overridden.\n \"\"\"\n return image\n\n @staticmethod\n def after_process(result):\n \"\"\"\n To be overridden.\n \"\"\"\n return result\n\n def format_result(self, result) -> str:\n \"\"\"\n To be overridden.\n \"\"\"\n return result\n\n def ocr_single_line(self, image):\n # pre process\n start_time = time.time()\n image = crop(image, self.button.area)\n image = self.pre_process(image)\n # ocr\n result, _ = self.model.ocr_single_line(image)\n # after proces\n result = self.after_process(result)\n result = self.format_result(result)\n\n cost_time = time.time() - start_time\n logger.debug(f'OCR <{self.name}> cost {cost_time:.2f}s: {result}')\n return result\n\n def filter_detected(self, result: BoxedResult) -> bool:\n \"\"\"\n Return False to drop result.\n To be overridden.\n \"\"\"\n return True\n\n def detect_and_ocr(self, image, direct_ocr=False) -> list[BoxedResult]:\n \"\"\"\n Args:\n image:\n direct_ocr: True to ignore `button` attribute and feed the image to OCR model without cropping.\n\n Returns:\n\n \"\"\"\n # pre process\n start_time = time.time()\n if not direct_ocr:\n image = crop(image, self.button.area)\n image = self.pre_process(image)\n # ocr\n results: list[BoxedResult] = self.model.detect_and_ocr(image)\n # after proces\n for result in results:\n if not direct_ocr:\n result.box += self.button.area[:2]\n result.box = tuple(corner2area(result.box))\n\n results = [result for result in results if self.filter_detected(result)]\n results = merge_buttons(results, thres_x=self.merge_thres_x, thres_y=self.merge_thres_y)\n for result in results:\n result.ocr_text = self.after_process(result.ocr_text)\n\n cost_time = time.time() - start_time\n logger.debug(f\"OCR <{self.name}> cost {cost_time:.2f}s: {', '.join([result.ocr_text for result in results])}\")\n return results\n\n @staticmethod\n def _match_result(\n result: str,\n keyword_classes,\n lang: str = None,\n ignore_punctuation=True,\n ignore_digit=True):\n \"\"\"\n Args:\n result (str):\n keyword_classes: A list of `Keyword` class or classes inherited `Keyword`\n\n Returns:\n If matched, return `Keyword` object or objects inherited `Keyword`\n If not match, return None\n \"\"\"\n if not isinstance(keyword_classes, list):\n keyword_classes = [keyword_classes]\n\n # Digits will be considered as the index of keyword\n if ignore_digit:\n if result.isdigit():\n return None\n\n # Try in current lang\n for keyword_class in keyword_classes:\n try:\n matched = keyword_class.find(\n result,\n lang=lang,\n ignore_punctuation=ignore_punctuation\n )\n return matched\n except ScriptError:\n continue\n\n return None\n\n def matched_single_line(\n self,\n image,\n keyword_classes,\n lang: str = None,\n ignore_punctuation=True\n ):\n \"\"\"\n Args:\n image: Image to detect\n keyword_classes: `Keyword` class or classes inherited `Keyword`, or a list of them.\n lang:\n ignore_punctuation:\n\n Returns:\n If matched, return `Keyword` object 
or objects inherited `Keyword`\n If not match, return None\n \"\"\"\n result = self.ocr_single_line(image)\n\n result = self._match_result(\n result,\n keyword_classes=keyword_classes,\n lang=lang,\n ignore_punctuation=ignore_punctuation,\n )\n\n logger.debug(f'<{self.name}> matched: {str(result)}')\n return result\n\n def _product_button(\n self,\n boxed_result: BoxedResult,\n keyword_classes,\n lang: str = None,\n ignore_punctuation=True,\n ignore_digit=True\n ) -> OcrResultButton:\n if not isinstance(keyword_classes, list):\n keyword_classes = [keyword_classes]\n\n matched_keyword = self._match_result(\n boxed_result.ocr_text,\n keyword_classes=keyword_classes,\n lang=lang,\n ignore_punctuation=ignore_punctuation,\n ignore_digit=ignore_digit,\n )\n button = OcrResultButton(boxed_result, matched_keyword)\n return button\n\n def matched_ocr(self, image, keyword_classes, direct_ocr=False) -> list[OcrResultButton]:\n \"\"\"\n Match all instances of 'keyword_classes' on the screen.\n\n Args:\n image: Screenshot\n keyword_classes: `Keyword` class or classes inherited `Keyword`, or a list of them.\n direct_ocr: True to ignore `button` attribute and feed the image to OCR model without cropping.\n\n Returns:\n List of matched OcrResultButton.\n OCR result which didn't matched known keywords will be dropped.\n \"\"\"\n results = self.detect_and_ocr(image, direct_ocr=direct_ocr)\n results = [self._product_button(result, keyword_classes) for result in results]\n results = [result for result in results if result.is_keyword_matched]\n\n if results:\n logger.debug(f\"<{self.name}> matched: {', '.join([str(result) for result in results])}\")\n # else:\n # logger.debug(f\"<{self.name}> matching failed\")\n return results\n\n def ocr_match_keyword(self, image, keyword_instance, direct_ocr=False, mode: int = OCR_EQUAL, threshold=0.75) \\\n -> list[OcrResultButton]:\n \"\"\"\n Match a specified keyword instance on the screen.\n\n Args:\n image: Screenshot\n keyword_instance: Instance of `Keyword` class or its subclass.\n direct_ocr: True to ignore `button` attribute and feed the image to OCR model without cropping.\n mode: Match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n threshold: Similarity threshold, default 0.75, only work when mode is OCR_SIMILAR.\n\n Returns:\n List of matched OcrResultButton or empty list.\n \"\"\"\n boxed_results = self.detect_and_ocr(image, direct_ocr=direct_ocr)\n final_results = []\n for boxed_result in boxed_results:\n for keyword in keyword_instance.keywords_to_find():\n if mode == OCR_EQUAL and boxed_result.ocr_text != keyword:\n continue\n elif mode == OCR_CONTAINS and keyword not in boxed_result.ocr_text:\n continue\n elif mode == OCR_SIMILAR:\n similarity = SequenceMatcher(None, boxed_result.ocr_text, keyword).ratio()\n if similarity < threshold:\n continue\n button = OcrResultButton(boxed_result, keyword_instance)\n final_results.append(button)\n\n if final_results:\n logger.debug(f\"<{self.name}> matched: {', '.join([str(result) for result in final_results])}\")\n # else:\n # logger.debug(f\"<{self.name}> matching failed\")\n return final_results"
},
{
"identifier": "Page",
"path": "zafkiel/ui/page.py",
"snippet": "class Page:\n \"\"\"\n Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/tasks/base/page.py\n \"\"\"\n\n # Key: str, page name like \"page_main\"\n # Value: Page, page instance\n all_pages = {}\n\n @classmethod\n def clear_connection(cls):\n for page in cls.all_pages.values():\n page.parent = None\n\n @classmethod\n def init_connection(cls, destination: Page):\n \"\"\"Initialize an A* path finding among pages.\n\n Args:\n destination:\n \"\"\"\n cls.clear_connection()\n\n visited = [destination]\n visited = set(visited)\n while True:\n new = visited.copy()\n for page in visited:\n for link in cls.iter_pages():\n if link in visited:\n continue\n if page in link.links:\n link.parent = page\n new.add(link)\n if len(new) == len(visited):\n break\n visited = new\n\n @classmethod\n def iter_pages(cls, start_page: Page = None):\n pages = list(cls.all_pages.values())\n if start_page is not None and start_page in pages:\n # Move start_page to the front of the list\n pages.remove(start_page)\n pages.insert(0, start_page)\n cls.all_pages = {page.name: page for page in pages}\n return cls.all_pages.values()\n\n @classmethod\n def iter_check_buttons(cls):\n for page in cls.all_pages.values():\n yield page.check_button\n\n def __init__(self, check_button: Template, switch: Switch = None):\n self.check_button = check_button\n self.switch = switch\n self.links = {}\n (filename, line_number, function_name, text) = traceback.extract_stack()[-2]\n self.name = text[:text.find('=')].strip()\n self.parent = None\n Page.all_pages[self.name] = self\n\n def __eq__(self, other):\n return self.name == other.name\n\n def __hash__(self):\n return hash(self.name)\n\n def __str__(self):\n return self.name\n\n __repr__ = __str__\n\n def link(self, button: Template, destination: Page):\n self.links[destination] = button"
},
{
"identifier": "run_once",
"path": "zafkiel/decorator.py",
"snippet": "def run_once(f):\n \"\"\"\n From https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/base/decorator.py\n Run a function only once, no matter how many times it has been called.\n\n Examples:\n @run_once\n def my_function(foo, bar):\n return foo + bar\n\n while 1:\n my_function()\n\n Examples:\n def my_function(foo, bar):\n return foo + bar\n\n action = run_once(my_function)\n while 1:\n action()\n \"\"\"\n\n def wrapper(*args, **kwargs):\n if not wrapper.has_run:\n wrapper.has_run = True\n return f(*args, **kwargs)\n\n wrapper.has_run = False\n return wrapper"
},
{
"identifier": "NotRunningError",
"path": "zafkiel/exception.py",
"snippet": "class NotRunningError(Exception):\n pass"
},
{
"identifier": "PageUnknownError",
"path": "zafkiel/exception.py",
"snippet": "class PageUnknownError(Exception):\n pass"
},
{
"identifier": "ScriptError",
"path": "zafkiel/exception.py",
"snippet": "class ScriptError(Exception):\n pass"
},
{
"identifier": "Timer",
"path": "zafkiel/timer.py",
"snippet": "class Timer:\n def __init__(self, limit, count=0):\n \"\"\"\n From https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/base/timer.py\n\n Args:\n limit (int, float): Timer limit\n count (int): Timer reach confirm count. Default to 0.\n When using a structure like this, must set a count.\n Otherwise, it goes wrong if screenshot time cost greater than limit.\n\n if self.appear(MAIN_CHECK):\n if confirm_timer.reached():\n pass\n else:\n confirm_timer.reset()\n\n Also, It's a good idea to set `count`, to make program run more stable on slow computers.\n Expected speed is 0.35 second / screenshot.\n \"\"\"\n self.limit = limit\n self.count = count\n self._current = 0\n self._reach_count = count\n\n def start(self):\n if not self.started():\n self._current = time.time()\n self._reach_count = 0\n\n return self\n\n def started(self):\n return bool(self._current)\n\n def current(self):\n \"\"\"\n Returns:\n float\n \"\"\"\n if self.started():\n return time.time() - self._current\n else:\n return 0.\n\n def set_current(self, current, count=0):\n self._current = time.time() - current\n self._reach_count = count\n\n def reached(self):\n \"\"\"\n Returns:\n bool\n \"\"\"\n self._reach_count += 1\n return time.time() - self._current > self.limit and self._reach_count > self.count\n\n def reset(self):\n self._current = time.time()\n self._reach_count = 0\n return self\n\n def clear(self):\n self._current = 0\n self._reach_count = self.count\n return self\n\n def reached_and_reset(self):\n \"\"\"\n Returns:\n bool:\n \"\"\"\n if self.reached():\n self.reset()\n return True\n else:\n return False\n\n def wait(self):\n \"\"\"\n Wait until timer reached.\n \"\"\"\n diff = self._current + self.limit - time.time()\n if diff > 0:\n time.sleep(diff)\n\n def show(self):\n logger.info(str(self))\n\n def __str__(self):\n return f'Timer(limit={round(self.current(), 3)}/{self.limit}, count={self._reach_count}/{self.count})'\n\n __repr__ = __str__"
},
{
"identifier": "Switch",
"path": "zafkiel/ui/switch.py",
"snippet": "class Switch:\n \"\"\"\n A wrapper to handle switches in game, switch among states with retries.\n Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py\n\n Examples:\n # Definitions\n submarine_hunt = Switch('Submarine_hunt', offset=120)\n submarine_hunt.add_state('on', check_button=Template(r\"assets/ON.png\"))\n submarine_hunt.add_state('off', check_button=Template(r\"assets/OFF.png\"))\n\n # Change state to ON\n submarine_view.set(TPL_ON)\n \"\"\"\n\n def __init__(self, name: str = 'Switch', is_selector: bool = False):\n \"\"\"\n Args:\n name:\n is_selector: True if this is a multi choice, click to choose one of the switches.\n For example: | [Daily] | Urgent | -> click -> | Daily | [Urgent] |\n False if this is a switch, click the switch itself, and it changed in the same position.\n For example: | [ON] | -> click -> | [OFF] |\n \"\"\"\n self.name = name\n self.is_choice = is_selector\n self.state_list = []\n\n def __str__(self):\n return self.name\n\n __repr__ = __str__\n\n def add_state(self, state: str, check_button: Template, click_button: Template = None):\n \"\"\"\n Args:\n state: Must match check_button.name\n check_button:\n click_button:\n \"\"\"\n self.state_list.append({\n 'state': state,\n 'check_button': check_button,\n 'click_button': click_button if click_button is not None else check_button,\n })\n\n def get_data(self, state: Template) -> dict:\n \"\"\"\n Args:\n state:\n\n Returns:\n Dictionary in add_state\n\n Raises:\n ScriptError: If state invalid\n \"\"\"\n for row in self.state_list:\n if row['state'] == state.name:\n return row\n\n raise ScriptError(f'Switch {self.name} received an invalid state {state}')"
}
] | from zafkiel.device.template import ImageTemplate as Template
from zafkiel.logger import logger
from zafkiel.device.api import API
from zafkiel.ocr.ocr import Ocr
from zafkiel.ui.page import Page
from zafkiel.decorator import run_once
from zafkiel.exception import NotRunningError, PageUnknownError, ScriptError
from zafkiel.timer import Timer
from zafkiel.ui.switch import Switch | 9,333 |
class UI(API):
    """
    Processing interface related functions.
    Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/tasks/base/ui.py
    and https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py
    """

    # Make ui_current mutable so that it can be shared among subclasses of the UI class.
    ui_current: dict = {'page': None}
    popup_list: list = []

    def ui_switch_appear(self, switch: Switch) -> bool:
        """
        Args:
            switch:
        """
        if self.ui_get_current_page().switch != switch:
            return False
        for data in switch.state_list:
            if self.exists(data['check_button']):
                return True
        return False

    def ui_get_current_state(self, switch: Switch) -> str:
        """
        Args:
            switch:

        Returns:
            state name or 'unknown'.
        """
        if self.ui_current['page'].switch != switch:
            logger.warning(f"{self.ui_current['page']} does not have {switch}")
            return 'unknown'
        for data in switch.state_list:
            if self.exists(data['check_button']):
                return data['state']
        return 'unknown'

    def ui_page_appear(self, page: Page, timeout: float = 0) -> bool or tuple:
        """
        Args:
            page:
            timeout: Seconds to find.

        Returns:
            If found, return tuple of (x, y), else return False.
        """
        return self.exists(page.check_button, timeout)

    def ui_get_current_page(self):
        """
        Returns:
            Page:

        Raises:
            NotRunningError:
            PageUnknownError:
        """
        @run_once
        def app_check():
            if not self.app_is_running():
                raise NotRunningError("Game not running")
|
class UI(API):
    """
    Processing interface related functions.
    Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/tasks/base/ui.py
    and https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py
    """

    # Make ui_current mutable so that it can be shared among subclasses of the UI class.
    ui_current: dict = {'page': None}
    popup_list: list = []

    def ui_switch_appear(self, switch: Switch) -> bool:
        """
        Args:
            switch:
        """
        if self.ui_get_current_page().switch != switch:
            return False
        for data in switch.state_list:
            if self.exists(data['check_button']):
                return True
        return False

    def ui_get_current_state(self, switch: Switch) -> str:
        """
        Args:
            switch:

        Returns:
            state name or 'unknown'.
        """
        if self.ui_current['page'].switch != switch:
            logger.warning(f"{self.ui_current['page']} does not have {switch}")
            return 'unknown'
        for data in switch.state_list:
            if self.exists(data['check_button']):
                return data['state']
        return 'unknown'

    def ui_page_appear(self, page: Page, timeout: float = 0) -> bool or tuple:
        """
        Args:
            page:
            timeout: Seconds to find.

        Returns:
            If found, return tuple of (x, y), else return False.
        """
        return self.exists(page.check_button, timeout)

    def ui_get_current_page(self):
        """
        Returns:
            Page:

        Raises:
            NotRunningError:
            PageUnknownError:
        """
        @run_once
        def app_check():
            if not self.app_is_running():
                raise NotRunningError("Game not running")
| timeout = Timer(10, count=20).start() | 9 | 2023-11-12 09:33:35+00:00 | 12k |
medkit-lib/medkit | medkit/io/srt.py | [
{
"identifier": "Attribute",
"path": "medkit/core/attribute.py",
"snippet": "class Attribute(dict_conv.SubclassMapping):\n \"\"\"\n Medkit attribute, to be added to an annotation\n\n Attributes\n ----------\n label:\n The attribute label\n value:\n The value of the attribute. Should be either simple built-in types (int,\n float, bool, str) or collections of these types (list, dict, tuple). If\n you need structured complex data you should create a subclass of\n `Attribute`.\n metadata:\n The metadata of the attribute\n uid:\n The identifier of the attribute\n \"\"\"\n\n label: str\n value: Optional[Any]\n metadata: Dict[str, Any]\n uid: str\n\n def __init__(\n self,\n label: str,\n value: Optional[Any] = None,\n metadata: Optional[Dict[str, Any]] = None,\n uid: Optional[str] = None,\n ):\n if metadata is None:\n metadata = {}\n if uid is None:\n uid = generate_id()\n\n self.uid = uid\n self.label = label\n self.value = value\n self.metadata = metadata\n\n def __init_subclass__(cls):\n Attribute.register_subclass(cls)\n super().__init_subclass__()\n\n def to_dict(self) -> Dict[str, Any]:\n attribute_dict = dict(\n uid=self.uid,\n label=self.label,\n value=self.value,\n metadata=self.metadata,\n )\n dict_conv.add_class_name_to_data_dict(self, attribute_dict)\n return attribute_dict\n\n def to_brat(self) -> Optional[Any]:\n \"\"\"\n Return a value compatible with the brat format\n \"\"\"\n\n return self.value\n\n def to_spacy(self) -> Optional[Any]:\n \"\"\"\n Return a value compatible with spaCy\n \"\"\"\n\n return self.value\n\n def copy(self) -> Attribute:\n \"\"\"\n Create a new attribute that is a copy of the current instance, but\n with a new identifier\n\n This is used when we want to duplicate an existing attribute onto a\n different annotation.\n \"\"\"\n return dataclasses.replace(self, uid=generate_id())\n\n @classmethod\n def from_dict(cls, attribute_dict: Dict[str, Any]) -> Self:\n \"\"\"\n Creates an Attribute from a dict\n\n Parameters\n ----------\n attribute_dict: dict\n A dictionary from a serialized Attribute as generated by to_dict()\n \"\"\"\n\n subclass = cls.get_subclass_for_data_dict(attribute_dict)\n if subclass is not None:\n return subclass.from_dict(attribute_dict)\n\n return cls(\n uid=attribute_dict[\"uid\"],\n label=attribute_dict[\"label\"],\n value=attribute_dict[\"value\"],\n metadata=attribute_dict[\"metadata\"],\n )"
},
{
"identifier": "InputConverter",
"path": "medkit/core/conversion.py",
"snippet": "class InputConverter:\n \"\"\"Abstract class for converting external document to medkit documents\"\"\"\n\n @abc.abstractmethod\n def load(self, **kwargs) -> List[Document]:\n raise NotImplementedError"
},
{
"identifier": "OutputConverter",
"path": "medkit/core/conversion.py",
"snippet": "class OutputConverter:\n \"\"\"Abstract class for converting medkit document to external format\"\"\"\n\n @abc.abstractmethod\n def save(self, docs: List[Document], **kwargs) -> Optional[List]:\n raise NotImplementedError"
},
{
"identifier": "generate_id",
"path": "medkit/core/id.py",
"snippet": "def generate_id() -> str:\n return str(uuid.uuid1())"
},
{
"identifier": "OperationDescription",
"path": "medkit/core/operation_desc.py",
"snippet": "class OperationDescription:\n \"\"\"Description of a specific instance of an operation\n\n Parameters\n ----------\n uid:\n The unique identifier of the instance described\n name:\n The name of the operation. Can be the same as `class_name` or something\n more specific, for operations with a behavior that can be customized\n (for instance a rule-based entity matcher with user-provided rules, or a\n model-based entity matcher with a user-provided model)\n class_name:\n The name of the class of the operation\n config:\n The specific configuration of the instance\n \"\"\"\n\n uid: str\n name: str\n class_name: Optional[str] = None\n config: Dict[str, Any] = dataclasses.field(default_factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n return dict(uid=self.uid, name=self.name, class_name=self.class_name, config=self.config)"
},
{
"identifier": "ProvTracer",
"path": "medkit/core/prov_tracer.py",
"snippet": "class ProvTracer:\n \"\"\"Provenance tracing component.\n\n `ProvTracer` is intended to gather provenance information about how all data\n generated by medkit. For each data item (for instance an annotation or an\n attribute), `ProvTracer` can tell the operation that created it, the data\n items that were used to create it, and reciprocally, the data items that were\n derived from it (cf. :class:`~Prov`).\n\n Provenance-compatible operations should inform the provenance tracer of each\n data item that through the :meth:`~.add_prov` method.\n\n Users wanting to gather provenance information should instantiate one unique\n `ProvTracer` object and provide it to all operations involved in their data\n processing flow. Once all operations have been executed, they may then\n retrieve provenance info for specific data items through\n :meth:`~.get_prov`, or for all items with :meth:`~.get_provs`.\n\n Composite operations relying on inner operations (such as pipelines)\n shouldn't call :meth:`~.add_prov` method. Instead, they should instantiate\n their own internal `ProvTracer` and provide it to the operations they rely\n on, then use :meth:`~.add_prov_from_sub_tracer` to integrate\n information from this internal sub-provenance tracer into the main\n provenance tracer that was provided to them.\n\n This will build sub-provenance information, that can be retrieved later\n through :meth:`~.get_sub_prov_tracer` or :meth:`~.get_sub_prov_tracers`. The\n inner operations of a composite operation can themselves be composite\n operations, leading to a tree-like structure of nested provenance tracers.\n \"\"\"\n\n def __init__(self, store: Optional[ProvStore] = None, _graph: Optional[ProvGraph] = None):\n \"\"\"\n Parameters\n ----------\n store:\n Store that will contain all traced data items.\n \"\"\"\n if store is None:\n store = create_prov_store()\n if _graph is None:\n _graph = ProvGraph()\n\n self.store: ProvStore = store\n self._graph: ProvGraph = _graph\n\n def add_prov(\n self,\n data_item: IdentifiableDataItem,\n op_desc: OperationDescription,\n source_data_items: List[IdentifiableDataItem],\n ):\n \"\"\"\n Append provenance information about a specific data item.\n\n Parameters\n ----------\n data_item:\n Data item that was created.\n op_desc:\n Description of the operation that created the data item.\n source_data_items:\n Data items that were used by the operation to create the data item.\n \"\"\"\n assert not self._graph.has_node(\n data_item.uid\n ), f\"Provenance of data item with identifier {data_item.uid} was already added\"\n\n self.store.store_data_item(data_item)\n self.store.store_op_desc(op_desc)\n # add source data items to store\n for source_data_item in source_data_items:\n self.store.store_data_item(source_data_item)\n\n # add node to graph\n source_ids = [s.uid for s in source_data_items]\n self._graph.add_node(data_item.uid, op_desc.uid, source_ids)\n\n def add_prov_from_sub_tracer(\n self,\n data_items: List[IdentifiableDataItem],\n op_desc: OperationDescription,\n sub_tracer: ProvTracer,\n ):\n \"\"\"Append provenance information about data items created by a composite\n operation relying on inner operations (such as a pipeline) having its\n own internal sub-provenance tracer.\n\n Parameters\n ----------\n data_items:\n Data items created by the composite operation. 
Should not include\n internal intermediate data items, only the output of the operation.\n op_desc:\n Description of the composite operation that created the data items.\n sub_tracer:\n Internal sub-provenance tracer of the composite operation.\n \"\"\"\n assert self.store is sub_tracer.store\n self.store.store_op_desc(op_desc)\n\n sub_graph = sub_tracer._graph\n self._graph.add_sub_graph(op_desc.uid, sub_graph)\n\n for data_item in data_items:\n # ignore data items already known\n # (can happen with attributes being copied from one annotation to another)\n if self._graph.has_node(data_item.uid):\n # check operation_id is consistent\n node = self._graph.get_node(data_item.uid)\n if node.operation_id != op_desc.uid:\n raise RuntimeError(\n \"Trying to add provenance for sub graph for data item with uid\"\n f\" {data_item.uid} that already has a node, but with different\"\n \" operation_id\"\n )\n continue\n self._add_prov_from_sub_tracer_for_data_item(data_item.uid, op_desc.uid, sub_graph)\n\n def _add_prov_from_sub_tracer_for_data_item(\n self,\n data_item_id: str,\n operation_id: str,\n sub_graph: ProvGraph,\n ):\n assert not self._graph.has_node(data_item_id)\n assert sub_graph.has_node(data_item_id)\n\n # find source ids\n source_ids = []\n seen = set()\n queue = collections.deque([data_item_id])\n while queue:\n sub_graph_node_id = queue.popleft()\n seen.add(sub_graph_node_id)\n\n sub_graph_node = sub_graph.get_node(sub_graph_node_id)\n if sub_graph_node.operation_id is None:\n source_ids.append(sub_graph_node_id)\n queue.extend(uid for uid in sub_graph_node.source_ids if uid not in seen)\n\n # add new node on main graph representing\n # the data item generation by the composed operation\n self._graph.add_node(data_item_id, operation_id, source_ids)\n\n def has_prov(self, data_item_id: str) -> bool:\n \"\"\"Check if the provenance tracer has provenance information about a\n specific data item.\n\n .. note::\n This will return `False` if we have provenance info about a data\n item but only in a sub-provenance tracer.\n\n Parameters\n ----------\n data_item_id:\n Id of the data item.\n\n Returns\n -------\n bool\n `True` if there is provenance info that can be retrieved with\n :meth:`~get_prov()`.\n \"\"\"\n return self._graph.has_node(data_item_id)\n\n def get_prov(self, data_item_id: str) -> Prov:\n \"\"\"Return provenance information about a specific data item.\n\n Parameters\n ----------\n data_item_id:\n Id of the data item.\n\n Returns\n -------\n Prov\n Provenance info about the data item.\n \"\"\"\n if not self._graph.has_node(data_item_id):\n raise ValueError(\n f\"No provenance info available for data item with id {data_item_id}.\"\n \" Make sure the id is valid and provenance tracking was enabled for\"\n \" the operation that generated it.\"\n )\n\n node = self._graph.get_node(data_item_id)\n return self._build_prov_from_node(node)\n\n def get_provs(self) -> List[Prov]:\n \"\"\"Return all provenance information about all data items known to the tracer.\n\n .. note::\n Nested provenance info from sub-provenance tracers will not be returned.\n\n Returns\n -------\n List[Prov]\n Provenance info about all known data items.\n \"\"\"\n return [self._build_prov_from_node(node) for node in self._graph.get_nodes()]\n\n def has_sub_prov_tracer(self, operation_id: str) -> bool:\n \"\"\"Check if the provenance tracer has a sub-provenance tracer for a\n specific composite operation (such as a pipeline).\n\n .. 
note::\n This will return `False` if there is a sub-provenance tracer for\n the operation but that is not a direct child (i.e. that is deeper\n in the hierarchy).\n\n Parameters\n -----------\n operation_id:\n Id of the composite operation.\n\n Returns\n -------\n bool\n `True` if there is a sub-provenance tracer for the operation.\n \"\"\"\n return self._graph.has_sub_graph(operation_id)\n\n def get_sub_prov_tracer(self, operation_id: str) -> ProvTracer:\n \"\"\"Return a sub-provenance tracer containing sub-provenance information from a\n specific composite operation.\n\n Parameters\n ----------\n operation_id:\n Id of the composite operation.\n\n Returns\n -------\n ProvTracer\n The sub-provenance tracer containing sub-provenance information from the\n operation.\n \"\"\"\n sub_graph = self._graph.get_sub_graph(operation_id)\n return ProvTracer(store=self.store, _graph=sub_graph)\n\n def get_sub_prov_tracers(self) -> List[ProvTracer]:\n \"\"\"\n Return all sub-provenance tracers of the provenance tracer.\n\n .. note::\n This will not return sub-provenance tracers that are not direct\n children of this tracer (i.e. that are deeper in the hierarchy).\n\n Returns\n -------\n List[ProvTracer]\n All sub-provenance tracers of this provenance tracer.\n \"\"\"\n return [ProvTracer(store=self.store, _graph=sub_graph) for sub_graph in self._graph.get_sub_graphs()]\n\n def _build_prov_from_node(self, node: ProvNode):\n data_item = self.store.get_data_item(node.data_item_id)\n op_desc = self.store.get_op_desc(node.operation_id) if node.operation_id is not None else None\n source_data_items = [self.store.get_data_item(uid) for uid in node.source_ids]\n derived_data_items = [self.store.get_data_item(uid) for uid in node.derived_ids]\n return Prov(data_item, op_desc, source_data_items, derived_data_items)"
},
{
"identifier": "Segment",
"path": "medkit/core/audio/annotation.py",
"snippet": "class Segment(dict_conv.SubclassMapping):\n \"\"\"Audio segment referencing part of an :class:`~.core.audio.AudioDocument`.\n\n Attributes\n ----------\n uid:\n Unique identifier of the segment.\n label:\n Label of the segment.\n audio:\n The audio signal of the segment. It must be consistent with the span,\n in the sense that it must correspond to the audio signal of the document\n at the span boundaries. But it can be a modified, processed version of this\n audio signal.\n span:\n Span (in seconds) indicating the part of the document's full signal that\n this segment references.\n attrs:\n Attributes of the segment. Stored in a\n :class:{~medkit.core.AttributeContainer} but can be passed as a list at\n init.\n metadata:\n Metadata of the segment.\n keys:\n Pipeline output keys to which the annotation belongs to.\n \"\"\"\n\n uid: str\n label: str\n audio: AudioBuffer\n span: Span\n attrs: AttributeContainer\n metadata: Dict[str, Any]\n keys: Set[str]\n\n def __init__(\n self,\n label: str,\n audio: AudioBuffer,\n span: Span,\n attrs: Optional[List[Attribute]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n uid: Optional[str] = None,\n ):\n if attrs is None:\n attrs = []\n if metadata is None:\n metadata = {}\n if uid is None:\n uid = generate_id()\n\n self.label = label\n self.audio = audio\n self.span = span\n self.metadata = metadata\n self.keys = set()\n self.uid = uid\n\n self.attrs = AttributeContainer(owner_id=self.uid)\n for attr in attrs:\n self.attrs.add(attr)\n\n def __init_subclass__(cls):\n Segment.register_subclass(cls)\n super().__init_subclass__()\n\n def to_dict(self) -> Dict[str, Any]:\n # convert MemoryAudioBuffer to PlaceholderAudioBuffer\n # because we can't serialize the actual signal\n if isinstance(self.audio, MemoryAudioBuffer):\n placeholder = PlaceholderAudioBuffer.from_audio_buffer(self.audio)\n audio = placeholder.to_dict()\n else:\n audio = self.audio.to_dict()\n\n span = self.span.to_dict()\n attrs = [a.to_dict() for a in self.attrs]\n segment_dict = dict(\n uid=self.uid,\n label=self.label,\n audio=audio,\n span=span,\n attrs=attrs,\n metadata=self.metadata,\n )\n dict_conv.add_class_name_to_data_dict(self, segment_dict)\n return segment_dict\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> Segment:\n subclass = cls.get_subclass_for_data_dict(data)\n if subclass is not None:\n return subclass.from_dict(data)\n\n audio = AudioBuffer.from_dict(data[\"audio\"])\n span = Span.from_dict(data[\"span\"])\n attrs = [Attribute.from_dict(a) for a in data[\"attrs\"]]\n return cls(\n label=data[\"label\"],\n audio=audio,\n span=span,\n attrs=attrs,\n uid=data[\"uid\"],\n metadata=data[\"metadata\"],\n )"
},
{
"identifier": "FileAudioBuffer",
"path": "medkit/core/audio/audio_buffer.py",
"snippet": "class FileAudioBuffer(AudioBuffer):\n \"\"\"\n Audio buffer giving access to audio files stored on the filesystem (to use\n when manipulating unmodified raw audio).\n\n Supports all file formats handled by `libsndfile`\n (http://www.mega-nerd.com/libsndfile/#Features)\n \"\"\"\n\n def __init__(\n self,\n path: Union[str, Path],\n trim_start: Optional[int] = None,\n trim_end: Optional[int] = None,\n sf_info: Optional[Any] = None,\n ):\n \"\"\"\n Parameters\n ----------\n path:\n Path to the audio file.\n trim_start:\n First sample of audio file to consider.\n trim_end:\n First sample of audio file to exclude.\n sf_info:\n Optional metadata dict returned by soundfile.\n \"\"\"\n path = Path(path)\n if sf_info is None:\n sf_info = sf.info(path)\n\n assert trim_start is None or 0 <= trim_start <= sf_info.frames\n assert trim_end is None or 0 <= trim_end <= sf_info.frames\n\n if trim_start is None:\n trim_start = 0\n if trim_end is None:\n trim_end = sf_info.frames\n\n sample_rate = sf_info.samplerate\n nb_samples = trim_end - trim_start\n nb_channels = sf_info.channels\n\n super().__init__(sample_rate=sample_rate, nb_samples=nb_samples, nb_channels=nb_channels)\n\n self.path = path\n self._trim_end = trim_end\n self._trim_start = trim_start\n self._sf_info = sf_info\n\n def read(self, copy: bool = False) -> np.ndarray:\n signal, _ = sf.read(\n self.path,\n start=self._trim_start,\n stop=self._trim_end,\n always_2d=True,\n dtype=np.float32,\n )\n return signal.T\n\n def trim(self, start: Optional[int] = None, end: Optional[int] = None) -> AudioBuffer:\n assert start is None or 0 <= start <= self.nb_samples\n assert end is None or 0 <= end <= self.nb_samples\n\n if start is not None:\n new_trim_start = self._trim_start + start\n else:\n new_trim_start = self._trim_start\n if end is not None:\n new_trim_end = self._trim_start + end\n else:\n new_trim_end = self._trim_end\n assert new_trim_start <= new_trim_end\n return FileAudioBuffer(self.path, new_trim_start, new_trim_end, self._sf_info)\n\n def to_dict(self) -> Dict[str, Any]:\n buffer_dict = dict(\n path=str(self.path),\n trim_start=self._trim_start,\n trim_end=self._trim_end,\n )\n dict_conv.add_class_name_to_data_dict(self, buffer_dict)\n return buffer_dict\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> Self:\n return cls(path=data[\"path\"], trim_start=data[\"trim_start\"], trim_end=data[\"trim_end\"])\n\n def __eq__(self, other: object) -> bool:\n if type(other) is not self.__class__:\n return False\n return self.path == other.path and self._trim_end == other._trim_end and self._trim_start == other._trim_start"
},
{
"identifier": "AudioDocument",
"path": "medkit/core/audio/document.py",
"snippet": "class AudioDocument(dict_conv.SubclassMapping):\n \"\"\"\n Document holding audio annotations.\n\n Attributes\n ----------\n uid:\n Unique identifier of the document.\n audio:\n Audio buffer containing the entire signal of the document.\n anns: :class:`~.audio.AudioAnnotationContainer`\n Annotations of the document. Stored in an\n :class:`~.audio.AudioAnnotationContainer` but can be passed as a list at init.\n attrs: :class:`~.core.AttributeContainer`\n Attributes of the document. Stored in an\n :class:`~.core.AttributeContainer` but can be passed as a list at init\n metadata:\n Document metadata.\n raw_segment: :class:`~.audio.Segment`\n Auto-generated segment containing the full unprocessed document audio.\n \"\"\"\n\n RAW_LABEL: ClassVar[str] = \"RAW_AUDIO\"\n \"\"\"Label to be used for raw segment\"\"\"\n\n uid: str\n anns: AudioAnnotationContainer\n attrs: AttributeContainer\n metadata: Dict[str, Any]\n raw_segment: Segment\n\n def __init__(\n self,\n audio: AudioBuffer,\n anns: Optional[Sequence[Segment]] = None,\n attrs: Optional[Sequence[Attribute]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n uid: Optional[str] = None,\n ):\n if anns is None:\n anns = []\n if attrs is None:\n attrs = []\n if metadata is None:\n metadata = {}\n if uid is None:\n uid = generate_id()\n\n self.uid = uid\n self.metadata = metadata\n\n # auto-generated raw segment to hold the audio buffer\n self.raw_segment = self._generate_raw_segment(audio, uid)\n\n self.anns = AudioAnnotationContainer(doc_id=self.uid, raw_segment=self.raw_segment)\n for ann in anns:\n self.anns.add(ann)\n\n self.attrs = AttributeContainer(owner_id=self.uid)\n for attr in attrs:\n self.attrs.add(attr)\n\n @classmethod\n def _generate_raw_segment(cls, audio: AudioBuffer, doc_id: str) -> Segment:\n uid = str(generate_deterministic_id(reference_id=doc_id))\n\n return Segment(\n label=cls.RAW_LABEL,\n span=Span(0.0, audio.duration),\n audio=audio,\n uid=uid,\n )\n\n @property\n def audio(self) -> AudioBuffer:\n return self.raw_segment.audio\n\n def __init_subclass__(cls):\n AudioDocument.register_subclass(cls)\n super().__init_subclass__()\n\n def to_dict(self, with_anns: bool = True) -> Dict[str, Any]:\n # convert MemoryAudioBuffer to PlaceholderAudioBuffer\n # because we can't serialize the actual signal\n if isinstance(self.audio, MemoryAudioBuffer):\n placeholder = PlaceholderAudioBuffer.from_audio_buffer(self.audio)\n audio = placeholder.to_dict()\n else:\n audio = self.audio.to_dict()\n doc_dict: Dict[str, Any] = dict(\n uid=self.uid,\n audio=audio,\n metadata=self.metadata,\n )\n if with_anns:\n doc_dict[\"anns\"] = [a.to_dict() for a in self.anns]\n if self.attrs:\n doc_dict[\"attrs\"] = [a.to_dict() for a in self.attrs]\n\n dict_conv.add_class_name_to_data_dict(self, doc_dict)\n return doc_dict\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> Self:\n subclass = cls.get_subclass_for_data_dict(data)\n if subclass is not None:\n return subclass.from_dict(data)\n\n audio = AudioBuffer.from_dict(data[\"audio\"])\n anns = [Segment.from_dict(a) for a in data.get(\"anns\", [])]\n attrs = [Attribute.from_dict(a) for a in data.get(\"attrs\", [])]\n return cls(\n uid=data[\"uid\"],\n audio=audio,\n anns=anns,\n attrs=attrs,\n metadata=data[\"metadata\"],\n )\n\n @classmethod\n def from_file(cls, path: os.PathLike) -> Self:\n \"\"\"\n Create document from an audio file\n\n Parameters\n ----------\n path:\n Path to the audio file. 
Supports all file formats handled by\n `libsndfile` (http://www.mega-nerd.com/libsndfile/#Features)\n\n Returns\n -------\n AudioDocument\n Audio document with signal of `path` as audio. The file path is\n included in the document metadata.\n \"\"\"\n\n path = Path(path)\n audio = FileAudioBuffer(path)\n return cls(audio=audio, metadata={\"path_to_audio\": str(path.absolute())})\n\n @classmethod\n def from_dir(\n cls,\n path: os.PathLike,\n pattern: str = \"*.wav\",\n ) -> List[Self]:\n \"\"\"\n Create documents from audio files in a directory\n\n Parameters\n ----------\n path:\n Path of the directory containing audio files\n pattern:\n Glob pattern to match audio files in `path`. Supports all file\n formats handled by `libsndfile`\n (http://www.mega-nerd.com/libsndfile/#Features)\n\n Returns\n -------\n List[AudioDocument]\n Audio documents with signal of each file as audio\n \"\"\"\n\n path = Path(path)\n files = sorted(path.glob(pattern))\n return [cls.from_file(f) for f in files]"
},
{
"identifier": "Span",
"path": "medkit/core/audio/span.py",
"snippet": "class Span(NamedTuple):\n \"\"\"\n Boundaries of a slice of audio.\n\n Attributes\n ----------\n start:\n Starting point in the original audio, in seconds.\n end:\n Ending point in the original audio, in seconds.\n \"\"\"\n\n start: float\n end: float\n\n @property\n def length(self):\n \"\"\"Length of the span, in seconds\"\"\"\n return self.end - self.start\n\n def to_dict(self) -> Dict[str, Any]:\n span_dict = dict(start=self.start, end=self.end)\n dict_conv.add_class_name_to_data_dict(self, span_dict)\n return span_dict\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> Span:\n return cls(start=data[\"start\"], end=data[\"end\"])"
}
] | import logging
import pysrt
from pathlib import Path
from typing import List, Optional, Union
from medkit.core import (
Attribute,
InputConverter,
OperationDescription,
OutputConverter,
ProvTracer,
generate_id,
)
from medkit.core.audio import AudioDocument, FileAudioBuffer, Segment, Span | 8,081 | prov_tracer:
The provenance tracer used to trace the provenance.
"""
self._prov_tracer = prov_tracer
def load(
self,
srt_dir: Union[str, Path],
audio_dir: Optional[Union[str, Path]] = None,
audio_ext: str = ".wav",
) -> List[AudioDocument]:
"""
Load all .srt files in a directory into a list of
:class:`~medkit.core.audio.document.AudioDocument` objects.
        For each .srt file, there must be a corresponding audio file with the
        same basename, either in the same directory or in a separate audio
directory.
Parameters
----------
srt_dir:
Directory containing the .srt files.
audio_dir:
Directory containing the audio files corresponding to the .srt files,
if they are not in `srt_dir`.
audio_ext:
File extension to use for audio files.
Returns
-------
List[AudioDocument]
List of generated documents.
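        Examples
        --------
        Illustrative layout (hypothetical file names): if `recordings/` contains
        `interview01.srt` and `interview01.wav`, then `load("recordings")` returns
        a single document built from that pair.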
"""
srt_dir = Path(srt_dir)
audio_dir = Path(audio_dir) if audio_dir else None
docs = []
for srt_file in sorted(srt_dir.glob("*.srt")):
# corresponding audio file must have same base name with audio extension,
# either in the same directory or in audio_dir if provided
if audio_dir:
audio_file = (audio_dir / srt_file.stem).with_suffix(audio_ext)
else:
audio_file = srt_file.with_suffix(audio_ext)
doc = self.load_doc(srt_file, audio_file)
docs.append(doc)
if len(docs) == 0:
logger.warning(f"No .srt found in '{srt_dir}'")
return docs
def load_doc(self, srt_file: Union[str, Path], audio_file: Union[str, Path]) -> AudioDocument:
"""Load a single .srt file into an
:class:`~medkit.core.audio.document.AudioDocument` containing
turn segments with transcription attributes.
Parameters
----------
srt_file:
Path to the .srt file.
audio_file:
Path to the corresponding audio file.
Returns
-------
AudioDocument:
Generated document.
"""
audio_file = Path(audio_file)
srt_items = pysrt.open(str(srt_file))
full_audio = FileAudioBuffer(path=audio_file)
segments = [self._build_segment(srt_item, full_audio) for srt_item in srt_items]
doc = AudioDocument(audio=full_audio)
for segment in segments:
doc.anns.add(segment)
return doc
def load_segments(self, srt_file: Union[str, Path], audio_file: Union[str, Path]) -> List[Segment]:
"""Load a .srt file and return a list of
:class:`~medkit.core.audio.annotation.Segment` objects corresponding to
turns, with transcription attributes.
Parameters
----------
srt_file:
Path to the .srt file.
audio_file:
Path to the corresponding audio file.
Returns
-------
List[:class:`~medkit.core.audio.annotation.Segment`]:
Turn segments as found in the .srt file, with transcription
attributes attached.
"""
audio_file = Path(audio_file)
srt_items = pysrt.open(str(srt_file))
full_audio = FileAudioBuffer(path=audio_file)
segments = [self._build_segment(srt_item, full_audio) for srt_item in srt_items]
return segments
def _build_segment(self, srt_item: pysrt.SubRipItem, full_audio: FileAudioBuffer) -> Segment:
# milliseconds to seconds
start = srt_item.start.ordinal / 1000
end = srt_item.end.ordinal / 1000
audio = full_audio.trim_duration(start, end)
segment = Segment(label=self.turn_segment_label, span=Span(start, end), audio=audio)
| """
This module needs extra-dependencies not installed as core dependencies of medkit.
To install them, use `pip install medkit-lib[srt-io-convert]`.
"""
__all__ = ["SRTInputConverter", "SRTOutputConverter"]
logger = logging.getLogger(__name__)
class SRTInputConverter(InputConverter):
"""
Convert .srt files containing transcription information into turn segments
with transcription attributes.
For each turn in a .srt file, a
:class:`~medkit.core.audio.annotation.Segment` will be created, with an
associated :class:`~medkit.core.Attribute` holding the transcribed text as
value. The segments can be retrieved directly or as part of an
:class:`~medkit.core.audio.document.AudioDocument` instance.
If a :class:`~medkit.core.ProvTracer` is set, provenance information will be
added for each segment and each attribute (referencing the input converter
as the operation).
"""
def __init__(
self,
turn_segment_label: str = "turn",
transcription_attr_label: str = "transcribed_text",
converter_id: Optional[str] = None,
):
"""
Parameters
----------
turn_segment_label:
Label to use for segments representing turns in the .srt file.
transcription_attr_label:
            Label to use for segment attributes containing the transcribed text.
converter_id:
Identifier of the converter.
"""
if converter_id is None:
converter_id = generate_id()
self.uid = converter_id
self.turn_segment_label = turn_segment_label
self.transcription_attr_label = transcription_attr_label
self._prov_tracer: Optional[ProvTracer] = None
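    # Illustrative usage sketch (not part of the original module; file names are hypothetical):
    #   converter = SRTInputConverter()
    #   doc = converter.load_doc("interview01.srt", "interview01.wav")
    #   for segment in doc.anns:
    #       for attr in segment.attrs:
    #           print(segment.span, attr.label, attr.value)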
@property
def description(self) -> OperationDescription:
"""Contains all the input converter init parameters."""
return OperationDescription(
uid=self.uid,
name=self.__class__.__name__,
class_name=self.__class__.__name__,
config={
"turn_segment_label": self.turn_segment_label,
"transcription_attr_label": self.transcription_attr_label,
},
)
def set_prov_tracer(self, prov_tracer: ProvTracer):
"""Enable provenance tracing.
Parameters
----------
prov_tracer:
The provenance tracer used to trace the provenance.
"""
self._prov_tracer = prov_tracer
def load(
self,
srt_dir: Union[str, Path],
audio_dir: Optional[Union[str, Path]] = None,
audio_ext: str = ".wav",
) -> List[AudioDocument]:
"""
Load all .srt files in a directory into a list of
:class:`~medkit.core.audio.document.AudioDocument` objects.
        For each .srt file, there must be a corresponding audio file with the
        same basename, either in the same directory or in a separate audio
directory.
Parameters
----------
srt_dir:
Directory containing the .srt files.
audio_dir:
Directory containing the audio files corresponding to the .srt files,
if they are not in `srt_dir`.
audio_ext:
File extension to use for audio files.
Returns
-------
List[AudioDocument]
List of generated documents.
"""
srt_dir = Path(srt_dir)
audio_dir = Path(audio_dir) if audio_dir else None
docs = []
for srt_file in sorted(srt_dir.glob("*.srt")):
# corresponding audio file must have same base name with audio extension,
# either in the same directory or in audio_dir if provided
if audio_dir:
audio_file = (audio_dir / srt_file.stem).with_suffix(audio_ext)
else:
audio_file = srt_file.with_suffix(audio_ext)
doc = self.load_doc(srt_file, audio_file)
docs.append(doc)
if len(docs) == 0:
logger.warning(f"No .srt found in '{srt_dir}'")
return docs
def load_doc(self, srt_file: Union[str, Path], audio_file: Union[str, Path]) -> AudioDocument:
"""Load a single .srt file into an
:class:`~medkit.core.audio.document.AudioDocument` containing
turn segments with transcription attributes.
Parameters
----------
srt_file:
Path to the .srt file.
audio_file:
Path to the corresponding audio file.
Returns
-------
AudioDocument:
Generated document.
"""
audio_file = Path(audio_file)
srt_items = pysrt.open(str(srt_file))
full_audio = FileAudioBuffer(path=audio_file)
segments = [self._build_segment(srt_item, full_audio) for srt_item in srt_items]
doc = AudioDocument(audio=full_audio)
for segment in segments:
doc.anns.add(segment)
return doc
def load_segments(self, srt_file: Union[str, Path], audio_file: Union[str, Path]) -> List[Segment]:
"""Load a .srt file and return a list of
:class:`~medkit.core.audio.annotation.Segment` objects corresponding to
turns, with transcription attributes.
Parameters
----------
srt_file:
Path to the .srt file.
audio_file:
Path to the corresponding audio file.
Returns
-------
List[:class:`~medkit.core.audio.annotation.Segment`]:
Turn segments as found in the .srt file, with transcription
attributes attached.
"""
audio_file = Path(audio_file)
srt_items = pysrt.open(str(srt_file))
full_audio = FileAudioBuffer(path=audio_file)
segments = [self._build_segment(srt_item, full_audio) for srt_item in srt_items]
return segments
def _build_segment(self, srt_item: pysrt.SubRipItem, full_audio: FileAudioBuffer) -> Segment:
# milliseconds to seconds
start = srt_item.start.ordinal / 1000
end = srt_item.end.ordinal / 1000
audio = full_audio.trim_duration(start, end)
segment = Segment(label=self.turn_segment_label, span=Span(start, end), audio=audio) | transcription_attr = Attribute(label=self.transcription_attr_label, value=srt_item.text) | 0 | 2023-11-13 16:28:56+00:00 | 12k |
interpretml/LLM-Tabular-Memorization-Checker | tabmemcheck/functions.py | [
{
"identifier": "LLM_Interface",
"path": "tabmemcheck/llm.py",
"snippet": "class LLM_Interface:\n \"\"\"The interface to the language model.\"\"\"\n\n # if true, the tests use the chat_completion function, otherwise the completion function\n chat_mode = False\n\n def completion(self, prompt, temperature, max_tokens):\n \"\"\"Returns: The response (string)\"\"\"\n\n def chat_completion(self, messages, temperature, max_tokens):\n \"\"\"Returns: The response (string)\"\"\"\n raise NotImplementedError"
},
{
"identifier": "ChatWrappedLLM",
"path": "tabmemcheck/llm.py",
"snippet": "class ChatWrappedLLM(LLM_Interface):\n \"\"\"Wrap a base language model (i.e. an LLM_Interface that only implements the completion method) to act as a chat completion model.\n\n The wrapped model take queries via the chat_completion interface. It transforms the messages list into a single textual prompt using the provided prompt_fn.\n \"\"\"\n\n def __init__(self, llm, prompt_fn, ends_with: str = None):\n assert not llm.chat_mode, \"The wrapped model must be a base model.\"\n self.llm = llm\n self.chat_mode = True\n self.wrapper_fn = prompt_fn\n self.ends_with = ends_with\n\n def chat_completion(self, messages, temperature, max_tokens):\n prompt = self.wrapper_fn(messages)\n # print(prompt)\n response = self.llm.completion(prompt, temperature, max_tokens)\n # print(response)\n if (\n self.ends_with is not None\n ): # we frequently use '\\n\\n' as the end of the relevant part of the response\n if self.ends_with in response:\n response = response[: response.find(self.ends_with)]\n return response\n\n def __repr__(self) -> str:\n return self.llm.__repr__()"
},
{
"identifier": "send_chat_completion",
"path": "tabmemcheck/llm.py",
"snippet": "def send_chat_completion(llm: LLM_Interface, messages, max_tokens=None, logfile=None):\n \"\"\"Send chat completion with retrying and logging.\n\n Returns: The response (string))\"\"\"\n config = tabmem.config\n if max_tokens is None:\n max_tokens = config.max_tokens\n response = llm.chat_completion(messages, config.temperature, max_tokens)\n if config.sleep > 0.0:\n time.sleep(config.sleep)\n # logging\n log(messages, response, logfile)\n # printing\n if config.print_prompts or config.print_next_prompt:\n pretty_print_messages(messages)\n if config.print_prompts or config.print_responses or config.print_next_prompt:\n pretty_print_response(response)\n # reset print_next_prompt\n config.print_next_prompt = False\n # return string response\n return response"
},
{
"identifier": "send_completion",
"path": "tabmemcheck/llm.py",
"snippet": "def send_completion(llm: LLM_Interface, prompt, max_tokens=None, logfile=None):\n config = tabmem.config\n if max_tokens is None:\n max_tokens = config.max_tokens\n response = llm.completion(prompt, config.temperature, max_tokens)\n # logging\n log(prompt, response, logfile)\n # printing\n if config.print_prompts or config.print_next_prompt:\n pretty_print_completion(prompt, response)\n elif config.print_responses:\n pretty_print_response(response)\n # reset print_next_prompt\n config.print_next_prompt = False\n # return string response\n return response"
},
{
"identifier": "bcolors",
"path": "tabmemcheck/llm.py",
"snippet": "class bcolors:\n HEADER = \"\\033[95m\"\n OKBLUE = \"\\033[94m\"\n OKCYAN = \"\\033[96m\"\n OKGREEN = \"\\033[92m\"\n WARNING = \"\\033[93m\"\n FAIL = \"\\033[91m\"\n ENDC = \"\\033[0m\"\n BOLD = \"\\033[1m\"\n UNDERLINE = \"\\033[4m\"\n\n # Regular Colors\n Black = \"\\033[0;30m\" # Black\n Red = \"\\033[0;31m\" # Red\n Green = \"\\033[0;32m\" # Green\n Yellow = \"\\033[0;33m\" # Yellow\n Blue = \"\\033[0;34m\" # Blue\n Purple = \"\\033[0;35m\" # Purple\n Cyan = \"\\033[0;36m\" # Cyan\n White = \"\\033[0;37m\" # White\n\n # Background\n On_Black = \"\\033[40m\" # Black\n On_Red = \"\\033[41m\" # Red\n On_Green = \"\\033[42m\" # Green\n On_Yellow = \"\\033[43m\" # Yellow\n On_Blue = \"\\033[44m\" # Blue\n On_Purple = \"\\033[45m\" # Purple\n On_Cyan = \"\\033[46m\" # Cyan\n On_White = \"\\033[47m\" # White"
},
{
"identifier": "statistical_feature_prediction_test",
"path": "tabmemcheck/row_independence.py",
"snippet": "@retry(\n stop=stop_after_attempt(10)\n) # the automated fitting can fail for an unlucky choice of the test rows (I think. at least it can fail with certain probability due to bad label encoding. this is a quick fix)\ndef statistical_feature_prediction_test(\n csv_file, feature_name, num_prefix_rows=5, confidence_level=0.95, verbose=False\n):\n \"\"\"Train a gradient boosted tree and a linear classifer to predict the value of feature {feature_name} in the n-th row of the csv file,\n using all the features of the previous {num_prefix_rows} rows.\n\n Returns: True if the null of no overalp is rejected, False otherwise.\n \"\"\"\n # load the file as a pandas dataframe\n df = utils.load_csv_df(csv_file)\n feature_names = utils.get_feature_names(csv_file)\n\n # auto-adjust the number of prefix rows bases on the size of the dataset\n # (it is more important to have a big test set, so that we can detect strong effects (row id) on small datasets with significance)\n num_prefix_rows = 5\n if len(df) < 1000:\n num_prefix_rows = 3\n if len(df) < 500:\n num_prefix_rows = 2\n if len(df) < 200:\n num_prefix_rows = 1\n\n # we need to make a strict separation between train and test rows\n # this means that we exclude the {num_prefix_rows} rows before any test row from the training set\n test_rows = np.random.choice(\n len(df), size=(len(df) // (1 + num_prefix_rows)) // 2, replace=False\n )\n\n # regression or classification?\n classification = False\n if df[feature_name].dtype == \"object\":\n classification = True\n elif (\n len(df[feature_name].unique()) < 25\n and len(df[feature_name].unique()) / len(df) < 0.05\n ):\n # if the feature takes only a couple of values, classification\n df[feature_name] = df[feature_name].astype(\"category\").cat.codes\n classification = True\n\n # convert all numbers to floats\n for fn in feature_names:\n if df[fn].dtype == \"int64\":\n df[fn] = df[fn].astype(float)\n\n # convert stings to categorical features\n for fn in feature_names:\n if df[fn].dtype == \"object\":\n df[fn] = df[fn].astype(\"category\").cat.codes\n\n # impute all missing values with the mean\n df = df.fillna(df.mean())\n\n # construct the prediction problem\n X_train, X_test = [], []\n y_train, y_test = [], []\n for i_row in range(num_prefix_rows, len(df)):\n # the value of the feature in the test row\n y_i = df[feature_name].iloc[i_row]\n # all the values of the previous num_prefix_rows rows\n X_i = df.iloc[i_row - num_prefix_rows : i_row].values.flatten()\n # is this row train, test, or excluded?\n if i_row in test_rows: # test\n X_test.append(X_i)\n y_test.append(y_i)\n else:\n excluded = False\n for dist in range(num_prefix_rows):\n if i_row + dist + 1 in test_rows: # excluded\n excluded = True\n if not excluded: # train\n X_train.append(X_i)\n y_train.append(y_i)\n X_train, X_test = np.array(X_train), np.array(X_test)\n y_train, y_test = np.array(y_train), np.array(y_test)\n\n # train a gradient boosted tree and logistic/linear regression\n gbtree = XGBRegressor()\n linear_clf = make_pipeline(StandardScaler(), LinearRegression())\n if classification:\n gbtree = XGBClassifier()\n linear_clf = make_pipeline(StandardScaler(), LogisticRegression())\n # ignore convergence warnings etc.\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n gbtree.fit(X_train, y_train)\n linear_clf.fit(X_train, y_train)\n # for the test, we choose the classifier with the lower TRAINING error\n # (we can do this without adjusting the confidence level)\n final_model = gbtree\n if 
linear_clf.score(X_train, y_train) < gbtree.score(X_train, y_train):\n final_model = linear_clf\n # the final predictions\n y_pred = final_model.predict(X_test)\n\n # evaluation\n if classification:\n # measure the predictive accuracy\n score, ci = utils.accuracy(y_pred, y_test, confidence_level=confidence_level)\n # the best unconditional predictor: always predicting the most common class\n y_pred = np.repeat(np.argmax(np.bincount(y_train)), len(y_test))\n baseline_score, baseline_ci = utils.accuracy(y_pred, y_test)\n if verbose:\n print(f\"Accuracy: {score:.3} ({ci.low:.3}, {ci.high:.3})\")\n print(\n f\"Baseline (most common class): {baseline_score:.3} ({baseline_ci.low:.3}, {baseline_ci.high:.3})\"\n )\n else:\n # measure the mean squared error\n score, ci = utils.mean_squared_error(\n y_pred, y_test, confidence_level=confidence_level\n )\n # the mean absolute error of the mean\n baseline_score, baseline_ci = utils.mean_squared_error(\n np.repeat(np.mean(y_train), len(y_test)), y_test\n )\n if verbose:\n print(f\"Mean squared error: {score:.3} ({ci.low:.3}, {ci.high:.3})\")\n print(\n f\"Baseline (mean): {baseline_score:.3} ({baseline_ci.low:.3}, {baseline_ci.high:.3})\"\n )\n\n # is the gbtree significantly better than the baseline?\n if classification:\n if ci.low > baseline_ci.high:\n return True\n else:\n if ci.high < baseline_ci.low:\n return True\n return False"
},
{
"identifier": "prefix_suffix_chat_completion",
"path": "tabmemcheck/chat_completion.py",
"snippet": "def prefix_suffix_chat_completion(\n llm: LLM_Interface,\n prefixes: list[str],\n suffixes: list[str],\n system_prompt: str,\n few_shot=None,\n num_queries=100,\n out_file=None,\n rng=None,\n):\n \"\"\"A basic chat completion function. Takes a list of prefixes and suffixes and a system prompt.\n Sends {num_queries} prompts of the format\n\n System: <system_prompt>\n User: <prefix> |\n Assistant: <suffix> |\n ... | {few_shot} times, or one example from each (prefixes, suffixes) pair in a {few_shot} list.\n User: <prefix> | In the second case, few_shot = [([prefixes], [suffixes]), ..., ([prefixes], [suffixes])]\n Assistant: <suffix> |\n User: <prefix>\n Assistant: <response> (= test suffix?)\n\n The num_queries prefixes and suffixes are randomly selected from the respective lists.\n The function guarantees that the test suffix (as a complete string) is not contained in any of the few-shot prefixes or suffixes.\n\n Stores the results in a csv file.\n\n Returns: the test prefixes, test suffixes, and responses\n \"\"\"\n assert len(prefixes) == len(\n suffixes\n ), \"prefixes and suffixes must have the same length\"\n\n # randomly shuffle the prefixes and suffixes\n if rng is None:\n rng = np.random.default_rng()\n idx = rng.permutation(len(prefixes))\n prefixes = [prefixes[i] for i in idx]\n suffixes = [suffixes[i] for i in idx]\n\n # the number of points to evaluate\n num_points = min(num_queries, len(prefixes))\n\n test_prefixes = []\n test_suffixes = []\n responses = []\n for i_testpoint in range(num_points):\n # system prompt\n messages = [\n {\n \"role\": \"system\",\n \"content\": system_prompt,\n },\n ]\n # few-shot examples?\n if few_shot is not None:\n # if few_shot is an integer, include few_shot examples from the original prefixes and suffixes\n if isinstance(few_shot, int):\n for _ in range(few_shot):\n idx = None\n retries = 0\n # select a random prefix/suffix pair\n while (\n idx is None\n or idx == i_testpoint\n # assert that the test suffix is not contained in the few-shot prefixes or suffixes\n or suffixes[i_testpoint] in prefixes[idx]\n or suffixes[i_testpoint] in suffixes[idx]\n ):\n idx = rng.choice(len(prefixes))\n retries += 1\n if retries > 100:\n raise Exception(\n \"Unable to construct a query where the desired output is not contained in the few-shot data.\\nDid you provide the test dataset as few-shot example?\"\n )\n prefix = prefixes[idx]\n suffix = suffixes[idx]\n messages.append({\"role\": \"user\", \"content\": prefix})\n messages.append({\"role\": \"assistant\", \"content\": suffix})\n # if few_shot is a list of (prefixes, suffixes)-tuples, inlude one example from each tuple\n elif isinstance(few_shot, list):\n for fs_prefixes, fs_suffixes in few_shot:\n fs_prefix, fs_suffix = None, None\n retries = 0\n # select a random prefix/suffix pair\n while (\n fs_prefix is None\n # assert that the test suffix is not contained in the few-shot prefixes or suffixes\n or suffixes[i_testpoint] in fs_prefix\n or suffixes[i_testpoint] in fs_suffix\n ):\n fs_idx = rng.choice(len(fs_prefixes))\n fs_prefix = fs_prefixes[fs_idx]\n fs_suffix = fs_suffixes[fs_idx]\n retries += 1\n if retries > 100:\n raise Exception(\n \"Unable to construct a query where the desired output is not contained in the few-shot data.\\nDid you provide the test dataset as few-shot example?\"\n )\n messages.append({\"role\": \"user\", \"content\": fs_prefix})\n messages.append({\"role\": \"assistant\", \"content\": fs_suffix})\n\n # test observation\n test_prefix = prefixes[i_testpoint]\n 
test_suffix = suffixes[i_testpoint]\n messages.append({\"role\": \"user\", \"content\": test_prefix})\n response = send_chat_completion(llm, messages)\n # store prefix, suffix and response\n test_prefixes.append(test_prefix)\n test_suffixes.append(test_suffix)\n responses.append(response)\n\n # save the results to file\n if out_file is not None:\n results_df = pd.DataFrame(\n {\n \"prefix\": test_prefixes,\n \"suffix\": test_suffixes,\n \"response\": responses,\n }\n )\n results_df.to_csv(\n out_file,\n index=False,\n )\n\n return test_prefixes, test_suffixes, responses"
},
{
"identifier": "row_chat_completion",
"path": "tabmemcheck/chat_completion.py",
"snippet": "def row_chat_completion(\n llm,\n csv_file,\n system_prompt,\n num_prefix_rows=10,\n num_queries=100,\n few_shot=7,\n out_file=None,\n):\n \"\"\"Row chat completion task. This task ask the LLM to predict the next row in the\n csv file, given the previous rows. This task is the basis for the row completion\n test, and also for the first token test. Uses prefix_suffix_chat_completion.\"\"\"\n # assert that few_shot is an integer\n assert isinstance(few_shot, int), \"For row completion, few_shot must be an integer.\"\n\n # load the file as a list of strings\n rows = utils.load_csv_rows(csv_file)\n\n # prepare data\n prefixes = []\n suffixes = []\n for idx in range(len(rows) - num_prefix_rows):\n prefixes.append(\"\\n\".join(rows[idx : idx + num_prefix_rows]))\n suffixes.append(rows[idx + num_prefix_rows])\n\n test_prefixes, test_suffixes, responses = prefix_suffix_chat_completion(\n llm,\n prefixes,\n suffixes,\n system_prompt,\n few_shot=few_shot,\n num_queries=num_queries,\n out_file=out_file,\n )\n\n return test_prefixes, test_suffixes, responses"
},
{
"identifier": "row_completion",
"path": "tabmemcheck/chat_completion.py",
"snippet": "def row_completion(\n llm,\n csv_file,\n num_prefix_rows=10,\n num_queries=100,\n out_file=None, # TODO support out_file\n):\n \"\"\"Plain language model variant of row_chat_completion\"\"\"\n # load the file as a list of strings\n rows = utils.load_csv_rows(csv_file)\n\n # choose num_queries rows to complete\n prefixes = []\n suffixes = []\n responses = []\n for idx in np.random.choice(\n len(rows) - num_prefix_rows, num_queries, replace=False\n ):\n # prepare query\n prefix = \"\\n\".join(rows[idx : idx + num_prefix_rows])\n suffix = rows[idx + num_prefix_rows]\n\n # send query\n response = send_completion(llm, prefix, max_tokens=1 + len(suffix))\n\n # keep only the first row in the response\n response = response.strip(\"\\n\").split(\"\\n\")[0]\n\n # store prefix, suffix and response\n prefixes.append(prefix)\n suffixes.append(suffix)\n responses.append(response)\n\n return prefixes, suffixes, responses"
},
{
"identifier": "feature_values_chat_completion",
"path": "tabmemcheck/chat_completion.py",
"snippet": "def feature_values_chat_completion(\n llm: LLM_Interface,\n csv_file: str,\n system_prompt,\n num_queries,\n few_shot=[], # list or integer\n cond_feature_names=[],\n fs_cond_feature_names=[], # a list of lists of conditional feature names for each few-shot example\n add_description=True,\n out_file=None,\n):\n \"\"\"Feature chat completion task. This task asks the LLM to complete the feature values of observations in the dataset.\n\n The prompt format is the following:\n System: <system_prompt>\n |\n | {few_shot} examples from other csv files.\n |\n User: Dataset: <dataset_name>\n Feature Names: Feature 1, Feature 2, ..., Feature n\n Feature Values: Feature 1 = value 1, Feature 2 = value 2, ..., Feature m = value m\n [Target: Feature k]\n Response: Feature m + 1 = value m + 1, ..., Feature n = value n [Feature k = value k]\n\n This can be modified in the following ways:\n - Remove dataset description and feature names ({add_description} parameter)\n - don't provide any conditional features\n - Don't use the feature names, but only the values. (TODO ? or maybe remove, latter for formatter class)\n\n Options:\n - few_shot: use few-shot examples from other csv files (list), or few_shot examples from the same csv file (int)\n - target & fs_targets: if target is not None, then the LLM is asked to complete only the value of the target feature.\n\n The feature names are ordered in the prompt as they are ordered in the csv file. In the future we might want to relax this.\n\n TODO test and debug this function\n \"\"\"\n # TODO assert that all the given feature names are valid (i.e. occur in the dataset, otherwise throw exception)\n\n dataset_name = utils.get_dataset_name(csv_file)\n conditional_sampling = (\n cond_feature_names is not None and len(cond_feature_names) > 0\n )\n\n # if the few-shot argument is a list, then csv_file should not be in there\n # the current option is to remove it (TODO issue warning)\n if isinstance(few_shot, list):\n few_shot = [\n x for x in few_shot if not dataset_name in utils.get_dataset_name(x)\n ]\n\n # if few-shot is an integer, then include few_shot examples from csv_file\n # this is implemented by replacing few_shot and fs_cond_feature_names with the appropriate lists\n if isinstance(few_shot, int):\n few_shot = [csv_file for _ in range(few_shot)]\n fs_cond_feature_names = [cond_feature_names for _ in range(len(few_shot))]\n\n # issue a warning if conditional_sampling, but no fs_cond_feature_names\n if conditional_sampling and len(few_shot) > 0 and len(fs_cond_feature_names) == 0:\n print(\n llm.bcolors.WARNING\n + \"WARNING: feature_chat_completion: Conditional sampling, but no conditional feature names for the few-shot examples provided.\"\n + llm.bcolors.ENDC\n )\n\n # prefixes and suffixes for the main dataset\n if conditional_sampling:\n prefixes, samples = utils.load_cond_samples(\n csv_file, cond_feature_names, add_description=add_description\n )\n else:\n prefix, samples = utils.load_samples(csv_file)\n prefixes = [prefix] * len(samples)\n\n # prefixes and suffixes for the few-shot examples\n few_shot_prefixes_suffixes = []\n for fs_idx, fs_csv_file in enumerate(few_shot):\n if conditional_sampling:\n fs_prefixes, fs_samples = utils.load_cond_samples(\n fs_csv_file,\n fs_cond_feature_names[fs_idx],\n add_description=add_description,\n )\n few_shot_prefixes_suffixes.append((fs_prefixes, fs_samples))\n else:\n fs_prefix, fs_samples = utils.load_samples(fs_csv_file)\n few_shot_prefixes_suffixes.append(\n ([fs_prefix] * len(fs_samples), 
fs_samples)\n )\n\n # execute chat queries\n test_prefixes, test_suffixes, responses = prefix_suffix_chat_completion(\n llm,\n prefixes,\n samples,\n system_prompt,\n few_shot=few_shot_prefixes_suffixes,\n num_queries=num_queries,\n out_file=out_file,\n )\n\n return test_prefixes, test_suffixes, responses"
}
] | import os
import numpy as np
import pandas as pd
import tabmemcheck as tabmem
import tabmemcheck.analysis as analysis
import tabmemcheck.utils as utils
from typing import Any, Union
from difflib import SequenceMatcher
from tabmemcheck.llm import (
LLM_Interface,
ChatWrappedLLM,
send_chat_completion,
send_completion,
bcolors,
)
from tabmemcheck.row_independence import statistical_feature_prediction_test
from tabmemcheck.chat_completion import (
prefix_suffix_chat_completion,
row_chat_completion,
row_completion,
feature_values_chat_completion,
) | 8,039 | frac_duplicates = 1 - len(set(rows)) / len(rows)
if frac_duplicates == 0:
print(
bcolors.BOLD
+ "Info: "
+ bcolors.ENDC
+ "All the rows in the dataset are unique."
)
else:
print(
bcolors.BOLD
+ "Info: "
+ bcolors.ENDC
+ f"{100*frac_duplicates:.2f}% of the rows in this dataset are duplicates."
)
    # ask the model to perform row chat completion (execute the prompt)
if llm.chat_mode:
test_prefixes, test_suffixes, responses = row_chat_completion(
llm,
csv_file,
system_prompt,
num_prefix_rows,
num_queries,
few_shot,
out_file,
)
else:
test_prefixes, test_suffixes, responses = row_completion(
llm, csv_file, num_prefix_rows, num_queries, out_file
)
# count the number of exact matches
# NOTE here we assume that the test suffix is a single row that is unique, i.e. no duplicate rows
num_exact_matches = 0
for test_suffix, response in zip(test_suffixes, responses):
if test_suffix.strip() in response.strip():
num_exact_matches += 1
# the statistical test using the levenshtein distance TODO taken out of current version although it works
# test_prefix_rows = [prefix.split("\n") for prefix in test_prefixes]
# test_result = analysis.levenshtein_distance_t_test(
# responses, test_suffixes, test_prefix_rows
# )
# print the result
print(
bcolors.BOLD
+ "Row Completion Test: "
+ bcolors.ENDC
+ f"{num_exact_matches}/{num_queries} exact matches."
# + bcolors.BOLD
# + "\nLevenshtein distance test (p-value): "
# + bcolors.ENDC
# + f"{test_result.pvalue:.3f}."
)
return test_prefixes, test_suffixes, responses
####################################################################################
# Feature Completion
####################################################################################
def feature_completion_test(
csv_file: str,
llm: Union[LLM_Interface, str],
feature_name: str = None,
num_queries=100,
few_shot=5,
out_file=None,
system_prompt: str = "default",
):
"""Feature completion test where we attempt to predict a single rare feature & count the number of exact matches.
The basic prompt format is the following:
System: <system_prompt>
User: Feature 1 = value 1, Feature 2 = value 2, ..., Feature n = value n
Response: Feature {feature_name} = value
This can be modified in the following ways:
- Include few-shot examples from other csv files.
- Don't use the feature names, but only the values.
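    For illustration (the feature names and values below are made up), a single query
    might look like:
        System: <system_prompt>
        User: Age = 39, Workclass = State-gov, Education = Bachelors
        Response: fnlwgt = 77516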
"""
llm = __llm_setup(llm)
# TODO statistical analysis of the uniqueness of the feature (i.e., is the test appropriate?!)
if system_prompt == "default": # default system prompt?
system_prompt = tabmem.config.system_prompts["feature-completion"]
# if no feature value is provided, automatically select the most unique feature
if feature_name is None:
feature_name, frac_unique_values = analysis.find_most_unique_feature(csv_file)
print(
bcolors.BOLD
+ "Info: "
+ bcolors.ENDC
+ f"Using feature {feature_name} with {100*frac_unique_values:.2f}% unique values."
)
# all the other features are the conditional features
feature_names = utils.get_feature_names(csv_file)
cond_feature_names = [f for f in feature_names if f != feature_name]
if not llm.chat_mode: # wrap base model to take chat queries
def build_prompt(messages):
prompt = ""
for m in messages:
if m["role"] == "user":
prompt += m["content"]
elif m["role"] == "assistant":
prompt += ", " + m["content"] + "\n\n"
prompt += ", "
return prompt
llm = ChatWrappedLLM(llm, build_prompt, ends_with="\n\n")
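        # With this wrapper, a query is serialized roughly as (hypothetical content):
        # "<few-shot features>, <feature_name> = <value>\n\n<test features>, "
        # so the base model is left to complete "<feature_name> = <value>".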
# execute the prompt
|
DEFAULT_FEW_SHOT_CSV_FILES = [
"iris.csv",
"adult-train.csv",
"titanic-train.csv",
"uci-wine.csv",
"california-housing.csv",
]
def __difflib_similar(csv_file_1, csv_file_2):
sm = SequenceMatcher(
None, utils.load_csv_string(csv_file_1), utils.load_csv_string(csv_file_2)
)
if sm.quick_ratio() > 0.9:
return sm.ratio() > 0.9
return False
def __validate_few_shot_files(csv_file, few_shot_csv_files):
"""check if the csv_file is contained in the few_shot_csv_files."""
dataset_name = utils.get_dataset_name(csv_file)
few_shot_names = [utils.get_dataset_name(x) for x in few_shot_csv_files]
if dataset_name in few_shot_names:
# replace the dataset_name with open-ml diabetes
few_shot_csv_files = [
x for x in few_shot_csv_files if utils.get_dataset_name(x) != dataset_name
]
few_shot_csv_files.append("openml-diabetes.csv")
# now test with difflib if the dataset contents are very similar
for fs_file in few_shot_csv_files:
if __difflib_similar(csv_file, fs_file):
print(
bcolors.BOLD
+ "Warning: "
+ bcolors.ENDC
+ f"The dataset is very similar to the few-shot dataset {utils.get_dataset_name(fs_file)}."
)
return few_shot_csv_files
def __llm_setup(llm: Union[LLM_Interface, str]):
# if llm is a string, assume open ai model
if isinstance(llm, str):
llm = tabmem.openai_setup(llm)
return llm
def __print_info(csv_file, llm, few_shot_csv_files):
"""Print some information about the csv file and the model."""
print(
bcolors.BOLD
+ "Dataset: "
+ bcolors.ENDC
+ f"{utils.get_dataset_name(csv_file)}"
)
print(bcolors.BOLD + "Model: " + bcolors.ENDC + f"{llm}")
print(
bcolors.BOLD
+ "Few-Shot: "
+ bcolors.ENDC
+ ", ".join(
[utils.get_dataset_name(fs_csv_file) for fs_csv_file in few_shot_csv_files]
)
)
####################################################################################
# All the tests
####################################################################################
def run_all_tests(
csv_file: str,
llm: Union[LLM_Interface, str],
few_shot_csv_files=DEFAULT_FEW_SHOT_CSV_FILES,
feature_name=None,
):
llm = __llm_setup(llm)
few_shot_csv_files = __validate_few_shot_files(csv_file, few_shot_csv_files)
__print_info(csv_file, llm, few_shot_csv_files)
feature_names_test(csv_file, llm, few_shot_csv_files=few_shot_csv_files)
# todo feature values
header_test(csv_file, llm, few_shot_csv_files=few_shot_csv_files)
# draw 10 zero-knowledge samples
print(
bcolors.BOLD
+ "Drawing 10 zero-knowledge samples at temperature 0.7:"
+ bcolors.ENDC
)
temp = tabmem.config.temperature
tabmem.config.temperature = 0.7
samples_df = sample(
csv_file, llm, num_queries=10, few_shot_csv_files=few_shot_csv_files
)
# print the data frame unless it is empty
if (not samples_df.empty) and len(samples_df) > 0:
pd.set_option("display.expand_frame_repr", False)
print(samples_df)
if len(samples_df) < 10:
print(f"The model provided {len(samples_df)} valid samples.")
else:
print("The model was not able to provide valid samples.")
tabmem.config.temperature = temp
row_completion_test(csv_file, llm, num_queries=25)
feature_completion_test(csv_file, llm, num_queries=25, feature_name=feature_name)
first_token_test(csv_file, llm, num_queries=25)
####################################################################################
# Feature Names
####################################################################################
def feature_names_test(
csv_file: str,
llm: Union[LLM_Interface, str],
num_prefix_features: int = None,
few_shot_csv_files=DEFAULT_FEW_SHOT_CSV_FILES,
system_prompt: str = "default",
):
"""Test if the model knows the names of the features.
The prompt format is:
System: <system_prompt>
User: Dataset: <dataset_name>
Feature 1, Feature 2, ..., Feature n
Response: Feature n+1, Feature n+2, ..., Feature m
This can be modified in the following ways:
- Include few-shot examples from other csv files.
"""
llm = __llm_setup(llm)
few_shot_csv_files = __validate_few_shot_files(csv_file, few_shot_csv_files)
# default system prompt?
if system_prompt == "default":
system_prompt = tabmem.config.system_prompts["feature-names"]
dataset_name = utils.get_dataset_name(csv_file)
feature_names = utils.get_feature_names(csv_file)
# by default, use 1/4 of the features as prefix, but at least one
if num_prefix_features is None:
num_prefix_features = max(1, len(feature_names) // 4)
# remove the current csv file from the few-shot csv files should it be present there
few_shot_csv_files = [x for x in few_shot_csv_files if not dataset_name in x]
# setup for the few-shot examples
fs_dataset_names = [utils.get_dataset_name(x) for x in few_shot_csv_files]
fs_feature_names = [
utils.get_feature_names(fs_csv_file) for fs_csv_file in few_shot_csv_files
]
fs_prefix_feature = [
utils.adjust_num_prefix_features(csv_file, num_prefix_features, fs_csv_file)
for fs_csv_file in few_shot_csv_files
]
if llm.chat_mode:
        # construct the prompt
prefixes = [
f"Dataset: {dataset_name}. Feature Names: "
+ ", ".join(feature_names[:num_prefix_features])
]
suffixes = [", ".join(feature_names[num_prefix_features:])]
few_shot = []
for fs_dataset_name, fs_feature_name, fs_prefix_feature in zip(
fs_dataset_names, fs_feature_names, fs_prefix_feature
):
few_shot.append(
(
[
f"Dataset: {fs_dataset_name}. Feature Names: "
+ ", ".join(fs_feature_name[:fs_prefix_feature])
],
[", ".join(fs_feature_name[fs_prefix_feature:])],
)
)
        # execute the prompt
_, _, responses = prefix_suffix_chat_completion(
llm,
prefixes,
suffixes,
system_prompt,
few_shot=few_shot,
num_queries=1,
)
response = responses[0]
else:
# construct the prompt
prompt = ""
for fs_dataset_name, fs_feature_name, fs_prefix_feature in zip(
fs_dataset_names, fs_feature_names, fs_prefix_feature
):
prompt += (
f"Dataset: {fs_dataset_name}.\nNumber of Features: {len(fs_feature_name)}\nFeature Names: "
+ ", ".join(fs_feature_name)
+ "\n\n"
)
prompt += (
f"Dataset: {dataset_name}\nNumber of Features: {len(feature_names)}\nFeature Names: "
+ ", ".join(feature_names[:num_prefix_features])
+ ", "
)
# execute the prompt
response = send_completion(llm, prompt)
# consider the response only until the first '\n\n'
idx = response.find("\n\n")
if idx != -1:
response = response[:idx]
print(
bcolors.BOLD
+ "Feature Names Test\nFeature Names: "
+ bcolors.ENDC
+ ", ".join(feature_names[num_prefix_features:])
+ bcolors.BOLD
+ "\nModel Generation: "
+ bcolors.ENDC
+ response
)
# TODO do some sort of evaluation
# for example, return true if it completes all but X of the feature names, correcting for upper/lower case
# at least do formatted printing of the results
####################################################################################
# Feature Values
####################################################################################
####################################################################################
# Header Test
####################################################################################
def header_test(
csv_file: str,
llm: Union[LLM_Interface, str],
split_rows: list[int] = [2, 4, 6, 8],
completion_length: int = 500,
few_shot_csv_files: list[str] = DEFAULT_FEW_SHOT_CSV_FILES,
system_prompt: str = "default",
):
"""Header test, using other csv files as few-shot examples.
Splits the csv file at random positions in rows 2, 4, 6, and 8. Performs 1 query for each split. Reports the best completion.
NOTE: This test might fail if the header and rows of the csv file are very long, and the model has a small context window.
NOTE: in the end, this is the case for all of our tests :)
"""
llm = __llm_setup(llm)
few_shot_csv_files = __validate_few_shot_files(csv_file, few_shot_csv_files)
# default system prompt?
if system_prompt == "default":
system_prompt = tabmem.config.system_prompts["header"]
# load the csv file as a single contiguous string. also load the rows to determine offsets within the string
data = utils.load_csv_string(csv_file, header=True)
csv_rows = utils.load_csv_rows(csv_file, header=True)
# load the few-shot examples
few_shot_data = []
for fs_csv_file in few_shot_csv_files:
fs_data = utils.load_csv_string(fs_csv_file, header=True)
few_shot_data.append(fs_data)
# perform the test multiple times, cutting the dataset at random positions in rows split_rows
num_completions = -1
header, completion = None, None
for i_row in split_rows:
offset = np.sum([len(row) for row in csv_rows[: i_row - 1]])
offset += np.random.randint(
len(csv_rows[i_row]) // 3, 2 * len(csv_rows[i_row]) // 3
)
prefixes = [data[:offset]]
suffixes = [data[offset : offset + completion_length]]
few_shot = [
([fs_data[:offset]], [fs_data[offset : offset + completion_length]])
for fs_data in few_shot_data
]
# chat mode: use few-shot examples
if llm.chat_mode:
_, _, response = prefix_suffix_chat_completion(
llm, prefixes, suffixes, system_prompt, few_shot=few_shot, num_queries=1
)
response = response[0]
else: # otherwise, plain completion
response = send_completion(llm, prefixes[0])
# find the first digit where the response and the completion disagree
idx = -1000
for idx, (c, r) in enumerate(zip(data[offset:], response)):
if c != r:
break
if idx == len(response) - 1 and response[idx] == data[offset + idx]:
idx += 1 # no disagreement found, set idx to length of the response
# is this the best completion so far?
if idx > num_completions:
num_completions = idx
header = prefixes[0]
completion = response
# for the printing, we first color all green up to the first disagreement
completion_print = bcolors.Green + completion[:num_completions]
# then color red up to the beginning of the next row, if any
remaining_completion = completion[num_completions:]
idx = remaining_completion.find("\n")
if idx == -1:
completion_print += bcolors.Red + remaining_completion
else:
completion_print += bcolors.Red + remaining_completion[:idx] + "\n"
remaining_completion = remaining_completion[idx + 1 :]
# for all additional rows, green up to the first disagreement, all red after that
completion_rows = remaining_completion.split("\n")
# the corresponding next row in the csv file
data_idx = data[len(header) + num_completions :].find("\n")
data_rows = data[len(header) + num_completions + data_idx + 1 :].split("\n")
for completion_row, data_row in zip(completion_rows, data_rows):
if completion_row == data_row:
completion_print += bcolors.Green + completion_row + "\n"
continue
# not equal, find the first disagreement
idx = -1000
for idx, (c, r) in enumerate(zip(data_row, completion_row)):
if c != r:
break
if idx == len(completion_row) - 1 and completion_row[idx] == data_row[idx]:
idx += 1
# print first part green, second part red
completion_print += (
bcolors.Green
+ completion_row[:idx]
+ bcolors.Red
+ completion_row[idx:]
+ "\n"
)
# remove final new line
completion_print = completion_print.rstrip("\n")
# print the result
print(
bcolors.BOLD
+ "Header Test: "
+ bcolors.ENDC
+ bcolors.Black
+ header
+ completion_print
+ bcolors.ENDC
+ bcolors.BOLD
+ "\nHeader Test Legend: "
+ bcolors.ENDC
+ "Prompt "
+ bcolors.Green
+ "Correct "
+ bcolors.Red
+ "Incorrect"
+ bcolors.ENDC
)
# TODO return true if it completes the given row, as well as the next row.
# TODO count the number of correctly completed rows and print this number
####################################################################################
# Row Completion
####################################################################################
def row_completion_test(
csv_file: str,
llm: Union[LLM_Interface, str],
num_prefix_rows=10,
num_queries=50,
few_shot=7,
out_file=None,
system_prompt: str = "default",
):
"""Row completion test: Complete the next row of the csv file, given the previous rows."""
llm = __llm_setup(llm)
if system_prompt == "default": # default system prompt?
system_prompt = tabmem.config.system_prompts["row-completion"]
# what fraction of the rows are duplicates?
rows = utils.load_csv_rows(csv_file)
frac_duplicates = 1 - len(set(rows)) / len(rows)
if frac_duplicates == 0:
print(
bcolors.BOLD
+ "Info: "
+ bcolors.ENDC
+ "All the rows in the dataset are unique."
)
else:
print(
bcolors.BOLD
+ "Info: "
+ bcolors.ENDC
+ f"{100*frac_duplicates:.2f}% of the rows in this dataset are duplicates."
)
    # ask the model to perform row chat completion (execute the prompt)
if llm.chat_mode:
test_prefixes, test_suffixes, responses = row_chat_completion(
llm,
csv_file,
system_prompt,
num_prefix_rows,
num_queries,
few_shot,
out_file,
)
else:
test_prefixes, test_suffixes, responses = row_completion(
llm, csv_file, num_prefix_rows, num_queries, out_file
)
# count the number of exact matches
# NOTE here we assume that the test suffix is a single row that is unique, i.e. no duplicate rows
num_exact_matches = 0
for test_suffix, response in zip(test_suffixes, responses):
if test_suffix.strip() in response.strip():
num_exact_matches += 1
# the statistical test using the levenshtein distance TODO taken out of current version although it works
# test_prefix_rows = [prefix.split("\n") for prefix in test_prefixes]
# test_result = analysis.levenshtein_distance_t_test(
# responses, test_suffixes, test_prefix_rows
# )
# print the result
print(
bcolors.BOLD
+ "Row Completion Test: "
+ bcolors.ENDC
+ f"{num_exact_matches}/{num_queries} exact matches."
# + bcolors.BOLD
# + "\nLevenshtein distance test (p-value): "
# + bcolors.ENDC
# + f"{test_result.pvalue:.3f}."
)
return test_prefixes, test_suffixes, responses
####################################################################################
# Feature Completion
####################################################################################
def feature_completion_test(
csv_file: str,
llm: Union[LLM_Interface, str],
feature_name: str = None,
num_queries=100,
few_shot=5,
out_file=None,
system_prompt: str = "default",
):
"""Feature completion test where we attempt to predict a single rare feature & count the number of exact matches.
The basic prompt format is the following:
System: <system_prompt>
User: Feature 1 = value 1, Feature 2 = value 2, ..., Feature n = value n
Response: Feature {feature_name} = value
This can be modified in the following ways:
- Include few-shot examples from other csv files.
- Don't use the feature names, but only the values.
"""
llm = __llm_setup(llm)
# TODO statistical analysis of the uniqueness of the feature (i.e., is the test appropriate?!)
if system_prompt == "default": # default system prompt?
system_prompt = tabmem.config.system_prompts["feature-completion"]
# if no feature value is provided, automatically select the most unique feature
if feature_name is None:
feature_name, frac_unique_values = analysis.find_most_unique_feature(csv_file)
print(
bcolors.BOLD
+ "Info: "
+ bcolors.ENDC
+ f"Using feature {feature_name} with {100*frac_unique_values:.2f}% unique values."
)
# all the other features are the conditional features
feature_names = utils.get_feature_names(csv_file)
cond_feature_names = [f for f in feature_names if f != feature_name]
if not llm.chat_mode: # wrap base model to take chat queries
def build_prompt(messages):
prompt = ""
for m in messages:
if m["role"] == "user":
prompt += m["content"]
elif m["role"] == "assistant":
prompt += ", " + m["content"] + "\n\n"
prompt += ", "
return prompt
llm = ChatWrappedLLM(llm, build_prompt, ends_with="\n\n")
# execute the prompt | _, test_suffixes, responses = feature_values_chat_completion( | 9 | 2023-11-14 18:34:51+00:00 | 12k |
WindowsSov8forUs/bestdori_api | bestdori/songs.py | [
{
"identifier": "Chart",
"path": "bestdori/charts.py",
"snippet": "class Chart(list[NoteType]):\n '''谱面类,统合针对谱面的一层操作\n\n 参数:\n chart (list[dict[str, Any]]): 原始谱面代码'''\n # 初始化\n def __init__(self, chart: list[dict[str, Any]]) -> None:\n '''谱面类,统合针对谱面的一层操作\n\n 参数:\n chart (list[dict[str, Any]]): 原始谱面代码'''\n super().__init__()\n for note in chart:\n # 遍历分类添加\n if note['type'] in ['Long', 'Slide']:\n self.append(Slide(**note))\n elif note['type'] == 'BPM':\n self.append(BPM(**note))\n elif note['type'] == 'Single':\n self.append(Single(**note))\n elif note['type'] == 'Directional':\n self.append(Directional(**note))\n else:\n # 删除其他音符\n continue\n return\n \n # 谱面规范化处理\n @classmethod\n def normalize(cls, chart: list[dict[str, Any]]) -> 'Chart':\n '''谱面规范化处理\n\n 参数:\n chart (list[dict[str, Any]]): 待处理谱面\n\n 返回:\n Chart: 处理后谱面\n '''\n normalized_chart: cls = cls(chart)\n # 对谱面进行排序\n normalized_chart.sort(key=lambda x: x.beat)\n # 处理可能出现的 BPM 错位\n if not isinstance(normalized_chart[0], BPM):\n offset: float = -1.0 # 记录 offset 修正\n # 第一位不是 BPM,找寻真正的 BPM 线\n for note in normalized_chart:\n if isinstance(note, BPM):\n offset = note.beat\n break\n if offset < 0: # 没有找到 BPM\n raise ValueError('谱面内未找到 BPM 线。')\n # 对谱面节拍进行修正\n for note in normalized_chart:\n note.beat_move(-offset)\n if isinstance(note, Slide):\n for connection in note.connections:\n if connection.beat < 0:\n connection.beat = 0\n else:\n break\n else:\n if note.beat < 0:\n note.beat = 0\n else:\n break\n \n # 处理可能出现的不合法滑条节点\n for note in normalized_chart:\n if not isinstance(note, Slide):\n continue\n index: int = 0\n for connection in note.connections:\n if index < (len(note.connections) - 1):\n if connection.flick:\n connection.flick = False\n if 0 < index < (len(note.connections) - 1):\n if connection.skill:\n connection.skill = False\n index += 1\n \n # 对谱面节拍进行修正\n if normalized_chart[0].beat != 0:\n offset = normalized_chart[0].beat\n for note in normalized_chart:\n note.beat_move(-offset)\n return normalized_chart\n \n # 谱面数据统计\n def count(self) -> Statistics:\n '''谱面数据统计\n\n 返回:\n Statistics: 统计到的谱面详细数据\n '''\n # 初始化统计数据\n start_beat = 0.0 # 谱面开始 beat 值\n end_beat = 0.0 # 谱面结束 beat 值\n prev_bpm = 120.0 # 上一个 BPM 线的 BPM 值\n prev_bpm_beat = 0.0 # 上一个 BPM 线的 beat 值\n total_notes = 0 # 总物量\n bpm_list: list[dict[str, float]] = [] # BPM 统计列表,统计所有出现的 BPM 及其有效时间\n \n # 遍历谱面数据\n for note in self:\n # 谱面为一个字典列表,每一个 note 都是一个字典\n if isinstance(note, BPM): # 如果当前是 BPM\n if note.bpm >= 0: # 如果当前 BPM 大于等于 0\n # 如果不是谱面一开始的 BPM 线且已有 note 被记录(即已出现过有效 bpm )\n if note.beat > 0 and total_notes > 0:\n if prev_bpm_beat <= start_beat: # 如果上一个 BPM 线先于第一个 note\n prev_bpm_beat = start_beat\n bpm_duration = (note.beat - prev_bpm_beat) * 60.0 / prev_bpm # 计算持续时间\n bpm_flag: bool = False # 检测 BPM 表中是否已存在指定 BPM\n for bpm_dict in bpm_list:\n if bpm_dict['bpm'] == prev_bpm:\n bpm_dict['duration'] += bpm_duration\n bpm_flag = True\n break\n if not bpm_flag: # 如果 BPM 未被记录\n bpm_dict = {\n 'bpm': prev_bpm,\n 'duration': bpm_duration\n }\n bpm_list.append(bpm_dict)\n prev_bpm = note.bpm\n prev_bpm_beat = note.beat\n continue\n \n if isinstance(note, (Single, Directional)): # 如果当前是单键或方向滑键\n # 记录 beat\n if end_beat < note.beat: # 如果当前 beat 更靠后\n end_beat = note.beat # 始终记录结束 beat\n if start_beat <= 0 or start_beat > note.beat: # 如果未记录起始 beat 或已记录的并不是起始 beat\n start_beat = note.beat\n \n total_notes += 1 # 累加一个物量\n continue\n \n if isinstance(note, Slide): # 如果是绿条\n # 绿条将会有一个 `connections` 列表用于记录节点\n for connection in note.connections:\n if not connection.hidden: # 忽略隐藏节点\n # 记录 beat\n if end_beat < connection.beat: # 
如果当前 beat 更靠后\n end_beat = connection.beat # 始终记录结束 beat\n if start_beat <= 0 or start_beat > connection.beat: # 如果未记录起始 beat 或已记录的并不是起始 beat\n start_beat = connection.beat\n \n total_notes += 1 # 累加一个物量\n continue\n \n # 当走出遍历后表明谱面已遍历至最后一个 note ,进行最后的处理\n if prev_bpm_beat < end_beat: # 如果最后一个 note 在最后一个 BPM 线之前\n bpm_duration = (end_beat - prev_bpm_beat) * 60.0 / prev_bpm # 计算持续时间\n bpm_flag: bool = False # 检测 BPM 表中是否已存在指定 BPM\n for bpm_dict in bpm_list:\n if bpm_dict['bpm'] == prev_bpm:\n bpm_dict['duration'] += bpm_duration\n bpm_flag = True\n break\n if not bpm_flag: # 如果 BPM 未被记录\n bpm_dict = {\n 'bpm': prev_bpm,\n 'duration': bpm_duration\n }\n bpm_list.append(bpm_dict)\n \n # 遍历 BPM 列表,计算总时长并获取 BPM 数值\n duration = 0.0 # 谱面总持续时长\n bpm_main = 0.0 # 主要 BPM\n bpm_main_dura = 0.0 # 主要 BPM 持续时长\n bpm_min = 2147483647.0 # 最低 BPM\n bpm_max = 0.0 # 最高 BPM\n for bpm_info in bpm_list: # 遍历\n if bpm_info['duration'] > bpm_main_dura: # 如果持续时间更长\n bpm_main_dura = bpm_info['duration']\n bpm_main = bpm_info['bpm']\n if bpm_min > bpm_info['bpm']: # 如果更小\n bpm_min = bpm_info['bpm']\n if bpm_max < bpm_info['bpm']: # 如果更大\n bpm_max = bpm_info['bpm']\n duration += bpm_info['duration'] # 累加持续时长\n \n return Statistics(\n duration,\n total_notes,\n [bpm_min, bpm_max] if bpm_min != bpm_max else [bpm_min],\n bpm_main\n )\n\n # 转换为字典列表对象\n def to_list(self) -> list[dict[str, Any]]:\n '''将 `Chart` 谱面转换为 `list[dict[str, Any]]` 对象'''\n chart_data: list[dict[str, Any]] = []\n for note in self:\n chart_data.append(note.__dict__)\n return chart_data\n \n # 转换为 json 字符串\n def json(self) -> str:\n '''将 `Chart` 谱面转换为 `json` 字符串'''\n return dumps(self.to_list(), ensure_ascii=False)\n\n # 通过 json 字符串转换为 Chart 谱面\n @classmethod\n def from_json(cls, data: str) -> 'Chart':\n '''通过 `json` 字符串转换为 `Chart` 谱面\n\n 参数:\n data (str): 谱面 `json` 字符串\n\n 返回:\n Chart: 谱面对象 `bestdori.chart.Chart`\n '''\n return cls(loads(data))\n \n # 获取官方谱面\n @classmethod\n def get_chart(\n cls,\n id_: int,\n diff: Literal['easy', 'normal', 'hard', 'expert', 'special']='expert',\n proxy: Optional[str]=None\n ) -> 'Chart':\n '''获取官方谱面\n\n 参数:\n id_ (int): 谱面 ID\n \n diff (Literal['easy', 'normal', 'hard', 'expert', 'special'], optional): 难度名称\n \n proxy (Optional[str], optional): 代理服务器\n\n 返回:\n Chart: 获取到的谱面对象 `bestdori.chart.Chart`\n '''\n response = Api(API['charts']['info'].format(id=id_, diff=diff), proxy).request('get')\n return cls.normalize(response.json())"
},
{
"identifier": "get_list",
"path": "bestdori/post.py",
"snippet": "@overload\ndef get_list(\n proxy: Optional[str]=None,\n *,\n search: str='',\n category_name: Literal['SELF_POST']='SELF_POST',\n category_id: Literal['chart']='chart',\n tags: list[Tag]=[],\n order: Literal['TIME_DESC', 'TIME_ASC']='TIME_DESC',\n limit: int=20,\n offset: int=0\n) -> dict[str, Any]:\n '''搜索社区谱面\n ```python\n # 以 'Arghena' 为关键词,搜索社区谱面\n Post.search(search='Arghena', caregory_name='SELF_POST', category_id='chart')\n ```\n\n 参数:\n proxy (Optional[str], optional): 代理服务器\n\n search (str, optional): 搜索关键词,默认为空\n \n category_name (Literal['SELF_POST'], optional): 搜索的帖子类型 `SELF_POST`\n \n category_id (Literal['chart', 'text'], optional): 搜索的画廊种类 `chart`\n \n tags (list[Tag], optional): 搜索的标签,默认为空\n \n order (Literal['TIME_DESC', 'TIME_ASC'], optional): 帖子排序,默认时间倒序\n \n limit (int, optional): 展示出的帖子数,默认 20\n \n offset (int, optional): 忽略前面的 `offset` 个帖子,默认 0\n\n 返回:\n dict[str, Any]: 搜索结果\n ```python\n result: bool # 是否有响应\n count: int # 搜索到的谱面总数\n posts: list[dict[str, Any]] # 列举出的谱面\n ```\n '''\n ..."
},
{
"identifier": "ASSETS",
"path": "bestdori/utils/utils.py",
"snippet": "ASSETS = {\n 'characters': {\n 'character_kv_image': 'ui/character_kv_image/{id:>03d}_rip/image.png',\n 'resourceset': 'characters/resourceset/{resource_set_name}_rip/{name}_{type}.png',\n 'livesd': 'characters/livesd/{sd_resource_name}_rip/sdchara.png'\n },\n 'event': {\n 'banner': 'event/{asset_bundle_name}/images_rip/banner.png',\n 'logo': 'event/{asset_bundle_name}/images_rip/logo.png',\n 'topscreen': 'event/{asset_bundle_name}/topscreen_rip/{type}_eventtop.png',\n 'loginbouns': 'event/loginbonus/{asset_bundle_name}_rip/background.png'\n },\n 'songs': {\n 'musicjacket': 'musicjacket/musicjacket{index:>03d}_rip/assets-star-forassetbundle-startapp-musicjacket-musicjacket{index:>03d}-{jacket_image}-jacket.png',\n 'sound': 'sound/bgm{id:>03d}_rip/bgm{id:>03d}.mp3',\n 'musicscore': ''\n },\n 'thumb': {\n 'chara': 'thumb/chara/card{id:>05d}_rip/{resource_set_name}_{type}.png',\n 'degree': 'thumb/degree_rip/{degree_name}.png',\n 'costume': 'thumb/costume/group{id}_rip/{asset_bundle_name}.png',\n },\n 'stamp': {\n 'get': 'stamp/01_rip/{image_name}.png'\n },\n 'homebanner': {\n 'get': 'homebanner_rip/{banner_asset_bundle_name}.png'\n },\n 'gacha': {\n 'screen': 'gacha/screen/gacha{id}_rip/{asset_name}.png'\n },\n 'comic': {\n 'comic': 'comic/comic_{type}/{asset_bundle_name}_rip/{asset_bundle_name}.png',\n 'thumbnail': 'comic/comic_{type}_thumbnail/{asset_bundle_name}_rip/{asset_bundle_name}.png'\n },\n 'missions': {\n 'info': 'missions/{id}.json',\n 'all': 'missions/all.{index}.json'\n },\n 'band': {\n 'logo': 'band/logo/{id:>03d}_rip/{type}.png'\n },\n 'live2d': {\n 'buildData': 'live2d/chara/{asset_bundle_name}_rip/buildData.asset'\n }\n}"
},
{
"identifier": "API",
"path": "bestdori/utils/utils.py",
"snippet": "API = {\n 'user': {\n 'info': 'user',\n 'login': 'user/login',\n 'me': 'user/me'\n },\n 'post': {\n 'basic': 'post/basic',\n 'details': 'post/details',\n 'list': 'post/list',\n 'tag': 'post/tag',\n 'post': 'post',\n 'find': 'post/find',\n 'like': 'post/like'\n },\n 'charts': {\n 'info': 'charts/{id}/{diff}.json'\n },\n 'characters': {\n 'info': 'characters/{id}.json',\n 'all': 'characters/all.{index}.json'\n },\n 'cards': {\n 'info': 'cards/{id}.json',\n 'all': 'cards/all.{index}.json'\n },\n 'costumes': {\n 'info': 'costumes/{id}.json',\n 'all': 'costumes/all.{index}.json'\n },\n 'events': {\n 'info': 'events/{id}.json',\n 'all': 'events/all.{index}.json',\n 'top': 'eventtop/data'\n },\n 'gacha': {\n 'info': 'gacha/{id}.json',\n 'all': 'gacha/all.{index}.json'\n },\n 'songs': {\n 'info': 'songs/{id}.json',\n 'all': 'songs/all.{index}.json'\n },\n 'loginCampaigns': {\n 'info': 'loginCampaigns/{id}.json',\n 'all': 'loginCampaigns/all.{index}.json'\n },\n 'bands': {\n 'all': 'bands/all.{index}.json',\n 'main': 'bands/main.{index}.json'\n },\n 'upload': {\n 'file': 'upload/file/{hash}',\n 'prepare': 'upload/prepare',\n 'upload': 'upload',\n 'status': 'upload/status/{hash}'\n },\n 'misc': {\n 'llsif': 'misc/llsif.{index}.json'\n },\n 'all': {\n 'skills': 'skills/all.{index}.json',\n 'stamps': 'stamps/all.{index}.json',\n 'degrees': 'degrees/all.{index}.json',\n 'meta': 'songs/meta/all.{index}.json',\n 'archives': 'archives/all.{index}.json',\n 'miracleTicketExchanges': 'miracleTicketExchanges/all.{index}.json',\n 'comics': 'comics/all.{index}.json',\n }\n}"
},
{
"identifier": "Assets",
"path": "bestdori/utils/network.py",
"snippet": "class Assets:\n '''获取 Bestdori 资源数据\n\n 参数:\n url (str): 请求的资源地址\n \n server (Literal['jp', 'en', 'tw', 'cn', 'kr']): 资源所在服务器\n \n proxy (Optional[str]): 代理服务器'''\n url: str\n '''请求的资源地址'''\n server: Literal['jp', 'en', 'tw', 'cn', 'kr', 'llsif']\n '''资源所在服务器'''\n proxy: Optional[str]=None\n '''代理服务器'''\n # 初始化\n def __init__(\n self,\n url: str,\n server: Literal['jp', 'en', 'tw', 'cn', 'kr', 'llsif'],\n proxy: Optional[str]=None\n ) -> None:\n '''获取 Bestdori 资源数据\n\n 参数:\n url (str): 请求的资源地址\n \n server (Literal['jp', 'en', 'tw', 'cn', 'kr', 'llsif']): 资源所在服务器\n \n proxy (Optional[str]): 代理服务器\n '''\n self.url = url\n self.server = server\n self.proxy = proxy\n return\n \n # 获取资源连接\n def get_url(self) -> str:\n '''获取资源连接\n\n 返回:\n str: 获取的资源连接 `str`\n '''\n # 如果服务器为 llsif 则转接方法\n if self.server == 'llsif':\n return self._get_niconi_url()\n \n # 处理接收到的 URL\n if self.url.startswith('http://') or self.url.startswith('https://'):\n self.url = self.url\n else:\n self.url = f'https://bestdori.com/assets/{self.server}/' + self.url\n return self.url\n \n # 从 card.niconi.co.ni 获取资源连接\n def _get_niconi_url(self) -> str:\n '''从 card.niconi.co.ni 获取资源连接\n\n 返回:\n str: 获取的资源连接 `str`\n '''\n # 处理接收到的 URL\n if self.url.startswith('http://') or self.url.startswith('https://'):\n self.url = self.url\n else:\n self.url = f'https://card.niconi.co.ni/asset/' + self.url\n return self.url\n \n # 获取资源\n def get(self) -> bytes:\n '''获取资源\n\n 返回:\n bytes: 获取的资源字节数据 `bytes`\n '''\n # 如果服务器为 llsif 则转接方法\n if self.server == 'llsif':\n return self._get_from_niconi()\n \n # 处理接收到的 URL\n if self.url.startswith('http://') or self.url.startswith('https://'):\n self.url = self.url\n else:\n self.url = f'https://bestdori.com/assets/{self.server}/' + self.url\n # 构建一个请求体\n request = Request('get', self.url)\n # 构建代理服务器字典\n if self.proxy is not None:\n proxies = {'http://': self.proxy, 'https://': self.proxy}\n else:\n proxies = None\n \n # 发送请求并获取响应\n with Client(proxies=cast(dict, proxies)) as client:\n response = client.send(request)\n client.close()\n \n response.raise_for_status()\n # 检测响应资源是否存在\n content_type = response.headers.get('content-type', None)\n if content_type is None or content_type == 'text/html':\n raise AssetsNotExistError(self.url)\n return response.content\n \n # 从 card.niconi.co.ni 获取资源\n def _get_from_niconi(self) -> bytes:\n '''从 card.niconi.co.ni 获取资源\n\n 返回:\n bytes: 获取的资源字节数据 `bytes`\n '''\n # 处理接收到的 URL\n if self.url.startswith('http://') or self.url.startswith('https://'):\n self.url = self.url\n else:\n self.url = f'https://card.niconi.co.ni/asset/' + self.url\n # 构建一个请求体\n request = Request('get', self.url)\n # 构建代理服务器字典\n if self.proxy is not None:\n proxies = {'http://': self.proxy, 'https://': self.proxy}\n else:\n proxies = None\n \n # 发送请求并获取响应\n with Client(proxies=cast(dict, proxies)) as client:\n response = client.send(request)\n client.close()\n \n response.raise_for_status()\n # 检测响应资源是否存在\n content_type = response.headers.get('content-type', None)\n if content_type is None or content_type == 'text/html':\n raise AssetsNotExistError(self.url)\n return response.content"
},
{
"identifier": "Api",
"path": "bestdori/utils/network.py",
"snippet": "class Api:\n '''向 Bestdori 发送 API 请求类\n\n 参数:\n api (str): 请求的 API 地址\n \n proxy (Optional[str]): 代理服务器'''\n api: str\n '''请求的 API 地址'''\n proxy: Optional[str]=None\n '''代理服务器'''\n headers: dict[str, str]\n '''请求头'''\n # 初始化\n def __init__(\n self,\n api: str,\n proxy: Optional[str]=None\n ) -> None:\n '''初始化'''\n self.api = api\n self.proxy = proxy\n self.headers = {'Content-Type': 'application/json;charset=UTF-8'}\n return\n \n # 请求发送\n def request(\n self,\n method: Literal['get', 'post'],\n *,\n cookies: Optional[Cookies]=None,\n params: Optional[dict[str, Any]]=None,\n data: Optional[dict[str, Any]]=None,\n files: Optional[dict[str, tuple[str, BufferedReader]]]=None\n ) -> Response:\n '''请求发送\n\n 参数:\n method (Literal['get', 'post']): API 调用方法\n \n cookies (Optional[Cookies], optional): Cookies\n \n params (Optional[dict[str, Any]], optional): 调用参数\n \n data (Optional[dict[str, Any]], optional): 调用参数,将以 `json` 字符串形式发送\n \n files (Optional[dict[str, tuple[str, BufferedReader]]], optional): 发送文件参数\n\n 返回:\n Response: 收到的响应\n '''\n # 处理接收到的 API\n if self.api.startswith('http://') or self.api.startswith('https://'):\n self.api = self.api\n else:\n self.api = 'https://bestdori.com/api/' + self.api\n # 构建一个请求体\n request = Request(\n method,\n self.api,\n cookies=cookies,\n params=params,\n data=cast(dict, dumps(data)) if data is not None else data,\n files=files,\n headers=self.headers if not self.api.endswith('/upload') else None\n )\n # 构建代理服务器字典\n if self.proxy is not None:\n proxies = {'http://': self.proxy, 'https://': self.proxy}\n else:\n proxies = None\n \n # 发送请求并获取响应\n with Client(proxies=cast(dict, proxies)) as client:\n response = client.send(request)\n client.close()\n \n # 处理接收到的响应\n response.raise_for_status()\n # 判断接收到的响应是否为 json 格式\n if 'application/json' not in (content_type := response.headers.get('content-type', None)):\n if content_type is not None:\n return response\n else:\n raise Exception('接收到的响应没有 content-type。')\n \n if isinstance((response_data := response.json()), dict):\n if (result := response_data.get('result', None)) is not None:\n if result is False:\n if (code := response_data.get('code', None)) is not None:\n if code in REQUEST_EXCEPTION.keys(): # 若错误码已被记录\n exception_class = REQUEST_EXCEPTION[code]\n if params is not None:\n raise exception_class(self.api, **params)\n elif data is not None:\n raise exception_class(self.api, **data)\n else:\n raise exception_class(self.api)\n else:\n raise RequestException(self.api, code)\n else:\n raise RequestException(self.api)\n return response"
},
{
"identifier": "DiffNotExistError",
"path": "bestdori/exceptions.py",
"snippet": "class DiffNotExistError(BaseException):\n '''歌曲难度不存在'''\n # 初始化\n def __init__(self, diff: str) -> None:\n msg = f'歌曲不存在难度 {diff}。'\n super().__init__(msg)\n self.message = msg\n '''错误信息'''\n return"
},
{
"identifier": "SongNotExistError",
"path": "bestdori/exceptions.py",
"snippet": "class SongNotExistError(BaseException):\n '''歌曲不存在'''\n # 初始化\n def __init__(self, id_: int) -> None:\n msg = f'歌曲 ID {id_} 不存在。'\n super().__init__(msg)\n self.message = msg\n '''错误信息'''\n return"
}
] | from typing import Optional, Literal, Any
from requests.exceptions import HTTPError
from .charts import Chart
from .post import get_list
from .utils.utils import ASSETS, API
from .utils.network import Assets, Api
from .exceptions import (
DiffNotExistError,
SongNotExistError
) | 8,202 | proxy: Optional[str]=None) -> None:
'''歌曲封面类'''
self._index: int = index
'''数据包序列号'''
self._jacket_image: str = jacket_image
'''封面文件名'''
self._server: Literal['jp', 'en', 'tw', 'cn', 'kr'] = server
'''封面所在服务器'''
self._proxy: Optional[str] = proxy
'''代理服务器'''
return
# 获取封面 url
@property
def url(self) -> str:
'''获取封面 url'''
return Assets(
ASSETS['songs']['musicjacket'].format(
index=self._index, jacket_image=self._jacket_image
), self._server, self._proxy
).get_url()
# 获取封面字节数据
@property
def bytes(self) -> bytes:
'''获取封面字节数据'''
return Assets(
ASSETS['songs']['musicjacket'].format(
index=self._index, jacket_image=self._jacket_image
), self._server, self._proxy
).get()
# 歌曲类
class Song:
'''歌曲类
参数:
id_ (int): 歌曲 ID
proxy (Optional[str], optional): 代理服务器
'''
# 初始化
def __init__(self, id_: int, proxy: Optional[str]=None) -> None:
'''歌曲类
参数:
id_ (int): 歌曲 ID
proxy (Optional[str], optional): 代理服务器
'''
self.id: int = id_
'''歌曲 ID'''
self._info: dict[str, Any] = {}
'''歌曲信息'''
self.proxy: Optional[str] = proxy
'''代理服务器'''
# 检测 ID 是否存在
all_id = get_all(0, proxy=proxy)
if not str(id_) in all_id.keys():
raise SongNotExistError(id_)
return
# 获取歌曲信息
def get_info(self) -> dict[str, Any]:
'''获取歌曲信息
返回:
dict[str, Any]: 歌曲详细信息
'''
if len(self._info) <= 0:
# 如果没有歌曲信息存储
response = Api(
API['songs']['info'].format(id=self.id), proxy=self.proxy
).request('get')
self._info = dict(response.json())
return self._info
# 获取歌曲所在服务器
@property
def server(self) -> Literal['jp', 'en', 'tw', 'cn', 'kr']:
'''获取歌曲所在服务器
返回:
Literal['jp', 'en', 'tw', 'cn', 'kr']: 歌曲所在服务器
'''
info = self.get_info()
# 获取 publishedAt 数据
if (published_at := info.get('publishedAt', None)) is None:
raise Exception('无法获取歌曲发布时间。')
# 根据 publishedAt 数据判断服务器
if published_at[0] is not None: return 'jp'
elif published_at[1] is not None: return 'en'
elif published_at[2] is not None: return 'tw'
elif published_at[3] is not None: return 'cn'
elif published_at[4] is not None: return 'kr'
else:
raise Exception('无法获取歌曲服务器。')
# 获取歌曲名称
@property
def name(self) -> str:
'''获取歌曲名称
返回:
str: 歌曲名称
'''
info = self.get_info()
# 获取 musicTitle 数据
if (music_title := info.get('musicTitle', None)) is None:
raise Exception('无法获取歌曲名称。')
# 获取第一个非 None 歌曲名称
try:
return next(filter(lambda x: x is not None, music_title))
except StopIteration:
raise Exception('无法获取歌曲名称。')
# 获取歌曲谱面
def get_chart(
self,
diff: Literal['easy', 'normal', 'hard', 'expert', 'special']='expert'
| '''`bestdori.songs`
BanG Dream! 歌曲相关操作'''
# 获取总歌曲信息
def get_all(index: Literal[0, 5]=5, proxy: Optional[str]=None) -> dict[str, dict[str, Any]]:
'''获取总歌曲信息
参数:
index (Literal[0, 5], optional): 指定获取哪种 `all.json`
`0`: 仅获取所有已有歌曲 ID `all.0.json`
`5`: 获取所有已有歌曲的简洁信息 `all.5.json`,默认为该项
proxy (Optional[str], optional): 代理服务器
返回:
dict[str, dict[str, Any]]: 获取到的总歌曲信息
'''
return Api(API['songs']['all'].format(index=index), proxy=proxy).request('get').json()
# 歌曲封面内部类
class Jacket:
'''歌曲封面类
参数:
url (str): 封面链接
bytes (bytes): 封面字节数据
'''
# 初始化
def __init__(
self,
index: int,
jacket_image: str,
server: Literal['jp', 'en', 'tw', 'cn', 'kr'],
proxy: Optional[str]=None) -> None:
'''歌曲封面类'''
self._index: int = index
'''数据包序列号'''
self._jacket_image: str = jacket_image
'''封面文件名'''
self._server: Literal['jp', 'en', 'tw', 'cn', 'kr'] = server
'''封面所在服务器'''
self._proxy: Optional[str] = proxy
'''代理服务器'''
return
# 获取封面 url
@property
def url(self) -> str:
'''获取封面 url'''
return Assets(
ASSETS['songs']['musicjacket'].format(
index=self._index, jacket_image=self._jacket_image
), self._server, self._proxy
).get_url()
# 获取封面字节数据
@property
def bytes(self) -> bytes:
'''获取封面字节数据'''
return Assets(
ASSETS['songs']['musicjacket'].format(
index=self._index, jacket_image=self._jacket_image
), self._server, self._proxy
).get()
# 歌曲类
class Song:
'''歌曲类
参数:
id_ (int): 歌曲 ID
proxy (Optional[str], optional): 代理服务器
'''
# 初始化
def __init__(self, id_: int, proxy: Optional[str]=None) -> None:
'''歌曲类
参数:
id_ (int): 歌曲 ID
proxy (Optional[str], optional): 代理服务器
'''
self.id: int = id_
'''歌曲 ID'''
self._info: dict[str, Any] = {}
'''歌曲信息'''
self.proxy: Optional[str] = proxy
'''代理服务器'''
# 检测 ID 是否存在
all_id = get_all(0, proxy=proxy)
if not str(id_) in all_id.keys():
raise SongNotExistError(id_)
return
# 获取歌曲信息
def get_info(self) -> dict[str, Any]:
'''获取歌曲信息
返回:
dict[str, Any]: 歌曲详细信息
'''
if len(self._info) <= 0:
# 如果没有歌曲信息存储
response = Api(
API['songs']['info'].format(id=self.id), proxy=self.proxy
).request('get')
self._info = dict(response.json())
return self._info
# 获取歌曲所在服务器
@property
def server(self) -> Literal['jp', 'en', 'tw', 'cn', 'kr']:
'''获取歌曲所在服务器
返回:
Literal['jp', 'en', 'tw', 'cn', 'kr']: 歌曲所在服务器
'''
info = self.get_info()
# 获取 publishedAt 数据
if (published_at := info.get('publishedAt', None)) is None:
raise Exception('无法获取歌曲发布时间。')
# 根据 publishedAt 数据判断服务器
if published_at[0] is not None: return 'jp'
elif published_at[1] is not None: return 'en'
elif published_at[2] is not None: return 'tw'
elif published_at[3] is not None: return 'cn'
elif published_at[4] is not None: return 'kr'
else:
raise Exception('无法获取歌曲服务器。')
# 获取歌曲名称
@property
def name(self) -> str:
'''获取歌曲名称
返回:
str: 歌曲名称
'''
info = self.get_info()
# 获取 musicTitle 数据
if (music_title := info.get('musicTitle', None)) is None:
raise Exception('无法获取歌曲名称。')
# 获取第一个非 None 歌曲名称
try:
return next(filter(lambda x: x is not None, music_title))
except StopIteration:
raise Exception('无法获取歌曲名称。')
# 获取歌曲谱面
def get_chart(
self,
diff: Literal['easy', 'normal', 'hard', 'expert', 'special']='expert' | ) -> Chart: | 0 | 2023-11-16 13:09:20+00:00 | 12k |
kampta/asic | commons/logger.py | [
{
"identifier": "images2grid",
"path": "commons/utils.py",
"snippet": "def images2grid(images, **grid_kwargs):\n # images should be (N, C, H, W)\n grid = make_grid(images, **grid_kwargs)\n out = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()\n return out"
},
{
"identifier": "map_minmax",
"path": "commons/utils.py",
"snippet": "def map_minmax(x, in_min, in_max, out_min, out_max):\n return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min"
},
{
"identifier": "compute_pck",
"path": "commons/utils.py",
"snippet": "def compute_pck(pred, target, vis=None, thresholds=None, img_size=256,\n alphas=None):\n if type(target) == list:\n target = torch.cat(target, dim=0).float().cpu()\n else:\n target = target.float().cpu()\n if type(pred) == list:\n pred = torch.cat(pred, dim=0).float().cpu()\n else:\n pred = pred.float().cpu()\n if vis is not None and type(vis) == list:\n vis = torch.cat(vis, dim=0).bool().cpu()\n elif vis is not None:\n vis = vis.bool().cpu()\n else:\n vis = torch.ones(target.size(0)).bool()\n target = target[vis]\n pred = pred[vis]\n\n if alphas is None:\n alphas = torch.arange(0.1, 0.009, -0.01)\n else:\n alphas = torch.tensor(alphas)\n correct = torch.zeros(len(alphas))\n\n err = (pred- target).norm(dim=-1)\n err = err.unsqueeze(0).repeat(len(alphas), 1)\n\n if thresholds is None:\n thresholds = alphas.unsqueeze(-1).repeat(1, err.size(1)) * img_size\n else:\n # Each keypoint within an image pair get same threshold\n # First get threshold (bbox) for all the visible keypoints\n if type(thresholds) == list:\n thresholds = torch.cat(thresholds, dim=0).float().cpu()\n thresholds = thresholds.unsqueeze(-1).repeat(1, vis.size(1))\n thresholds = thresholds[vis]\n # Next compute alpha x threshold for all the keypoints\n thresholds = thresholds.unsqueeze(0).repeat(len(alphas), 1)\n thresholds = thresholds * alphas.unsqueeze(-1)\n\n correct = err < thresholds\n correct = correct.sum(dim=-1) / len(target)\n\n print(\"PCK-Transfer: \", ','.join([f'{pck * 100:.2f}' for pck in correct]))\n return correct"
},
{
"identifier": "sample_tuples",
"path": "commons/utils.py",
"snippet": "def sample_tuples(N, k=1, count=None, seed=None):\n\n if seed is not None:\n np.random.seed(seed)\n\n if count is None: # return all possible (k+1) permutations\n # (N!/(N-k)!) x k array\n samples = np.array(list(permutations(range(N), k+1)))\n\n elif k == 1:\n p1 = np.random.choice(N, count)\n p2 = np.random.choice(N, count)\n return np.stack([p1, p2], axis=1)\n\n elif count == -1:\n samples = np.array(list(permutations(range(N), k)))\n samples = np.concatenate([samples, samples[:, 0].reshape(-1, 1)], axis=1)\n\n else: # sample count number of permutations\n # count x k array\n samples = np.zeros((count, k+1), dtype=int)\n for i in range(count):\n samples[i, :k] = np.random.choice(N, k, replace=False)\n # Force the last column to be same as the first column\n samples[:, k] = samples[:, 0]\n\n return samples"
},
{
"identifier": "pck_loop",
"path": "commons/utils.py",
"snippet": "def pck_loop(tuples, kps_all, transfer_fn, *args, ignore_interim=False, **kwargs):\n chain_length = tuples.shape[1] - 1\n gt_kps_all = []\n pred_kps_all = []\n vis_all = []\n for ch in range(chain_length):\n src_idx = tuples[:, ch]\n trg_idx = tuples[:, ch+1]\n\n if ch == 0:\n src_kps = kps_all[src_idx]\n else:\n src_kps = pred_kps\n\n pred_kps = transfer_fn(src_kps[..., :2], src_idx, trg_idx,\n *args, **kwargs)\n\n gt_kps_all.append(kps_all[trg_idx][..., :2])\n pred_kps_all.append(pred_kps)\n \n if ch == 0:\n vis = kps_all[src_idx][..., 2] * kps_all[trg_idx][..., 2] > 0\n else:\n vis = vis * kps_all[trg_idx][..., 2] > 0\n vis_all.append(vis)\n\n if ignore_interim:\n return gt_kps_all[-1], pred_kps_all[-1], vis_all[-1]\n else:\n vis_all = torch.cat(vis_all)\n gt_kps_all = torch.cat(gt_kps_all)\n pred_kps_all = torch.cat(pred_kps_all)\n return gt_kps_all, pred_kps_all, vis_all"
},
{
"identifier": "splat_points",
"path": "commons/draw.py",
"snippet": "@torch.inference_mode()\ndef splat_points(images, points, sigma, opacity, colorscale='turbo',\n colors=None, alpha_channel=None, blend_alg='alpha'):\n \"\"\"\n Highly efficient GPU-based splatting algorithm. This function is a wrapper\n for Splat2D to overlay points on images. For highest performance, use the\n colors argument directly instead of colorscale.\n images: (N, C, H, W) tensor in [-1, +1]\n points: (N, P, 2) tensor with values in [0, resolution - 1]\n (can be sub-pixel/non-integer coordinates)\n Can also be (N, K, P, 2) tensor, in which case points[:, i]\n gets a unique colorscale\n Expects points in (x, y) order.\n sigma: either float or (N,) tensor with values > 0\n controls the size of the splatted points\n opacity: float in [0, 1], controls the opacity of the splatted points\n colorscale: [Optional] str (or length-K list of str if points is size\n (N, K, P, 2)) indicating the Plotly colorscale to visualize\n points with\n colors: [Optional] (N, P, 3) tensor (or (N, K*P, 3)). If specified,\n colorscale will be ignored. Computing the colorscale\n often takes several orders of magnitude longer than the GPU-based\n splatting, so pre-computing the colors and passing them here\n instead of using the colorscale argument can provide a significant\n speed-up.\n alpha_channel: [Optional] (N, P, 1) tensor (or (N, K*P, 1)). If specified,\n colors will be blended into the output image based on the\n opacity values in alpha_channel (between 0 and 1).\n blend_alg: [Optiona] str. Specifies the blending algorithm to use when\n merging points into images. Can use alpha compositing ('alpha'),\n Laplacian Pyramid Blending ('laplacian') or a more conservative\n version of Laplacian Blending ('laplacian_light')\n :return (N, C, H, W) tensor in [-1, +1] with points splatted onto images\n \"\"\"\n assert images.dim() == 4 # (N, C, H, W)\n assert points.dim() == 3 or points.dim() == 4 # (N, P, 2) or (N, K, P, 2)\n batch_size = images.size(0)\n # each index in the second dimension gets a unique colorscale\n if points.dim() == 4:\n num_points = points.size(2)\n points = points.reshape(\n points.size(0), points.size(1) * points.size(2), 2) # (N, K*P, 2)\n if colors is None:\n if isinstance(colorscale, str):\n colorscale = [colorscale]\n assert len(colorscale) == points.size(1)\n # (1, K*P, 3)\n colors = torch.cat([\n get_plotly_colors(num_points, c) for c in colorscale], 1)\n colors = colors.repeat(batch_size, 1, 1) # (N, K*P, 3)\n elif colors is None:\n num_points = points.size(1)\n # All batch elements use the same colorscale\n if isinstance(colorscale, str):\n # (N, P, 3)\n colors = get_plotly_colors(\n points.size(1), colorscale).repeat(batch_size, 1, 1)\n else: # Each batch element uses its own colorscale\n assert len(colorscale) == batch_size\n colors = torch.cat([get_plotly_colors(num_points, c)\n for c in colorscale], 0)\n if alpha_channel is None:\n alpha_channel = torch.ones(\n batch_size, points.size(1), 1, device='cuda')\n if isinstance(sigma, (float, int)):\n sigma = torch.tensor(\n sigma, device='cuda', dtype=torch.float).view(1).repeat(batch_size)\n blank_img = torch.zeros(batch_size, images.size(1), images.size(2),\n images.size(3), device='cuda')\n blank_mask = torch.zeros(batch_size, 1, images.size(2), images.size(3),\n device='cuda')\n # (N, C, H, W)\n prop_obj_img = splat2d(blank_img, points, colors, sigma, False)\n # (N, 1, H, W)\n prop_mask_img = splat2d(blank_mask, points, alpha_channel, sigma, True)\n prop_mask_img *= opacity\n if blend_alg == 'alpha':\n # basic 
alpha-composite\n out = prop_mask_img * prop_obj_img + (1 - prop_mask_img) * images\n elif blend_alg == 'laplacian':\n blender = LaplacianBlender().to(images.device)\n out = blender(images, prop_obj_img, prop_mask_img)\n elif blend_alg == 'laplacian_light':\n blender = LaplacianBlender(levels=3, gaussian_kernel_size=11,\n gaussian_sigma=0.5).to(images.device)\n out = blender(images, prop_obj_img, prop_mask_img)\n return out"
},
{
"identifier": "load_fg_points",
"path": "commons/draw.py",
"snippet": "def load_fg_points(img_mask, resolution=None, normalize=False, device='cuda'):\n # returns points in XY format\n if resolution is None:\n resolution = img_mask.size(-1)\n us = vs = torch.arange(resolution)\n us, vs = torch.meshgrid(us, vs, indexing='xy')\n points = torch.stack([us.reshape(-1), vs.reshape(-1)]).permute(1, 0)\n points = points.unsqueeze(0).expand(img_mask.size(0), -1, -1)\n points = points.to(device)\n\n img_mask = img_mask.float()\n if len(img_mask.shape) == 3:\n img_mask = img_mask.unsqueeze(1)\n scale_factor = resolution / img_mask.size(2)\n if resolution != img_mask.size(2): # resize the mask:\n img_mask = F.interpolate(img_mask, scale_factor=scale_factor,\n mode='bilinear')\n\n img_mask = img_mask.squeeze(1)\n points_alpha = img_mask.reshape(img_mask.size(0), -1)\n points = points / (resolution-1)\n if not normalize:\n points *= (img_mask.size(2)/scale_factor-1)\n\n colors = color_wheel_fast_smooth(resolution).to(device)\n colors = colors.reshape(1, -1, 3).expand(img_mask.size(0), -1, -1)\n\n return points, points_alpha, colors"
},
{
"identifier": "concat_v",
"path": "commons/draw.py",
"snippet": "def concat_v(*argv, pad=0):\n width = 0\n height = 0\n count = len(argv)\n\n for img in argv:\n height += img.height\n width = max(width, img.width)\n\n dst = Image.new('RGB', (width, height + (count-1)*pad))\n start = 0\n for i, img in enumerate(argv):\n dst.paste(img, (0, start))\n start += img.height + pad\n return dst"
},
{
"identifier": "get_colors",
"path": "commons/draw.py",
"snippet": "def get_colors(N):\n # colors = torch.tensor(sns.color_palette(n_colors=N))\n if N > 15:\n cmap = plt.get_cmap('tab10')\n else:\n cmap = ListedColormap([\n \"red\", \"yellow\", \"blue\", \"lime\", \"magenta\", \"indigo\", \"orange\",\n \"cyan\", \"darkgreen\", \"maroon\", \"black\", \"white\", \"chocolate\",\n \"gray\", \"blueviolet\"])\n colors = np.array([cmap(x)[:3] for x in range(N)])\n\n return colors"
},
{
"identifier": "get_dense_colors",
"path": "commons/draw.py",
"snippet": "def get_dense_colors(points, resolution=256):\n colors = color_wheel_fast_smooth(resolution)\n if len(points.shape) == 2:\n return colors[points[:, 0], points[:, 1]]\n else:\n device = points.device\n N = len(points)\n colors = colors.permute(2, 0, 1).unsqueeze(0).expand(N, -1, -1, -1)\n points = map_minmax(points, 0, resolution-1, -1, 1).unsqueeze(-2)\n colors = F.grid_sample(colors.to(device), points, align_corners=False)\n return colors.squeeze(-1).permute(0, 2, 1)"
},
{
"identifier": "load_text_points",
"path": "commons/draw.py",
"snippet": "def load_text_points(text, pos=None, size=20, rot=0, img_size=256, colorscale='turbo'):\n # Measure the text area\n # font = ImageFont.truetype (r'Roboto-Bold.ttf', size)\n font = ImageFont.load_default()\n wi, hi = font.getbbox(text)[2:]\n\n # Create a dummy source image\n into = Image.new('1', (img_size, img_size), 0)\n # Copy the relevant area from the source image\n if pos is None:\n pos = (img_size // 2 - wi // 2, img_size // 2 - hi // 2)\n img = into.crop((pos[0], pos[1], pos[0] + wi, pos[1] + hi))\n\n # Print into the rotated area\n d = ImageDraw.Draw(img)\n d.text((0, 0), text, font=font, fill = (1))\n\n # Rotate it forward again\n img = img.rotate(rot, expand=1)\n\n # Insert it back into the source image\n into.paste(img, pos)\n text_points = np.where(np.array(into)>0)\n text_points = np.stack(text_points).transpose(1, 0)[:, [1, 0]]\n text_points = torch.from_numpy(np.ascontiguousarray(text_points)).float()\n text_colors = get_plotly_colors(len(text_points), colorscale).squeeze()\n return text_points, text_colors"
},
{
"identifier": "color_wheel_fast_smooth",
"path": "thirdparty/colormap/colormap_flow.py",
"snippet": "def color_wheel_fast_smooth(resolution=512, subdivision=16):\n lim = sqrt(2)\n colorwheel = expand_color_wheel(subdivision)\n N = colorwheel.shape[0]\n xs = torch.linspace(-1, 1, steps=resolution)\n ys = torch.linspace(-1, 1, steps=resolution)\n x, y = torch.meshgrid(xs, ys, indexing='xy')\n r = torch.sqrt(x*x + y*y) # (0, sqrt(2)]\n # https://math.stackexchange.com/questions/1327253/how-do-we-find-out-angle-from-x-y-coordinates\n theta = 2 * torch.arctan(-y / (-x+r)) + PI # [0, 2*PI]\n\n # Already got interpolated theta\n # Interpolate theta\n theta_ind = theta / (2*PI) * (N-1) # [0, N-1]\n theta_ind = torch.round(theta_ind).long()\n color = colorwheel[theta_ind]\n\n # Interpolate radius\n r = (r / lim).unsqueeze(-1)\n color = color * r + torch.ones(resolution, resolution, 3) * (1-r)\n # color = (color.numpy() * 255).astype(np.uint8)\n return color # HWC"
}
] | from torch.utils.tensorboard.writer import SummaryWriter
from PIL import Image
from commons.utils import images2grid, map_minmax, compute_pck, sample_tuples, \
pck_loop
from commons.draw import splat_points, load_fg_points, \
concat_v, get_colors, get_dense_colors, load_text_points
from thirdparty.colormap.colormap_flow import color_wheel_fast_smooth
import torch
import torch.nn.functional as F
import wandb
import numpy as np | 7,606 | flow, _ = stn(all_imgs[src_idx])
colors = F.grid_sample(colors, flow, padding_mode='border',
align_corners=True)
colors = map_minmax(colors, 0, 1, -1, 1)
alpha = 0.5
blend_img = alpha * all_imgs[src_idx] * (1-all_masks[src_idx]) + \
(all_imgs[src_idx] * alpha + colors * (1-alpha)) * all_masks[src_idx]
blend_img = torch.cat([wheel, blend_img, wheel, colors* all_masks[src_idx]])
writer.log_image_grid(blend_img, 'canon_map', train_idx, len(blend_img),
log_mean_img=False, nrow=len(blend_img)//2)
# Log keypoints from Image space to canonical space
if has_gt_kp:
canon_corrs = stn.transfer_forward(all_flows, all_kps[..., :2], res, is_flow=True)
canon_corrs = stn.unnormalize(canon_corrs, res, res)
canon_vis = all_kps[..., -1]
num_kp = canon_vis.size(-1)
N = canon_vis.size(0)
colors = kps_cols.permute(1, 0, 2).expand(-1, N, -1).to(device)
heatmaps = splat_points(
torch.ones(num_kp, 3, res, res, device=device) * -1,
canon_corrs.permute(1, 0, 2), sigma=6., opacity=1.,
colors=colors, alpha_channel=canon_vis.permute(1, 0).unsqueeze(-1))
writer.log_image_grid(heatmaps, 'kp_heatmaps', train_idx,
num_kp, padding=2, pad_value=1.)
# Log parts from Image space to canonical space
# Splat one part at a time to canonical
# TODO: splat all at once
num_parts = dset.num_parts
part_kp_canons = []
part_kp_vis = []
for part in range(num_parts):
part_masks = (parts == part).float().unsqueeze(1)
kp, kp_vis, _ = load_fg_points(part_masks, resolution=vis_denseres)
kp_canon = stn.transfer_forward(all_flows, kp[..., :2], res, is_flow=True)
kp_canon = stn.unnormalize(kp_canon, res, res)
part_kp_canons.append(kp_canon.reshape(-1, 2))
part_kp_vis.append(kp_vis.reshape(-1))
part_kp_canons = torch.stack(part_kp_canons)
part_kp_vis = torch.stack(part_kp_vis)
colors = parts_cols[:-1].unsqueeze(1).expand(-1, part_kp_vis.size(1), -1)
heatmaps = splat_points(
torch.ones(num_parts, 3, res, res, device=device) * -1,
part_kp_canons, sigma=2., opacity=1.,
colors=colors, alpha_channel=part_kp_vis.unsqueeze(-1))
writer.log_image_grid(heatmaps, 'part_heatmaps', train_idx,
num_parts, padding=2, pad_value=1.)
# Compute PCKs
N = all_imgs.size(0)
transfer_fn = stn.transfer_points
pck_pairs = None
if has_gt_kp:
# First compute PCK for all 2-pairs
if has_fixed_pairs:
tuples = dset.fixed_pairs
if dset.thresholds is not None:
thresholds = [torch.from_numpy(dset.thresholds)[tuples[:, 1]]]
else:
thresholds = None
else:
tuples = sample_tuples(N)
thresholds = None
print(f"First computing 2-point PCK for {len(tuples)} pairs")
gt_corrs, pred_corrs, vis = pck_loop(
tuples, all_kps, transfer_fn, all_flows, all_masks, res,
return_canon=False, is_flow=True)
pck_pairs = compute_pck(pred_corrs, gt_corrs, vis, thresholds,
img_size=res)
# Compute k-cycle PCK
pck_cycles = []
if not has_gt_kp:
kp, kp_vis, kp_col_dense = load_fg_points(all_masks,
resolution=vis_denseres)
ignore_idx = kp_vis.sum(dim=0) == 0
all_kps = torch.cat([kp[:, ~ignore_idx], kp_vis[:, ~ignore_idx].unsqueeze(-1)], dim=2)
ignore_interim = True
else:
ignore_interim = False
for k in [2, 3, 4]:
tuples = sample_tuples(N, k=k, count=200)
if has_fixed_pairs and dset.thresholds is not None:
thresholds = torch.from_numpy(dset.thresholds[tuples[:, 1:]])
thresholds = thresholds.reshape(-1)
else:
thresholds = None
print(f"Next computing {k}-cycle PCK for {len(tuples)} tuples")
gt_corrs, pred_corrs, vis = pck_loop(
tuples, all_kps, transfer_fn, all_flows, all_masks, res,
return_canon=False, is_flow=True, ignore_interim=ignore_interim)
pck = compute_pck(pred_corrs, gt_corrs, vis, thresholds, img_size=res)
pck_cycles.append(pck)
return pck_pairs, pck_cycles
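# Logging helper: wraps SummaryWriter so image grids and metrics can be saved to disk and mirrored to TensorBoard and/or Weights & Biases.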
class Logger(SummaryWriter):
def __init__(self, results_path, log_to_tb=False, log_to_wandb=True):
super().__init__(results_path)
self.results_path = results_path
self.log_to_tb = log_to_tb
self.log_to_wandb = log_to_wandb
def _log_image_grid(self, images, logging_name, prefix, itr, range=(-1, 1),
scale_each=False, nrow=None, **kwargs):
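        # Save a (roughly square, unless nrow is given) image grid as a PNG under results_path and mirror it to wandb when enabled.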
nrow = max(1, int(len(images) ** 0.5+0.5)) if nrow is None else nrow
if type(images[0]) is torch.Tensor:
ndarr = images2grid(images, return_as_PIL=True, nrow=nrow,
normalize=True, value_range=range,
scale_each=scale_each, **kwargs)
grid = Image.fromarray(ndarr)
grid.save(f"{self.results_path}/{logging_name}_{str(itr).zfill(7)}.png")
if self.log_to_wandb:
wandb.log({logging_name: wandb.Image(grid)}, step=itr)
else:
|
@torch.inference_mode()
def log_visuals(canon, stn, dset, train_idx, writer, vis_sample=2,
vis_denseres=32):
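    # Visualize current correspondences for a few samples (sparse/dense keypoint transfer, canonical-space maps) and report PCK metrics.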
device = 'cuda' if torch.cuda.is_available() else 'cpu'
pseudo_kps = dset.pseudo_kps
parts = dset.parts
vis_sample = min(vis_sample, len(dset))
res = dset.img_size
has_gt_kp = dset.kps is not None
has_fixed_pairs = dset.fixed_pairs is not None # SPair
# Run full test dataloader (assuming small dataset)
all_imgs = dset.imgs
all_masks = dset.masks
all_kps = dset.kps
all_flows, _ = stn(all_imgs)
if has_gt_kp:
kps_cols = torch.from_numpy(get_colors(all_kps.size(1))).float()
kps_cols = map_minmax(kps_cols, 0, 1, -1, 1).to(device).unsqueeze(0)
parts_cols = torch.from_numpy(get_colors(dset.num_parts+1)).float()
parts_cols = map_minmax(parts_cols, 0, 1, -1, 1).to(device)
parts_cols[-1] = 0
# Text logging
text_kp, text_kp_col = load_text_points('CVPR')
text_kp = text_kp.to(device).unsqueeze(0)
text_kp_col = text_kp_col.to(device).unsqueeze(0)
pairs = sample_tuples(len(dset), count=vis_sample, seed=0)
src_idx, trg_idx = pairs[:, 0], pairs[:, 1]
# Log only once during the training
if train_idx == 0:
# Log images and the mask
writer.log_image_grid(all_imgs[:vis_sample], 'img', train_idx,
vis_sample, nrow=vis_sample)
writer.log_image_grid(all_imgs[:vis_sample]*all_masks[:vis_sample],
'img_mask', train_idx, vis_sample, nrow=vis_sample)
# Log neural best buddies (sparse)
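        # pseudo_kps appears to be indexed as [i, j] -> keypoints of image i matched
        # against image j, with the last channel a visibility flag; a correspondence is
        # only splatted when it is visible in both directions (product of the two flags).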
kp1 = pseudo_kps[src_idx, trg_idx]
kp2 = pseudo_kps[trg_idx, src_idx]
kp_vis = kp1[..., -1] * kp2[..., -1]
kp1, kp2 = kp1[..., :2], kp2[..., :2]
colors = map_minmax(get_dense_colors(kp1), 0, 1, -1, 1)
blend_src = splat_points(
all_imgs[src_idx], kp1, sigma=3., opacity=1.0, colors=colors,
alpha_channel=kp_vis.unsqueeze(-1))
blend_trg = splat_points(
all_imgs[trg_idx], kp2, sigma=3., opacity=1.0, colors=colors,
alpha_channel=kp_vis.unsqueeze(-1))
stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)
writer.log_image_grid(stacked, 'kp_pseudo_gt', train_idx, 2*vis_sample,
log_mean_img=False, nrow=2)
# Log parts
parts_img = parts_cols[parts[:vis_sample]].permute(0, 3, 1, 2)
writer.log_image_grid(parts_img, 'parts', train_idx, vis_sample,
nrow=vis_sample, log_mean_img=False)
# Log groundtruth kp
if has_gt_kp:
kp1, kp2 = all_kps[src_idx], all_kps[trg_idx]
kp_vis = kp1[..., -1] * kp2[..., -1]
kp1, kp2 = kp1[..., :2], kp2[..., :2]
colors = kps_cols.expand(vis_sample, -1, -1)
blend_src = splat_points(
all_imgs[src_idx], kp1, sigma=3., opacity=1.0, colors=colors,
alpha_channel=kp_vis.unsqueeze(-1))
blend_trg = splat_points(
all_imgs[trg_idx], kp2, sigma=3., opacity=1.0, colors=colors,
alpha_channel=kp_vis.unsqueeze(-1))
            stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)
writer.log_image_grid(stacked, 'kp_gt', train_idx, 2*vis_sample,
log_mean_img=False, nrow=2)
# Log kp and top predictions by STN (if kp are available)
if has_gt_kp:
kp1 = all_kps[src_idx][..., :2]
kp_vis = all_kps[src_idx][..., 2]
kp_pred = stn.transfer_points(
kp1, src_idx, trg_idx, all_flows, mask=all_masks, res=res, is_flow=True)
colors = kps_cols.expand(vis_sample, -1, -1)
blend_src = splat_points(
all_imgs[src_idx], kp1, sigma=3., opacity=1.0,
colors=colors, alpha_channel=kp_vis.unsqueeze(-1))
blend_trg = splat_points(
all_imgs[trg_idx], kp_pred.float(), sigma=3., opacity=1.0,
colors=colors, alpha_channel=kp_vis.unsqueeze(-1))
stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)
writer.log_image_grid(stacked, 'kp_pred_sparse', train_idx,
2*vis_sample, log_mean_img=False, nrow=2)
# Log current canon image
canon_grid = canon.get_grid(vis_sample)
if canon_grid.size(1) > 3:
canon_grid = canon_grid[:, :3]
scale_factor = res / canon_grid.size(-1)
canon_grid = F.interpolate(
canon_grid, scale_factor=scale_factor, mode='bilinear')
writer.log_image_grid(canon_grid, 'canon', train_idx, 1, log_mean_img=False)
# Log dense correspondences
kp, kp_vis, kp_col_dense = load_fg_points(all_masks[src_idx],
resolution=vis_denseres)
kp_pred, kp_canon = stn.transfer_points(
kp, src_idx, trg_idx, all_flows, mask=all_masks, res=res,
return_canon=True, is_flow=True)
colors = map_minmax(kp_col_dense, 0, 1, -1, 1)
blend_src = splat_points(
all_imgs[src_idx], kp, sigma=4., opacity=0.75,
colors=colors, alpha_channel=kp_vis.unsqueeze(-1))
blend_trg = splat_points(
all_imgs[trg_idx], kp_pred.float(), sigma=4., opacity=0.75,
colors=colors, alpha_channel=kp_vis.unsqueeze(-1))
blend_canon = splat_points(
torch.ones_like(canon_grid) * -1, kp_canon, sigma=1.3, opacity=1.0,
colors=colors, alpha_channel=kp_vis.unsqueeze(-1))
stacked = torch.stack([blend_src, blend_canon, blend_trg], dim=1).\
flatten(0, 1)
writer.log_image_grid(
stacked, 'kp_pred_dense', train_idx, 3*vis_sample,
log_mean_img=False, nrow=3)
# # Log dense correspondences with text
# text_kp = text_kp.expand(vis_sample, -1, -1)
# text_kp_col = text_kp_col.expand(vis_sample, -1, -1)
# kp_pred, kp_canon = stn.transfer_points(
# text_kp, src_idx, trg_idx, all_flows, mask=all_masks, res=res,
# return_canon=True, is_flow=True)
# blend_src = splat_points(all_imgs[src_idx], text_kp, sigma=0.7, opacity=1.,
# colors=text_kp_col)
# blend_trg = splat_points(all_imgs[trg_idx], kp_pred.float(), sigma=0.7,
# opacity=1., colors=text_kp_col)
# blend_canon = splat_points(torch.ones_like(canon_grid) * -1, kp_canon,
# sigma=0.7, opacity=1., colors=text_kp_col)
# stacked = torch.stack([blend_src, blend_canon, blend_trg], dim=1).\
# flatten(0, 1)
# writer.log_image_grid(
# stacked, 'kp_pred_text', train_idx, 3*vis_sample,
# log_mean_img=False, nrow=3)
# Log dense mapping from canonical space to Image space
wheel = color_wheel_fast_smooth(res).permute(2, 0, 1).unsqueeze(0).to(device)
colors = wheel.expand(vis_sample, -1, -1, -1)
flow, _ = stn(all_imgs[src_idx])
colors = F.grid_sample(colors, flow, padding_mode='border',
align_corners=True)
colors = map_minmax(colors, 0, 1, -1, 1)
alpha = 0.5
blend_img = alpha * all_imgs[src_idx] * (1-all_masks[src_idx]) + \
(all_imgs[src_idx] * alpha + colors * (1-alpha)) * all_masks[src_idx]
    blend_img = torch.cat([wheel, blend_img, wheel, colors * all_masks[src_idx]])
writer.log_image_grid(blend_img, 'canon_map', train_idx, len(blend_img),
log_mean_img=False, nrow=len(blend_img)//2)
# Log keypoints from Image space to canonical space
if has_gt_kp:
canon_corrs = stn.transfer_forward(all_flows, all_kps[..., :2], res, is_flow=True)
canon_corrs = stn.unnormalize(canon_corrs, res, res)
canon_vis = all_kps[..., -1]
num_kp = canon_vis.size(-1)
N = canon_vis.size(0)
colors = kps_cols.permute(1, 0, 2).expand(-1, N, -1).to(device)
heatmaps = splat_points(
torch.ones(num_kp, 3, res, res, device=device) * -1,
canon_corrs.permute(1, 0, 2), sigma=6., opacity=1.,
colors=colors, alpha_channel=canon_vis.permute(1, 0).unsqueeze(-1))
writer.log_image_grid(heatmaps, 'kp_heatmaps', train_idx,
num_kp, padding=2, pad_value=1.)
# Log parts from Image space to canonical space
# Splat one part at a time to canonical
# TODO: splat all at once
num_parts = dset.num_parts
part_kp_canons = []
part_kp_vis = []
for part in range(num_parts):
part_masks = (parts == part).float().unsqueeze(1)
kp, kp_vis, _ = load_fg_points(part_masks, resolution=vis_denseres)
kp_canon = stn.transfer_forward(all_flows, kp[..., :2], res, is_flow=True)
kp_canon = stn.unnormalize(kp_canon, res, res)
part_kp_canons.append(kp_canon.reshape(-1, 2))
part_kp_vis.append(kp_vis.reshape(-1))
part_kp_canons = torch.stack(part_kp_canons)
part_kp_vis = torch.stack(part_kp_vis)
colors = parts_cols[:-1].unsqueeze(1).expand(-1, part_kp_vis.size(1), -1)
heatmaps = splat_points(
torch.ones(num_parts, 3, res, res, device=device) * -1,
part_kp_canons, sigma=2., opacity=1.,
colors=colors, alpha_channel=part_kp_vis.unsqueeze(-1))
writer.log_image_grid(heatmaps, 'part_heatmaps', train_idx,
num_parts, padding=2, pad_value=1.)
# Compute PCKs
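    # PCK (Percentage of Correct Keypoints): fraction of transferred keypoints that land
    # within a distance threshold of their ground-truth location, using the dataset's
    # thresholds when available and otherwise (presumably) a default derived from `res`.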
N = all_imgs.size(0)
transfer_fn = stn.transfer_points
pck_pairs = None
if has_gt_kp:
# First compute PCK for all 2-pairs
if has_fixed_pairs:
tuples = dset.fixed_pairs
if dset.thresholds is not None:
thresholds = [torch.from_numpy(dset.thresholds)[tuples[:, 1]]]
else:
thresholds = None
else:
tuples = sample_tuples(N)
thresholds = None
print(f"First computing 2-point PCK for {len(tuples)} pairs")
gt_corrs, pred_corrs, vis = pck_loop(
tuples, all_kps, transfer_fn, all_flows, all_masks, res,
return_canon=False, is_flow=True)
pck_pairs = compute_pck(pred_corrs, gt_corrs, vis, thresholds,
img_size=res)
# Compute k-cycle PCK
pck_cycles = []
if not has_gt_kp:
kp, kp_vis, kp_col_dense = load_fg_points(all_masks,
resolution=vis_denseres)
ignore_idx = kp_vis.sum(dim=0) == 0
all_kps = torch.cat([kp[:, ~ignore_idx], kp_vis[:, ~ignore_idx].unsqueeze(-1)], dim=2)
ignore_interim = True
else:
ignore_interim = False
for k in [2, 3, 4]:
tuples = sample_tuples(N, k=k, count=200)
if has_fixed_pairs and dset.thresholds is not None:
thresholds = torch.from_numpy(dset.thresholds[tuples[:, 1:]])
thresholds = thresholds.reshape(-1)
else:
thresholds = None
print(f"Next computing {k}-cycle PCK for {len(tuples)} tuples")
gt_corrs, pred_corrs, vis = pck_loop(
tuples, all_kps, transfer_fn, all_flows, all_masks, res,
return_canon=False, is_flow=True, ignore_interim=ignore_interim)
pck = compute_pck(pred_corrs, gt_corrs, vis, thresholds, img_size=res)
pck_cycles.append(pck)
return pck_pairs, pck_cycles
class Logger(SummaryWriter):
def __init__(self, results_path, log_to_tb=False, log_to_wandb=True):
super().__init__(results_path)
self.results_path = results_path
self.log_to_tb = log_to_tb
self.log_to_wandb = log_to_wandb
def _log_image_grid(self, images, logging_name, prefix, itr, range=(-1, 1),
scale_each=False, nrow=None, **kwargs):
nrow = max(1, int(len(images) ** 0.5+0.5)) if nrow is None else nrow
if type(images[0]) is torch.Tensor:
ndarr = images2grid(images, return_as_PIL=True, nrow=nrow,
normalize=True, value_range=range,
scale_each=scale_each, **kwargs)
grid = Image.fromarray(ndarr)
grid.save(f"{self.results_path}/{logging_name}_{str(itr).zfill(7)}.png")
if self.log_to_wandb:
wandb.log({logging_name: wandb.Image(grid)}, step=itr)
else: | grid = concat_v(*images) | 7 | 2023-11-14 16:43:16+00:00 | 12k |
AnonymGiant/ViLaM | lavis/runners/runner_iter.py | [
{
"identifier": "download_cached_file",
"path": "lavis/common/dist_utils.py",
"snippet": "def download_cached_file(url, check_hash=True, progress=False):\n \"\"\"\n Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again.\n If distributed, only the main process downloads the file, and the other processes wait for the file to be downloaded.\n \"\"\"\n\n def get_cached_file_path():\n # a hack to sync the file path across processes\n parts = torch.hub.urlparse(url)\n filename = os.path.basename(parts.path)\n cached_file = os.path.join(timm_hub.get_cache_dir(), filename)\n\n return cached_file\n\n if is_main_process():\n timm_hub.download_cached_file(url, check_hash, progress)\n\n if is_dist_avail_and_initialized():\n dist.barrier()\n\n return get_cached_file_path()"
},
{
"identifier": "is_main_process",
"path": "lavis/common/dist_utils.py",
"snippet": "def is_main_process():\n return get_rank() == 0"
},
{
"identifier": "main_process",
"path": "lavis/common/dist_utils.py",
"snippet": "def main_process(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n rank, _ = get_dist_info()\n if rank == 0:\n return func(*args, **kwargs)\n\n return wrapper"
},
{
"identifier": "registry",
"path": "lavis/common/registry.py",
"snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_lr_scheduler(cls, name):\n def wrap(lr_sched_cls):\n def register_runner(cls, name):\n def wrap(runner_cls):\n def register_path(cls, name, path):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_task_class(cls, name):\n def get_processor_class(cls, name):\n def get_lr_scheduler_class(cls, name):\n def get_runner_class(cls, name):\n def list_runners(cls):\n def list_models(cls):\n def list_tasks(cls):\n def list_processors(cls):\n def list_lr_schedulers(cls):\n def list_datasets(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):"
},
{
"identifier": "is_url",
"path": "lavis/common/utils.py",
"snippet": "def is_url(url_or_filename):\n parsed = urlparse(url_or_filename)\n return parsed.scheme in (\"http\", \"https\")"
},
{
"identifier": "concat_datasets",
"path": "lavis/datasets/data_utils.py",
"snippet": "def concat_datasets(datasets):\n \"\"\"\n Concatenates multiple datasets into a single dataset.\n\n It supports may-style datasets and DataPipeline from WebDataset. Currently, does not support\n generic IterableDataset because it requires creating separate samplers.\n\n Now only supports conctenating training datasets and assuming validation and testing\n have only a single dataset. This is because metrics should not be computed on the concatenated\n datasets.\n\n Args:\n datasets: dict of torch.utils.data.Dataset objects by split.\n\n Returns:\n Dict of concatenated datasets by split, \"train\" is the concatenation of multiple datasets,\n \"val\" and \"test\" remain the same.\n\n If the input training datasets contain both map-style and DataPipeline datasets, returns\n a tuple, where the first element is a concatenated map-style dataset and the second\n element is a chained DataPipeline dataset.\n\n \"\"\"\n # concatenate datasets in the same split\n for split_name in datasets:\n if split_name != \"train\":\n assert (\n len(datasets[split_name]) == 1\n ), \"Do not support multiple {} datasets.\".format(split_name)\n datasets[split_name] = datasets[split_name][0]\n else:\n iterable_datasets, map_datasets = [], []\n for dataset in datasets[split_name]:\n if isinstance(dataset, wds.DataPipeline):\n logging.info(\n \"Dataset {} is IterableDataset, can't be concatenated.\".format(\n dataset\n )\n )\n iterable_datasets.append(dataset)\n elif isinstance(dataset, IterableDataset):\n raise NotImplementedError(\n \"Do not support concatenation of generic IterableDataset.\"\n )\n else:\n map_datasets.append(dataset)\n\n # if len(iterable_datasets) > 0:\n # concatenate map-style datasets and iterable-style datasets separately\n chained_datasets = (\n ChainDataset(iterable_datasets) if len(iterable_datasets) > 0 else None\n )\n concat_datasets = (\n ConcatDataset(map_datasets) if len(map_datasets) > 0 else None\n )\n\n train_datasets = concat_datasets, chained_datasets\n train_datasets = tuple([x for x in train_datasets if x is not None])\n train_datasets = (\n train_datasets[0] if len(train_datasets) == 1 else train_datasets\n )\n\n datasets[split_name] = train_datasets\n\n return datasets"
},
{
"identifier": "reorg_datasets_by_split",
"path": "lavis/datasets/data_utils.py",
"snippet": "def reorg_datasets_by_split(datasets):\n \"\"\"\n Organizes datasets by split.\n\n Args:\n datasets: dict of torch.utils.data.Dataset objects by name.\n\n Returns:\n Dict of datasets by split {split_name: List[Datasets]}.\n \"\"\"\n # if len(datasets) == 1:\n # return datasets[list(datasets.keys())[0]]\n # else:\n reorg_datasets = dict()\n\n # reorganize by split\n for _, dataset in datasets.items():\n for split_name, dataset_split in dataset.items():\n if split_name not in reorg_datasets:\n reorg_datasets[split_name] = [dataset_split]\n else:\n reorg_datasets[split_name].append(dataset_split)\n\n return reorg_datasets"
},
{
"identifier": "RunnerBase",
"path": "lavis/runners/runner_base.py",
"snippet": "class RunnerBase:\n \"\"\"\n A runner class to train and evaluate a model given a task and datasets.\n\n The runner uses pytorch distributed data parallel by default. Future release\n will support other distributed frameworks.\n \"\"\"\n\n def __init__(self, cfg, task, model, datasets, job_id):\n self.config = cfg\n self.job_id = job_id\n\n self.task = task\n self.datasets = datasets\n\n self._model = model\n\n self._wrapped_model = None\n self._device = None\n self._optimizer = None\n self._scaler = None\n self._dataloaders = None\n self._lr_sched = None\n\n self.start_epoch = 0\n\n # self.setup_seeds()\n self.setup_output_dir()\n\n @property\n def device(self):\n if self._device is None:\n self._device = torch.device(self.config.run_cfg.device)\n\n return self._device\n\n @property\n def use_distributed(self):\n return self.config.run_cfg.distributed\n\n @property\n def model(self):\n \"\"\"\n A property to get the DDP-wrapped model on the device.\n \"\"\"\n # move model to device\n if self._model.device != self.device:\n self._model = self._model.to(self.device)\n\n # distributed training wrapper\n if self.use_distributed:\n if self._wrapped_model is None:\n self._wrapped_model = DDP(\n self._model, device_ids=[self.config.run_cfg.gpu]\n )\n else:\n self._wrapped_model = self._model\n\n return self._wrapped_model\n\n @property\n def optimizer(self):\n # TODO make optimizer class and configurations\n if self._optimizer is None:\n lr_scale = self.config.run_cfg.get(\"lr_layer_decay\", 1)\n weight_decay = self.config.run_cfg.get(\"weight_decay\", 0.05)\n optim_params = self._model.get_optimizer_params(weight_decay,lr_scale)\n\n num_parameters = 0\n for p_group in optim_params:\n for p in p_group[\"params\"]:\n num_parameters += p.data.nelement() \n logging.info(\"number of trainable parameters: {}\".format(num_parameters)) \n \n beta2 = self.config.run_cfg.get(\"beta2\", 0.999)\n\n self._optimizer = torch.optim.AdamW(\n optim_params,\n lr=float(self.config.run_cfg.init_lr),\n betas=(0.9, beta2),\n ) \n return self._optimizer\n\n @property\n def scaler(self):\n amp = self.config.run_cfg.get(\"amp\", False)\n\n if amp:\n if self._scaler is None:\n self._scaler = torch.cuda.amp.GradScaler()\n\n return self._scaler\n\n @property\n def lr_scheduler(self):\n \"\"\"\n A property to get and create learning rate scheduler by split just in need.\n \"\"\"\n if self._lr_sched is None:\n lr_sched_cls = registry.get_lr_scheduler_class(self.config.run_cfg.lr_sched)\n\n # max_epoch = self.config.run_cfg.max_epoch\n max_epoch = self.max_epoch\n # min_lr = self.config.run_cfg.min_lr\n min_lr = self.min_lr\n # init_lr = self.config.run_cfg.init_lr\n init_lr = self.init_lr\n\n # optional parameters\n decay_rate = self.config.run_cfg.get(\"lr_decay_rate\", None)\n warmup_start_lr = self.config.run_cfg.get(\"warmup_lr\", -1)\n warmup_steps = self.config.run_cfg.get(\"warmup_steps\", 0)\n\n self._lr_sched = lr_sched_cls(\n optimizer=self.optimizer,\n max_epoch=max_epoch,\n min_lr=min_lr,\n init_lr=init_lr,\n decay_rate=decay_rate,\n warmup_start_lr=warmup_start_lr,\n warmup_steps=warmup_steps,\n )\n\n return self._lr_sched\n\n @property\n def dataloaders(self) -> dict:\n \"\"\"\n A property to get and create dataloaders by split just in need.\n\n If no train_dataset_ratio is provided, concatenate map-style datasets and\n chain wds.DataPipe datasets separately. Training set becomes a tuple\n (ConcatDataset, ChainDataset), both are optional but at least one of them is\n required. 
The resultant ConcatDataset and ChainDataset will be sampled evenly.\n\n If train_dataset_ratio is provided, create a MultiIterLoader to sample\n each dataset by ratios during training.\n\n Currently do not support multiple datasets for validation and test.\n\n Returns:\n dict: {split_name: (tuples of) dataloader}\n \"\"\"\n if self._dataloaders is None:\n # reoganize datasets by split and concatenate/chain if necessary\n dataset_ratios = self.config.run_cfg.get(\"train_dataset_ratios\", None)\n\n # concatenate map-style datasets and chain wds.DataPipe datasets separately\n # training set becomes a tuple (ConcatDataset, ChainDataset), both are\n # optional but at least one of them is required. The resultant ConcatDataset\n # and ChainDataset will be sampled evenly.\n logging.info(\n \"dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline).\"\n )\n\n datasets = reorg_datasets_by_split(self.datasets)\n self.datasets = concat_datasets(datasets)\n\n # print dataset statistics after concatenation/chaining\n for split_name in self.datasets:\n if isinstance(self.datasets[split_name], tuple) or isinstance(\n self.datasets[split_name], list\n ):\n # mixed wds.DataPipeline and torch.utils.data.Dataset\n num_records = sum(\n [\n len(d)\n if not type(d) in [wds.DataPipeline, ChainDataset]\n else 0\n for d in self.datasets[split_name]\n ]\n )\n\n else:\n if hasattr(self.datasets[split_name], \"__len__\"):\n # a single map-style dataset\n num_records = len(self.datasets[split_name])\n else:\n # a single wds.DataPipeline\n num_records = -1\n logging.info(\n \"Only a single wds.DataPipeline dataset, no __len__ attribute.\"\n )\n\n if num_records >= 0:\n logging.info(\n \"Loaded {} records for {} split from the dataset.\".format(\n num_records, split_name\n )\n )\n\n # create dataloaders\n split_names = sorted(self.datasets.keys())\n\n datasets = [self.datasets[split] for split in split_names]\n is_trains = [split in self.train_splits for split in split_names]\n\n batch_sizes = [\n self.config.run_cfg.batch_size_train\n if split == \"train\"\n else self.config.run_cfg.batch_size_eval\n for split in split_names\n ]\n\n collate_fns = []\n for dataset in datasets:\n if isinstance(dataset, tuple) or isinstance(dataset, list):\n collate_fns.append([getattr(d, \"collater\", None) for d in dataset])\n else:\n collate_fns.append(getattr(dataset, \"collater\", None))\n\n dataloaders = self.create_loaders(\n datasets=datasets,\n num_workers=self.config.run_cfg.num_workers,\n batch_sizes=batch_sizes,\n is_trains=is_trains,\n collate_fns=collate_fns,\n dataset_ratios=dataset_ratios,\n )\n\n self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)}\n\n return self._dataloaders\n\n @property\n def cuda_enabled(self):\n return self.device.type == \"cuda\"\n\n @property\n def max_epoch(self):\n return int(self.config.run_cfg.max_epoch)\n\n @property\n def log_freq(self):\n log_freq = self.config.run_cfg.get(\"log_freq\", 50)\n return int(log_freq)\n\n @property\n def init_lr(self):\n return float(self.config.run_cfg.init_lr)\n\n @property\n def min_lr(self):\n return float(self.config.run_cfg.min_lr)\n\n @property\n def accum_grad_iters(self):\n return int(self.config.run_cfg.get(\"accum_grad_iters\", 1))\n\n @property\n def valid_splits(self):\n valid_splits = self.config.run_cfg.get(\"valid_splits\", [])\n\n if len(valid_splits) == 0:\n logging.info(\"No validation splits found.\")\n\n return valid_splits\n\n @property\n def test_splits(self):\n 
test_splits = self.config.run_cfg.get(\"test_splits\", [])\n\n return test_splits\n\n @property\n def train_splits(self):\n train_splits = self.config.run_cfg.get(\"train_splits\", [])\n\n if len(train_splits) == 0:\n logging.info(\"Empty train splits.\")\n\n return train_splits\n\n @property\n def evaluate_only(self):\n \"\"\"\n Set to True to skip training.\n \"\"\"\n return self.config.run_cfg.evaluate\n\n @property\n def use_dist_eval_sampler(self):\n return self.config.run_cfg.get(\"use_dist_eval_sampler\", True)\n\n @property\n def resume_ckpt_path(self):\n return self.config.run_cfg.get(\"resume_ckpt_path\", None)\n\n @property\n def train_loader(self):\n train_dataloader = self.dataloaders[\"train\"]\n\n return train_dataloader\n\n def setup_output_dir(self):\n lib_root = Path(registry.get_path(\"library_root\"))\n\n output_dir = lib_root / self.config.run_cfg.output_dir / self.job_id\n result_dir = output_dir / \"result\"\n\n output_dir.mkdir(parents=True, exist_ok=True)\n result_dir.mkdir(parents=True, exist_ok=True)\n\n registry.register_path(\"result_dir\", str(result_dir))\n registry.register_path(\"output_dir\", str(output_dir))\n\n self.result_dir = result_dir\n self.output_dir = output_dir\n\n def train(self):\n start_time = time.time()\n best_agg_metric = 0\n best_epoch = 0\n\n self.log_config()\n\n # resume from checkpoint if specified\n if not self.evaluate_only and self.resume_ckpt_path is not None:\n self._load_checkpoint(self.resume_ckpt_path)\n\n for cur_epoch in range(self.start_epoch, self.max_epoch):\n # training phase\n if not self.evaluate_only:\n logging.info(\"Start training\")\n train_stats = self.train_epoch(cur_epoch)\n self.log_stats(split_name=\"train\", stats=train_stats)\n \n self._save_checkpoint(cur_epoch, is_best=False)\n\n\n # evaluation phase\n if len(self.valid_splits) > 0:\n for split_name in self.valid_splits:\n logging.info(\"Evaluating on {}.\".format(split_name))\n\n val_log = self.eval_epoch(\n split_name=split_name, cur_epoch=cur_epoch\n )\n # if val_log is not None:\n # if is_main_process():\n # assert (\n # \"agg_metrics\" in val_log\n # ), \"No agg_metrics found in validation log.\"\n\n # agg_metrics = val_log[\"agg_metrics\"]\n # if agg_metrics > best_agg_metric and split_name == \"val\":\n # best_epoch, best_agg_metric = cur_epoch, agg_metrics\n\n # self._save_checkpoint(cur_epoch, is_best=True)\n\n # val_log.update({\"best_epoch\": best_epoch})\n # self.log_stats(val_log, split_name)\n\n else:\n # if no validation split is provided, we just save the checkpoint at the end of each epoch.\n if not self.evaluate_only:\n self._save_checkpoint(cur_epoch, is_best=False)\n\n if self.evaluate_only:\n break\n\n dist.barrier()\n\n # testing phase\n test_epoch = \"best\" if len(self.valid_splits) > 0 else cur_epoch\n self.evaluate(cur_epoch=test_epoch, skip_reload=self.evaluate_only)\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n logging.info(\"Training time {}\".format(total_time_str))\n\n def evaluate(self, cur_epoch=\"best\", skip_reload=False):\n test_logs = dict()\n\n if len(self.test_splits) > 0:\n for split_name in self.test_splits:\n test_logs[split_name] = self.eval_epoch(\n split_name=split_name, cur_epoch=cur_epoch, skip_reload=skip_reload\n )\n\n return test_logs\n\n def train_epoch(self, epoch):\n # train\n self.model.train()\n\n return self.task.train_epoch(\n epoch=epoch,\n model=self.model,\n data_loader=self.train_loader,\n optimizer=self.optimizer,\n 
scaler=self.scaler,\n lr_scheduler=self.lr_scheduler,\n cuda_enabled=self.cuda_enabled,\n log_freq=self.log_freq,\n accum_grad_iters=self.accum_grad_iters,\n )\n\n @torch.no_grad()\n def eval_epoch(self, split_name, cur_epoch, skip_reload=False):\n \"\"\"\n Evaluate the model on a given split.\n\n Args:\n split_name (str): name of the split to evaluate on.\n cur_epoch (int): current epoch.\n skip_reload_best (bool): whether to skip reloading the best checkpoint.\n During training, we will reload the best checkpoint for validation.\n During testing, we will use provided weights and skip reloading the best checkpoint .\n \"\"\"\n data_loader = self.dataloaders.get(split_name, None)\n assert data_loader, \"data_loader for split {} is None.\".format(split_name)\n\n # TODO In validation, you need to compute loss as well as metrics\n # TODO consider moving to model.before_evaluation()\n model = self.unwrap_dist_model(self.model)\n if not skip_reload and cur_epoch == \"best\":\n model = self._reload_best_model(model)\n model.eval()\n\n self.task.before_evaluation(\n model=model,\n dataset=self.datasets[split_name],\n )\n results = self.task.evaluation(model, data_loader)\n\n if results is not None:\n return self.task.after_evaluation(\n val_result=results,\n split_name=split_name,\n epoch=cur_epoch,\n )\n\n def unwrap_dist_model(self, model):\n if self.use_distributed:\n return model.module\n else:\n return model\n\n def create_loaders(\n self,\n datasets,\n num_workers,\n batch_sizes,\n is_trains,\n collate_fns,\n dataset_ratios=None,\n ):\n \"\"\"\n Create dataloaders for training and validation.\n \"\"\"\n\n def _create_loader(dataset, num_workers, bsz, is_train, collate_fn):\n # create a single dataloader for each split\n if isinstance(dataset, ChainDataset) or isinstance(\n dataset, wds.DataPipeline\n ):\n # wds.WebdDataset instance are chained together\n # webdataset.DataPipeline has its own sampler and collate_fn\n loader = iter(\n DataLoader(\n dataset,\n batch_size=bsz,\n num_workers=num_workers,\n pin_memory=True,\n )\n )\n else:\n # map-style dataset are concatenated together\n # setup distributed sampler\n if self.use_distributed:\n sampler = DistributedSampler(\n dataset,\n shuffle=is_train,\n num_replicas=get_world_size(),\n rank=get_rank(),\n )\n if not self.use_dist_eval_sampler:\n # e.g. 
retrieval evaluation\n sampler = sampler if is_train else None\n else:\n sampler = None\n\n loader = DataLoader(\n dataset,\n batch_size=bsz,\n num_workers=num_workers,\n pin_memory=True,\n sampler=sampler,\n shuffle=sampler is None and is_train,\n collate_fn=collate_fn,\n drop_last=True if is_train else False,\n )\n loader = PrefetchLoader(loader)\n\n if is_train:\n loader = IterLoader(loader, use_distributed=self.use_distributed)\n\n return loader\n\n loaders = []\n\n for dataset, bsz, is_train, collate_fn in zip(\n datasets, batch_sizes, is_trains, collate_fns\n ):\n if isinstance(dataset, list) or isinstance(dataset, tuple):\n loader = MultiIterLoader(\n loaders=[\n _create_loader(d, num_workers, bsz, is_train, collate_fn[i])\n for i, d in enumerate(dataset)\n ],\n ratios=dataset_ratios,\n )\n else:\n loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn)\n\n loaders.append(loader)\n\n return loaders\n\n @main_process\n def _save_checkpoint(self, cur_epoch, is_best=False):\n \"\"\"\n Save the checkpoint at the current epoch.\n \"\"\"\n model_no_ddp = self.unwrap_dist_model(self.model)\n param_grad_dic = {\n k: v.requires_grad for (k, v) in model_no_ddp.named_parameters()\n }\n state_dict = model_no_ddp.state_dict()\n for k in list(state_dict.keys()):\n if k in param_grad_dic.keys() and not param_grad_dic[k]:\n # delete parameters that do not require gradient\n del state_dict[k]\n save_obj = {\n \"model\": state_dict,\n \"optimizer\": self.optimizer.state_dict(),\n \"config\": self.config.to_dict(),\n \"scaler\": self.scaler.state_dict() if self.scaler else None,\n \"epoch\": cur_epoch,\n }\n save_to = os.path.join(\n self.output_dir,\n \"checkpoint_{}.pth\".format(\"best\" if is_best else cur_epoch),\n )\n logging.info(\"Saving checkpoint at epoch {} to {}.\".format(cur_epoch, save_to))\n torch.save(save_obj, save_to)\n\n def _reload_best_model(self, model):\n \"\"\"\n Load the best checkpoint for evaluation.\n \"\"\"\n checkpoint_path = os.path.join(self.output_dir, \"checkpoint_best.pth\")\n\n logging.info(\"Loading checkpoint from {}.\".format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path, map_location=\"cpu\")\n try:\n model.load_state_dict(checkpoint[\"model\"])\n except RuntimeError as e:\n logging.warning(\n \"\"\"\n Key mismatch when loading checkpoint. 
This is expected if only part of the model is saved.\n Trying to load the model with strict=False.\n \"\"\"\n )\n model.load_state_dict(checkpoint[\"model\"], strict=False)\n return model\n\n def _load_checkpoint(self, url_or_filename):\n \"\"\"\n Resume from a checkpoint.\n \"\"\"\n if is_url(url_or_filename):\n cached_file = download_cached_file(\n url_or_filename, check_hash=False, progress=True\n )\n checkpoint = torch.load(cached_file, map_location=self.device)\n elif os.path.isfile(url_or_filename):\n checkpoint = torch.load(url_or_filename, map_location=self.device)\n else:\n raise RuntimeError(\"checkpoint url or path is invalid\")\n\n state_dict = checkpoint[\"model\"]\n self.unwrap_dist_model(self.model).load_state_dict(state_dict)\n\n self.optimizer.load_state_dict(checkpoint[\"optimizer\"])\n if self.scaler and \"scaler\" in checkpoint:\n self.scaler.load_state_dict(checkpoint[\"scaler\"])\n\n self.start_epoch = checkpoint[\"epoch\"] + 1\n logging.info(\"Resume checkpoint from {}\".format(url_or_filename))\n\n @main_process\n def log_stats(self, stats, split_name):\n if isinstance(stats, dict):\n log_stats = {**{f\"{split_name}_{k}\": v for k, v in stats.items()}}\n with open(os.path.join(self.output_dir, \"log.txt\"), \"a\") as f:\n f.write(json.dumps(log_stats) + \"\\n\")\n elif isinstance(stats, list):\n pass\n\n @main_process\n def log_config(self):\n with open(os.path.join(self.output_dir, \"log.txt\"), \"a\") as f:\n f.write(json.dumps(self.config.to_dict(), indent=4) + \"\\n\")"
}
] | import datetime
import logging
import os
import time
import torch
import torch.distributed as dist
import webdataset as wds
from lavis.common.dist_utils import download_cached_file, is_main_process, main_process
from lavis.common.registry import registry
from lavis.common.utils import is_url
from lavis.datasets.data_utils import concat_datasets, reorg_datasets_by_split
from lavis.runners.runner_base import RunnerBase
from torch.utils.data.dataset import ChainDataset | 8,067 | assert (
"agg_metrics" in val_log
), "No agg_metrics found in validation log."
agg_metrics = val_log["agg_metrics"]
if agg_metrics > best_agg_metric and split_name == "val":
best_iters, best_agg_metric = end_iters, agg_metrics
self._save_checkpoint(end_iters, is_best=True)
val_log.update({"best_iters": best_iters})
self.log_stats(val_log, split_name)
else:
# if no validation split is provided, we just save the checkpoint at the end of each inner epoch.
if not self.evaluate_only:
self._save_checkpoint(end_iters, is_best=False)
if self.evaluate_only:
break
dist.barrier()
# testing phase
self.evaluate(cur_epoch=self.cur_epoch)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logging.info("Training time {}".format(total_time_str))
def train_iters(self, epoch, start_iters):
# train by iterations
self.model.train()
return self.task.train_iters(
epoch=epoch,
start_iters=start_iters,
iters_per_inner_epoch=self.iters_per_inner_epoch,
model=self.model,
data_loader=self.train_loader,
optimizer=self.optimizer,
scaler=self.scaler,
lr_scheduler=self.lr_scheduler,
cuda_enabled=self.cuda_enabled,
log_freq=self.log_freq,
accum_grad_iters=self.accum_grad_iters,
)
@main_process
def _save_checkpoint(self, cur_iters, is_best=False):
save_obj = {
"model": self.unwrap_dist_model(self.model).state_dict(),
"optimizer": self.optimizer.state_dict(),
"config": self.config.to_dict(),
"scaler": self.scaler.state_dict() if self.scaler else None,
"iters": cur_iters,
}
save_to = os.path.join(
self.output_dir,
"checkpoint_{}.pth".format("best" if is_best else cur_iters),
)
logging.info("Saving checkpoint at iters {} to {}.".format(cur_iters, save_to))
torch.save(save_obj, save_to)
def _load_checkpoint(self, url_or_filename):
"""
Resume from a checkpoint.
"""
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location=self.device)
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location=self.device)
else:
raise RuntimeError("checkpoint url or path is invalid")
state_dict = checkpoint["model"]
self.unwrap_dist_model(self.model).load_state_dict(state_dict)
self.optimizer.load_state_dict(checkpoint["optimizer"])
if self.scaler and "scaler" in checkpoint:
self.scaler.load_state_dict(checkpoint["scaler"])
self.start_iters = checkpoint["iters"] + 1
logging.info("Resume checkpoint from {}".format(url_or_filename))
@property
def dataloaders(self) -> dict:
"""
A property to get and create dataloaders by split just in need.
If no train_dataset_ratio is provided, concatenate map-style datasets and
chain wds.DataPipe datasets separately. Training set becomes a tuple
(ConcatDataset, ChainDataset), both are optional but at least one of them is
required. The resultant ConcatDataset and ChainDataset will be sampled evenly.
If train_dataset_ratio is provided, create a MultiIterLoader to sample
each dataset by ratios during training.
        Multiple datasets are currently not supported for validation and test.
Returns:
dict: {split_name: (tuples of) dataloader}
"""
if self._dataloaders is None:
            # reorganize datasets by split and concatenate/chain if necessary
dataset_ratios = self.config.run_cfg.get("train_dataset_ratios", None)
if dataset_ratios is None:
# concatenate map-style datasets and chain wds.DataPipe datasets separately
# training set becomes a tuple (ConcatDataset, ChainDataset), both are
# optional but at least one of them is required. The resultant ConcatDataset
# and ChainDataset will be sampled evenly.
logging.info(
"dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline)."
)
datasets = reorg_datasets_by_split(self.datasets)
| """
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
@registry.register_runner("runner_iter")
class RunnerIter(RunnerBase):
"""
Run training based on the number of iterations. This is common when
    the training dataset size is large. Under the hood, the logic is similar to
epoch-based training by considering every #iters_per_inner_epoch as an
inner epoch.
In iter-based runner, after every #iters_per_inner_epoch steps, we
1) do a validation epoch;
2) schedule the learning rate;
3) save the checkpoint.
We refer every #iters_per_inner_epoch steps as an inner epoch.
"""
def __init__(self, cfg, task, model, datasets, job_id):
super().__init__(cfg, task, model, datasets, job_id)
self.start_iters = 0
self.max_iters = int(self.config.run_cfg.get("max_iters", -1))
assert self.max_iters > 0, "max_iters must be greater than 0."
self.iters_per_inner_epoch = int(
self.config.run_cfg.get("iters_per_inner_epoch", -1)
)
assert (
self.iters_per_inner_epoch > 0
), "iters_per_inner_epoch must be greater than 0."
@property
def max_epoch(self):
return int(self.max_iters / self.iters_per_inner_epoch)
@property
def cur_epoch(self):
try:
return self.train_loader.epoch
except AttributeError:
            # pipeline data (e.g. LAION) is streaming, has no concept of epoch
return 0
def _progress(self, cur_iters):
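        # e.g. cur_epoch=0 and cur_iters=2000 -> "0_iters=2000"; used as the `cur_epoch`
        # tag when running validation in train() below.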
return "{}_iters={}".format(self.cur_epoch, cur_iters)
def train(self):
start_time = time.time()
best_agg_metric = 0
best_iters = 0
self.log_config()
# resume from checkpoint if specified
if not self.evaluate_only and self.resume_ckpt_path is not None:
self._load_checkpoint(self.resume_ckpt_path)
for start_iters in range(
self.start_iters, self.max_iters, self.iters_per_inner_epoch
):
end_iters = start_iters + self.iters_per_inner_epoch
# training phase
if not self.evaluate_only:
logging.info(
"Start training, max_iters={}, in total {} inner epochs.".format(
self.max_iters, int(self.max_iters / self.iters_per_inner_epoch)
)
)
train_stats = self.train_iters(self.cur_epoch, start_iters)
self.log_stats(split_name="train", stats=train_stats)
self._save_checkpoint(end_iters, is_best=False)
# evaluation phase
if len(self.valid_splits) > 0:
for split_name in self.valid_splits:
logging.info("Evaluating on {}.".format(split_name))
val_log = self.eval_epoch(
split_name=split_name, cur_epoch=self._progress(end_iters)
)
if val_log is not None:
if is_main_process():
assert (
"agg_metrics" in val_log
), "No agg_metrics found in validation log."
agg_metrics = val_log["agg_metrics"]
if agg_metrics > best_agg_metric and split_name == "val":
best_iters, best_agg_metric = end_iters, agg_metrics
self._save_checkpoint(end_iters, is_best=True)
val_log.update({"best_iters": best_iters})
self.log_stats(val_log, split_name)
else:
# if no validation split is provided, we just save the checkpoint at the end of each inner epoch.
if not self.evaluate_only:
self._save_checkpoint(end_iters, is_best=False)
if self.evaluate_only:
break
dist.barrier()
# testing phase
self.evaluate(cur_epoch=self.cur_epoch)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logging.info("Training time {}".format(total_time_str))
def train_iters(self, epoch, start_iters):
# train by iterations
self.model.train()
return self.task.train_iters(
epoch=epoch,
start_iters=start_iters,
iters_per_inner_epoch=self.iters_per_inner_epoch,
model=self.model,
data_loader=self.train_loader,
optimizer=self.optimizer,
scaler=self.scaler,
lr_scheduler=self.lr_scheduler,
cuda_enabled=self.cuda_enabled,
log_freq=self.log_freq,
accum_grad_iters=self.accum_grad_iters,
)
@main_process
def _save_checkpoint(self, cur_iters, is_best=False):
save_obj = {
"model": self.unwrap_dist_model(self.model).state_dict(),
"optimizer": self.optimizer.state_dict(),
"config": self.config.to_dict(),
"scaler": self.scaler.state_dict() if self.scaler else None,
"iters": cur_iters,
}
save_to = os.path.join(
self.output_dir,
"checkpoint_{}.pth".format("best" if is_best else cur_iters),
)
logging.info("Saving checkpoint at iters {} to {}.".format(cur_iters, save_to))
torch.save(save_obj, save_to)
def _load_checkpoint(self, url_or_filename):
"""
Resume from a checkpoint.
"""
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location=self.device)
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location=self.device)
else:
raise RuntimeError("checkpoint url or path is invalid")
state_dict = checkpoint["model"]
self.unwrap_dist_model(self.model).load_state_dict(state_dict)
self.optimizer.load_state_dict(checkpoint["optimizer"])
if self.scaler and "scaler" in checkpoint:
self.scaler.load_state_dict(checkpoint["scaler"])
self.start_iters = checkpoint["iters"] + 1
logging.info("Resume checkpoint from {}".format(url_or_filename))
@property
def dataloaders(self) -> dict:
"""
A property to get and create dataloaders by split just in need.
If no train_dataset_ratio is provided, concatenate map-style datasets and
chain wds.DataPipe datasets separately. Training set becomes a tuple
(ConcatDataset, ChainDataset), both are optional but at least one of them is
required. The resultant ConcatDataset and ChainDataset will be sampled evenly.
If train_dataset_ratio is provided, create a MultiIterLoader to sample
each dataset by ratios during training.
        Multiple datasets are currently not supported for validation and test.
Returns:
dict: {split_name: (tuples of) dataloader}
"""
if self._dataloaders is None:
            # reorganize datasets by split and concatenate/chain if necessary
dataset_ratios = self.config.run_cfg.get("train_dataset_ratios", None)
if dataset_ratios is None:
# concatenate map-style datasets and chain wds.DataPipe datasets separately
# training set becomes a tuple (ConcatDataset, ChainDataset), both are
# optional but at least one of them is required. The resultant ConcatDataset
# and ChainDataset will be sampled evenly.
logging.info(
"dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline)."
)
datasets = reorg_datasets_by_split(self.datasets) | self.datasets = concat_datasets(datasets) | 5 | 2023-11-14 08:57:59+00:00 | 12k |
ml4bio/USPNet | Net/New_ComModel.py | [
{
"identifier": "MultiHeadAttention",
"path": "Net/SelfAttentionTorch.py",
"snippet": "class MultiHeadAttention(nn.Module):\n\n def __init__(self,\n config\n ):\n \"\"\"Multi-head attention.\n :param in_features: Size of each input sample.\n :param head_num: Number of heads.\n :param bias: Whether to use the bias term.\n :param activation: The activation after each linear transformation.\n \"\"\"\n super(MultiHeadAttention, self).__init__()\n\n in_features = config['d_model']\n head_num = config['h']\n bias = config['bias']\n activation = config['activation']\n\n if in_features % head_num != 0:\n raise ValueError('`in_features`({}) should be divisible by `head_num`({})'.format(in_features, head_num))\n self.in_features = in_features\n self.head_num = head_num\n self.activation = activation\n self.bias = bias\n self.linear_q = nn.Linear(in_features, in_features, bias)\n self.linear_k = nn.Linear(in_features, in_features, bias)\n self.linear_v = nn.Linear(in_features, in_features, bias)\n self.linear_o = nn.Linear(in_features, in_features, bias)\n\n def forward(self, q, k, v, mask=None):\n q, k, v = self.linear_q(q), self.linear_k(k), self.linear_v(v)\n if self.activation is not None:\n q = self.activation(q)\n k = self.activation(k)\n v = self.activation(v)\n\n q = self._reshape_to_batches(q)\n k = self._reshape_to_batches(k)\n v = self._reshape_to_batches(v)\n if mask is not None:\n mask = mask.repeat(self.head_num, 1, 1)\n y = ScaledDotProductAttention()(q, k, v, mask)\n y = self._reshape_from_batches(y)\n\n y = self.linear_o(y)\n if self.activation is not None:\n y = self.activation(y)\n return y\n\n @staticmethod\n def gen_history_mask(x):\n \"\"\"Generate the mask that only uses history data.\n :param x: Input tensor.\n :return: The mask.\n \"\"\"\n batch_size, seq_len, _ = x.size()\n return torch.tril(torch.ones(seq_len, seq_len)).view(1, seq_len, seq_len).repeat(batch_size, 1, 1)\n\n def _reshape_to_batches(self, x):\n batch_size, seq_len, in_feature = x.size()\n sub_dim = in_feature // self.head_num\n return x.reshape(batch_size, seq_len, self.head_num, sub_dim)\\\n .permute(0, 2, 1, 3)\\\n .reshape(batch_size * self.head_num, seq_len, sub_dim)\n\n def _reshape_from_batches(self, x):\n batch_size, seq_len, in_feature = x.size()\n batch_size //= self.head_num\n out_dim = in_feature * self.head_num\n return x.reshape(batch_size, self.head_num, seq_len, in_feature)\\\n .permute(0, 2, 1, 3)\\\n .reshape(batch_size, seq_len, out_dim)\n\n def extra_repr(self):\n return 'in_features={}, head_num={}, bias={}, activation={}'.format(\n self.in_features, self.head_num, self.bias, self.activation,\n )"
},
{
"identifier": "TransformerEncoder",
"path": "Net/transformer.py",
"snippet": "class TransformerEncoder(nn.Module):\n ''' A neural network Transformer Encoder '''\n\n def __init__(self, vocab_size, max_sequence_length, qty_encoder_layer=1, qty_attention_head=8,\n dim_k=32, dim_v=32, dim_word_vector=256, dim_model=256, dim_inner_hidden=128, output_size=3,\n dropout=0.2, attn_dropout=0.1, embedding=False):\n super(TransformerEncoder, self).__init__()\n positions = max_sequence_length # counting UNK\n\n self.max_sequence_length = max_sequence_length\n self.dim_model = dim_model\n\n # Embedding containing sentence order information\n self.position_encoder = nn.Embedding(positions, dim_word_vector, padding_idx=0)\n self.position_encoder.weight.data = position_encoding_init(positions, dim_word_vector)\n\n # Embedding vector of words. TODO: test with word2vec\n self.word_embedding_layer = nn.Embedding(vocab_size, dim_word_vector, padding_idx=0)\n\n # Create a set of encoder layers, given the quantity informed in\n self.encoder_layers = nn.ModuleList([\n EncoderLayer(dim_model, dim_inner_hidden, qty_attention_head, dim_k, dim_v, dropout=dropout, attn_dropout=attn_dropout)\n for _ in range(qty_encoder_layer)\n ])\n\n # whether do embedding before attention module\n self.embedding = embedding\n logger.info('''Transformer Model:\n - max sequence length = {}\n - encoder layers = {}\n - attention heads = {}\n '''.format(max_sequence_length, qty_encoder_layer, qty_attention_head))\n\n def get_trainable_parameters(self):\n \"\"\" Avoid updating the position encoding \"\"\"\n position_parameters = set(map(id, self.position_encoder.parameters()))\n return (p for p in self.parameters() if id(p) not in position_parameters)\n\n def forward(self, sequence):\n if(self.embedding):\n # lookup word embedding layer\n word_embedding = self.word_embedding_layer(sequence)\n else:\n word_embedding = sequence\n encoder_output = word_embedding\n\n for encoder_layer in self.encoder_layers:\n encoder_output, attentions = encoder_layer(encoder_output)\n\n return encoder_output\n\n def get_positions(self, sequence):\n \"\"\"\n Get position\n :param sequence: input tensor\n :return: array with the order of each element. Example: [23, 45, 67, 54, PAD, PAD] ---> [1, 2, 3, 4, 0, 0]\n \"\"\"\n\n PADDING = 0\n positions = [[pos + 1 if word != PADDING else 0 for pos, word in enumerate(instance)] for instance in sequence]\n return torch.autograd.Variable(torch.LongTensor(positions), volatile=False).cuda()"
},
{
"identifier": "CRF",
"path": "Net/CRF.py",
"snippet": "class CRF(nn.Module):\n \"\"\"Conditional random field.\n\n This module implements a conditional random field [LMP01]_. The forward computation\n of this class computes the log likelihood of the given sequence of tags and\n emission score tensor. This class also has `~CRF.decode` method which finds\n the best tag sequence given an emission score tensor using `Viterbi algorithm`_.\n\n Args:\n num_tags: Number of tags.\n batch_first: Whether the first dimension corresponds to the size of a minibatch.\n reweight_ratio: Used to solve imbalance problem. The idea is from\n \"DeepCNF-D: Predicting Protein Order/Disorder Regions by\n Weighted Deep Convolutional Neural Fields\"\n\n Attributes:\n start_transitions (`~torch.nn.Parameter`): Start transition score tensor of size\n ``(num_tags,)``.\n end_transitions (`~torch.nn.Parameter`): End transition score tensor of size\n ``(num_tags,)``.\n transitions (`~torch.nn.Parameter`): Transition score tensor of size\n ``(num_tags, num_tags)``.\n\n\n .. [LMP01] Lafferty, J., McCallum, A., Pereira, F. (2001).\n \"Conditional random fields: Probabilistic models for segmenting and\n labeling sequence data\". *Proc. 18th International Conf. on Machine\n Learning*. Morgan Kaufmann. pp. 282–289.\n\n .. _Viterbi algorithm: https://en.wikipedia.org/wiki/Viterbi_algorithm\n \"\"\"\n\n def __init__(self, num_tags: int, batch_first: bool = False, reweight_ratio = None) -> None:\n if num_tags <= 0:\n raise ValueError(f'invalid number of tags: {num_tags}')\n super().__init__()\n self.num_tags = num_tags\n self.batch_first = batch_first\n self.start_transitions = nn.Parameter(torch.empty(num_tags))\n self.end_transitions = nn.Parameter(torch.empty(num_tags))\n self.transitions = nn.Parameter(torch.empty(num_tags, num_tags))\n\n self.reset_parameters()\n\n\n self.reweight_ratio = reweight_ratio\n\n def reset_parameters(self) -> None:\n \"\"\"Initialize the transition parameters.\n\n The parameters will be initialized randomly from a uniform distribution\n between -0.1 and 0.1.\n \"\"\"\n nn.init.uniform_(self.start_transitions, -0.1, 0.1)\n nn.init.uniform_(self.end_transitions, -0.1, 0.1)\n nn.init.uniform_(self.transitions, -0.1, 0.1)\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(num_tags={self.num_tags})'\n\n def forward(\n self,\n emissions: torch.Tensor,\n tags: torch.LongTensor,\n mask: Optional[torch.ByteTensor] = None,\n reduction: str = 'sum',\n ) -> torch.Tensor:\n \"\"\"Compute the conditional log likelihood of a sequence of tags given emission scores.\n\n Args:\n emissions (`~torch.Tensor`): Emission score tensor of size\n ``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,\n ``(batch_size, seq_length, num_tags)`` otherwise.\n tags (`~torch.LongTensor`): Sequence of tags tensor of size\n ``(seq_length, batch_size)`` if ``batch_first`` is ``False``,\n ``(batch_size, seq_length)`` otherwise.\n mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``\n if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.\n reduction: Specifies the reduction to apply to the output:\n ``none|sum|mean|token_mean``. ``none``: no reduction will be applied.\n ``sum``: the output will be summed over batches. ``mean``: the output will be\n averaged over batches. ``token_mean``: the output will be averaged over tokens.\n\n Returns:\n `~torch.Tensor`: The log likelihood. 
This will have size ``(batch_size,)`` if\n reduction is ``none``, ``()`` otherwise.\n \"\"\"\n self._validate(emissions, tags=tags, mask=mask)\n if reduction not in ('none', 'sum', 'mean', 'token_mean'):\n raise ValueError(f'invalid reduction: {reduction}')\n if mask is None:\n mask = torch.ones_like(tags, dtype=torch.uint8)\n\n if self.batch_first:\n emissions = emissions.transpose(0, 1)\n tags = tags.transpose(0, 1)\n mask = mask.transpose(0, 1)\n if(self.reweight_ratio!=None):\n # shape: (batch_size,)\n numerator = self._compute_score_reweight(emissions, tags, mask)\n # shape: (batch_size,)\n denominator = self._compute_normalizer_reweight(emissions, mask)\n else:\n # shape: (batch_size,)\n numerator = self._compute_score(emissions, tags, mask)\n # shape: (batch_size,)\n denominator = self._compute_normalizer(emissions, mask)\n\n # shape: (batch_size,)\n llh = numerator - denominator\n\n if reduction == 'none':\n return llh\n if reduction == 'sum':\n return llh.sum()\n if reduction == 'mean':\n return llh.mean()\n assert reduction == 'token_mean'\n return llh.sum() / mask.type_as(emissions).sum()\n\n def decode(self, emissions: torch.Tensor,\n mask: Optional[torch.ByteTensor] = None) -> List[List[int]]:\n \"\"\"Find the most likely tag sequence using Viterbi algorithm.\n\n Args:\n emissions (`~torch.Tensor`): Emission score tensor of size\n ``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,\n ``(batch_size, seq_length, num_tags)`` otherwise.\n mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``\n if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.\n\n Returns:\n List of list containing the best tag sequence for each batch.\n \"\"\"\n self._validate(emissions, mask=mask)\n if mask is None:\n mask = emissions.new_ones(emissions.shape[:2], dtype=torch.uint8)\n\n if self.batch_first:\n emissions = emissions.transpose(0, 1)\n mask = mask.transpose(0, 1)\n\n return self._viterbi_decode(emissions, mask)\n\n def decode_based_on_prob(self, emissions: torch.Tensor,\n mask: Optional[torch.ByteTensor] = None, padding=False, reduce=False, torch_form=False) -> List[List[int]]:\n \"\"\"Find the most likely tag sequence using prob matrix.\n\n Args:\n emissions (`~torch.Tensor`): Emission score tensor of size\n ``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,\n ``(batch_size, seq_length, num_tags)`` otherwise.\n mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``\n if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.\n\n Returns:\n List of list containing the best tag sequence for each batch.\n \"\"\"\n self._validate(emissions, mask=mask)\n if mask is None:\n mask = emissions.new_ones(emissions.shape[:2], dtype=torch.uint8)\n\n if self.batch_first:\n emissions = emissions.transpose(0, 1)\n mask = mask.transpose(0, 1)\n\n # shape: (batch_size, seq_length, num_tags)\n prob_matrix = self._compute_prob(emissions, mask, padding=padding)\n\n if(reduce):\n if(torch_form):\n result = torch.argmax(prob_matrix, dim=2)\n else:\n result =torch.argmax(prob_matrix, dim=2).detach().cpu().numpy()\n else:\n if(torch_form):\n result = prob_matrix\n else:\n result = prob_matrix.detach().cpu().numpy()\n return result\n\n def _validate(\n self,\n emissions: torch.Tensor,\n tags: Optional[torch.LongTensor] = None,\n mask: Optional[torch.ByteTensor] = None) -> None:\n if emissions.dim() != 3:\n raise ValueError(f'emissions must have dimension of 3, got {emissions.dim()}')\n if 
emissions.size(2) != self.num_tags:\n raise ValueError(\n f'expected last dimension of emissions is {self.num_tags}, '\n f'got {emissions.size(2)}')\n\n if tags is not None:\n if emissions.shape[:2] != tags.shape:\n raise ValueError(\n 'the first two dimensions of emissions and tags must match, '\n f'got {tuple(emissions.shape[:2])} and {tuple(tags.shape)}')\n\n if mask is not None:\n if emissions.shape[:2] != mask.shape:\n raise ValueError(\n 'the first two dimensions of emissions and mask must match, '\n f'got {tuple(emissions.shape[:2])} and {tuple(mask.shape)}')\n no_empty_seq = not self.batch_first and mask[0].all()\n no_empty_seq_bf = self.batch_first and mask[:, 0].all()\n if not no_empty_seq and not no_empty_seq_bf:\n raise ValueError('mask of the first timestep must all be on')\n\n def _compute_score(\n self, emissions: torch.Tensor, tags: torch.LongTensor,\n mask: torch.ByteTensor) -> torch.Tensor:\n # emissions: (seq_length, batch_size, num_tags)\n # tags: (seq_length, batch_size)\n # mask: (seq_length, batch_size)\n assert emissions.dim() == 3 and tags.dim() == 2\n assert emissions.shape[:2] == tags.shape\n assert emissions.size(2) == self.num_tags\n assert mask.shape == tags.shape\n assert mask[0].all()\n\n seq_length, batch_size = tags.shape\n mask = mask.type_as(emissions)\n\n # Start transition score and first emission\n # shape: (batch_size,)\n score = self.start_transitions[tags[0]]\n score += emissions[0, torch.arange(batch_size), tags[0]]\n\n for i in range(1, seq_length):\n # Transition score to next tag, only added if next timestep is valid (mask == 1)\n # shape: (batch_size,)\n\n score += self.transitions[tags[i - 1], tags[i]] * mask[i]\n\n # Emission score for next tag, only added if next timestep is valid (mask == 1)\n # shape: (batch_size,)\n score += emissions[i, torch.arange(batch_size), tags[i]] * mask[i]\n\n # End transition score\n # shape: (batch_size,)\n seq_ends = mask.long().sum(dim=0) - 1\n # shape: (batch_size,)\n last_tags = tags[seq_ends, torch.arange(batch_size)]\n # shape: (batch_size,)\n score += self.end_transitions[last_tags]\n\n\n return score\n\n def _compute_normalizer(\n self, emissions: torch.Tensor, mask: torch.ByteTensor) -> torch.Tensor:\n # emissions: (seq_length, batch_size, num_tags)\n # mask: (seq_length, batch_size)\n assert emissions.dim() == 3 and mask.dim() == 2\n assert emissions.shape[:2] == mask.shape\n assert emissions.size(2) == self.num_tags\n assert mask[0].all()\n\n seq_length = emissions.size(0)\n\n # Start transition score and first emission; score has size of\n # (batch_size, num_tags) where for each batch, the j-th column stores\n # the score that the first timestep has tag j\n # shape: (batch_size, num_tags)\n score = self.start_transitions + emissions[0]\n\n for i in range(1, seq_length):\n # Broadcast score for every possible next tag\n # shape: (batch_size, num_tags, 1)\n broadcast_score = score.unsqueeze(2)\n\n # Broadcast emission score for every possible current tag\n # shape: (batch_size, 1, num_tags)\n broadcast_emissions = emissions[i].unsqueeze(1)\n\n # Compute the score tensor of size (batch_size, num_tags, num_tags) where\n # for each sample, entry at row i and column j stores the sum of scores of all\n # possible tag sequences so far that end with transitioning from tag i to tag j\n # and emitting\n # shape: (batch_size, num_tags, num_tags)\n next_score = broadcast_score + self.transitions + broadcast_emissions\n\n # Sum over all possible current tags, but we're in score space, so a sum\n # becomes a 
log-sum-exp: for each sample, entry i stores the sum of scores of\n # all possible tag sequences so far, that end in tag i\n # shape: (batch_size, num_tags)\n next_score = torch.logsumexp(next_score, dim=1)\n\n # Set score to the next score if this timestep is valid (mask == 1)\n # shape: (batch_size, num_tags)\n score = torch.where(mask[i].unsqueeze(1), next_score, score)\n\n # End transition score\n # shape: (batch_size, num_tags)\n score += self.end_transitions\n\n # Sum (log-sum-exp) over all possible tags\n # shape: (batch_size,)\n return torch.logsumexp(score, dim=1)\n\n def _compute_normalizer_reverse(\n self, emissions: torch.Tensor, mask: torch.ByteTensor) -> torch.Tensor:\n # emissions: (seq_length, batch_size, num_tags)\n # mask: (seq_length, batch_size)\n assert emissions.dim() == 3 and mask.dim() == 2\n assert emissions.shape[:2] == mask.shape\n assert emissions.size(2) == self.num_tags\n assert mask[0].all()\n\n seq_length = emissions.size(0)\n\n # Start transition score and first emission; score has size of\n # (batch_size, num_tags) where for each batch, the j-th column stores\n # the score that the first timestep has tag j\n # shape: (batch_size, num_tags)\n score = self.end_transitions + emissions[seq_length-1]\n\n for i in range(seq_length-2, -1, -1):\n # Broadcast score for every possible next tag\n # shape: (batch_size, num_tags, 1)\n broadcast_score = score.unsqueeze(2)\n\n # Broadcast emission score for every possible current tag\n # shape: (batch_size, 1, num_tags)\n broadcast_emissions = emissions[i].unsqueeze(1)\n\n # Compute the score tensor of size (batch_size, num_tags, num_tags) where\n # for each sample, entry at row i and column j stores the sum of scores of all\n # possible tag sequences so far that end with transitioning from tag i to tag j\n # and emitting\n # shape: (batch_size, num_tags, num_tags)\n next_score = broadcast_score + self.transitions + broadcast_emissions\n\n # Sum over all possible current tags, but we're in score space, so a sum\n # becomes a log-sum-exp: for each sample, entry i stores the sum of scores of\n # all possible tag sequences so far, that end in tag i\n # shape: (batch_size, num_tags)\n next_score = torch.logsumexp(next_score, dim=1)\n\n # Set score to the next score if this timestep is valid (mask == 1)\n # shape: (batch_size, num_tags)\n score = torch.where(mask[i].unsqueeze(1), next_score, score)\n\n # End transition score\n # shape: (batch_size, num_tags)\n score += self.start_transitions\n\n # Sum (log-sum-exp) over all possible tags\n # shape: (batch_size,)\n return torch.logsumexp(score, dim=1)\n\n def _compute_prob(\n self, emissions: torch.Tensor, mask: torch.ByteTensor, padding=True) -> torch.Tensor:\n\n assert emissions.dim() == 3 and mask.dim() == 2\n assert emissions.shape[:2] == mask.shape\n assert emissions.size(2) == self.num_tags\n assert mask[0].all()\n\n forward_matrix = self._compute_forward(emissions, mask).permute(0, 2, 1)\n backward_matrix = self._compute_backward(emissions, mask).permute(0, 2, 1)\n batch_size, seq_length, num_tags = backward_matrix.shape\n\n normalizer = self._compute_normalizer(emissions, mask).unsqueeze(1).repeat_interleave(seq_length, dim=1)\\\n .unsqueeze(2).repeat_interleave(num_tags, dim=2)\n if (padding):\n num_tags = num_tags - 1\n # shape: (batch_size, seq_length, num_tags)\n\n prob_matrix = torch.exp(forward_matrix + backward_matrix - normalizer)\n # clamp to [0, 1]\n # shape: (batch_size, seq_length, num_tags)\n prob_matrix = prob_matrix.clamp(0, 1)\n\n # normalize\n # shape: 
(batch_size * seq_length, num_tags)\n if (padding):\n prob_matrix = flatten_lists_3D(prob_matrix)[:, 0:-1]\n else:\n prob_matrix = flatten_lists_3D(prob_matrix)\n # shape: (batch_size * seq_length, )\n sum_prob = prob_matrix.sum(dim=1)\n # shape: (batch_size * seq_length, 3)\n sum_prob = prob_matrix.sum(dim=1).unsqueeze(1).repeat_interleave(num_tags, dim=1)\n # if for specific position in a sequence, the total prob != 0, then we calculate according to prob_pos / total_prob;\n prob_matrix = prob_matrix / sum_prob\n # shape: (batch_size, seq_length, num_tags)\n prob_matrix = prob_matrix.reshape((batch_size, seq_length, num_tags))\n\n return prob_matrix\n\n def _compute_forward(\n self, emissions: torch.Tensor, mask: torch.ByteTensor) -> torch.Tensor:\n # emissions: (seq_length, batch_size, num_tags)\n # mask: (seq_length, batch_size)\n\n seq_length = emissions.size(0)\n\n score_list = []\n\n # Start transition score and first emission; score has size of\n # (batch_size, num_tags) where for each batch, the j-th column stores\n # the score that the first timestep has tag j\n # shape: (batch_size, num_tags)\n score = self.start_transitions + emissions[0]\n score_list.append(score.unsqueeze(2))\n for i in range(1, seq_length):\n # Broadcast score for every possible next tag\n # shape: (batch_size, num_tags, 1)\n broadcast_score = score.unsqueeze(2)\n\n # Broadcast emission score for every possible current tag\n # shape: (batch_size, 1, num_tags)\n broadcast_emissions = emissions[i].unsqueeze(1)\n\n # Compute the score tensor of size (batch_size, num_tags, num_tags) where\n # for each sample, entry at row i and column j stores the sum of scores of all\n # possible tag sequences so far that end with transitioning from tag i to tag j\n # and emitting\n # shape: (batch_size, num_tags, num_tags)\n next_score = broadcast_score + self.transitions + broadcast_emissions\n\n # Sum over all possible current tags, but we're in score space, so a sum\n # becomes a log-sum-exp: for each sample, entry i stores the sum of scores of\n # all possible tag sequences so far, that end in tag i\n # shape: (batch_size, num_tags)\n next_score = torch.logsumexp(next_score, dim=1)\n\n # Set score to the next score if this timestep is valid (mask == 1)\n # shape: (batch_size, num_tags)\n score = torch.where(mask[i].unsqueeze(1), next_score, score)\n if(i<seq_length-1):\n score_list.append(score.unsqueeze(2))\n # End transition score\n # shape: (batch_size, num_tags)\n score += self.end_transitions\n score_list.append(score.unsqueeze(2))\n\n forward_matrix = torch.cat(score_list, dim=2)\n\n # Sum (log-sum-exp) over all possible tags\n # shape: (batch_size,)\n return forward_matrix\n\n def _compute_backward(\n self, emissions: torch.Tensor, mask: torch.ByteTensor) -> torch.Tensor:\n # emissions: (seq_length, batch_size, num_tags)\n # mask: (seq_length, batch_size)\n\n seq_length = emissions.size(0)\n\n score_list = []\n\n # Start transition score and first emission; score has size of\n # (batch_size, num_tags) where for each batch, the j-th column stores\n # the score that the first timestep has tag j\n # shape: (batch_size, num_tags)\n score = self.end_transitions + emissions[seq_length - 1]\n\n score_list.append(score.unsqueeze(2))\n\n for i in range(seq_length - 2, -1, -1):\n # Broadcast score for every possible next tag\n # shape: (batch_size, num_tags, 1)\n broadcast_score = score.unsqueeze(2)\n\n # Broadcast emission score for every possible current tag\n # shape: (batch_size, 1, num_tags)\n broadcast_emissions = 
emissions[i].unsqueeze(1)\n\n # Compute the score tensor of size (batch_size, num_tags, num_tags) where\n # for each sample, entry at row i and column j stores the sum of scores of all\n # possible tag sequences so far that end with transitioning from tag i to tag j\n # and emitting\n # shape: (batch_size, num_tags, num_tags)\n next_score = broadcast_score + self.transitions + broadcast_emissions\n\n # Sum over all possible current tags, but we're in score space, so a sum\n # becomes a log-sum-exp: for each sample, entry i stores the sum of scores of\n # all possible tag sequences so far, that end in tag i\n # shape: (batch_size, num_tags)\n next_score = torch.logsumexp(next_score, dim=1)\n\n # Set score to the next score if this timestep is valid (mask == 1)\n # shape: (batch_size, num_tags)\n score = torch.where(mask[i].unsqueeze(1), next_score, score)\n if(i>0):\n score_list.append(score.unsqueeze(2))\n\n # End transition score\n # shape: (batch_size, num_tags)\n score += self.start_transitions\n score_list.append(score.unsqueeze(2))\n score_list.reverse()\n\n backward_matrix = torch.cat(score_list, dim=2)\n\n # Sum (log-sum-exp) over all possible tags\n # shape: (batch_size,)\n return backward_matrix\n\n def _viterbi_decode(self, emissions: torch.FloatTensor,\n mask: torch.ByteTensor) -> List[List[int]]:\n # emissions: (seq_length, batch_size, num_tags)\n # mask: (seq_length, batch_size)\n assert emissions.dim() == 3 and mask.dim() == 2\n assert emissions.shape[:2] == mask.shape\n assert emissions.size(2) == self.num_tags\n assert mask[0].all()\n\n seq_length, batch_size = mask.shape\n\n # Start transition and first emission\n # shape: (batch_size, num_tags)\n score = self.start_transitions + emissions[0]\n history = []\n\n # score is a tensor of size (batch_size, num_tags) where for every batch,\n # value at column j stores the score of the best tag sequence so far that ends\n # with tag j\n # history saves where the best tags candidate transitioned from; this is used\n # when we trace back the best tag sequence\n\n # Viterbi algorithm recursive case: we compute the score of the best tag sequence\n # for every possible next tag\n for i in range(1, seq_length):\n # Broadcast viterbi score for every possible next tag\n # shape: (batch_size, num_tags, 1)\n broadcast_score = score.unsqueeze(2)\n\n # Broadcast emission score for every possible current tag\n # shape: (batch_size, 1, num_tags)\n broadcast_emission = emissions[i].unsqueeze(1)\n\n # Compute the score tensor of size (batch_size, num_tags, num_tags) where\n # for each sample, entry at row i and column j stores the score of the best\n # tag sequence so far that ends with transitioning from tag i to tag j and emitting\n # shape: (batch_size, num_tags, num_tags)\n next_score = broadcast_score + self.transitions + broadcast_emission\n\n # Find the maximum score over all possible current tag\n # shape: (batch_size, num_tags)\n next_score, indices = next_score.max(dim=1)\n\n # Set score to the next score if this timestep is valid (mask == 1)\n # and save the index that produces the next score\n # shape: (batch_size, num_tags)\n score = torch.where(mask[i].unsqueeze(1), next_score, score)\n history.append(indices)\n\n # End transition score\n # shape: (batch_size, num_tags)\n score += self.end_transitions\n\n # Now, compute the best path for each sample\n\n # shape: (batch_size,)\n seq_ends = mask.long().sum(dim=0) - 1\n best_tags_list = []\n\n for idx in range(batch_size):\n # Find the tag which maximizes the score at the last 
timestep; this is our best tag\n # for the last timestep\n _, best_last_tag = score[idx].max(dim=0)\n best_tags = [best_last_tag.item()]\n\n # We trace back where the best last tag comes from, append that to our best tag\n # sequence, and trace it back again, and so on\n for hist in reversed(history[:seq_ends[idx]]):\n best_last_tag = hist[idx][best_tags[-1]]\n best_tags.append(best_last_tag.item())\n\n # Reverse the order because we start from the last timestep\n best_tags.reverse()\n best_tags_list.append(best_tags)\n\n return best_tags_list\n\n def _compute_score_reweight(\n self, emissions_unmask: torch.Tensor, tags: torch.LongTensor,\n mask: torch.ByteTensor) -> torch.Tensor:\n # emissions: (seq_length, batch_size, num_tags)\n # tags: (seq_length, batch_size)\n # mask: (seq_length, batch_size)\n assert emissions_unmask.dim() == 3 and tags.dim() == 2\n assert emissions_unmask.shape[:2] == tags.shape\n assert emissions_unmask.size(2) == self.num_tags\n assert mask.shape == tags.shape\n assert mask[0].all()\n\n device = emissions_unmask.device\n\n seq_length, batch_size, num_tags = emissions_unmask.shape\n num_tags, _ = self.transitions.shape\n\n # add reweight mask to emissions\n list = []\n for tag in self.reweight_ratio.keys():\n list.append(self.reweight_ratio[tag] * torch.ones((seq_length, batch_size, 1)))\n e_mask = torch.cat(list, dim=2).to(device)\n emissions = e_mask * emissions_unmask\n\n list = []\n # add reweight mask to transitions\n for tag in self.reweight_ratio.keys():\n list.append(self.reweight_ratio[tag] * torch.ones((1, num_tags)))\n t_mask = torch.cat(list, dim=0).to(device)\n transitions_mask = t_mask * self.transitions\n\n # add reweight mask to start_transitions\n reweight_factor = torch.tensor([self.reweight_ratio[tag] for tag in self.reweight_ratio.keys()]).to(device)\n start_transitions = self.start_transitions * reweight_factor\n\n # add reweight mask to end_transitions\n end_transitions = self.end_transitions * reweight_factor\n\n seq_length, batch_size = tags.shape\n mask = mask.type_as(emissions)\n\n # Start transition score and first emission\n # shape: (batch_size,)\n score = start_transitions[tags[0]]\n score += emissions[0, torch.arange(batch_size), tags[0]]\n\n for i in range(1, seq_length):\n # Transition score to next tag, only added if next timestep is valid (mask == 1)\n # shape: (batch_size,)\n\n score += transitions_mask[tags[i - 1], tags[i]] * mask[i]\n\n # Emission score for next tag, only added if next timestep is valid (mask == 1)\n # shape: (batch_size,)\n score += emissions[i, torch.arange(batch_size), tags[i]] * mask[i]\n\n # End transition score\n # shape: (batch_size,)\n seq_ends = mask.long().sum(dim=0) - 1\n # shape: (batch_size,)\n last_tags = tags[seq_ends, torch.arange(batch_size)]\n # shape: (batch_size,)\n score += end_transitions[last_tags]\n\n return score\n\n def _compute_normalizer_reweight(\n self, emissions_unmask: torch.Tensor, mask: torch.ByteTensor) -> torch.Tensor:\n # emissions: (seq_length, batch_size, num_tags)\n # mask: (seq_length, batch_size)\n assert emissions_unmask.dim() == 3 and mask.dim() == 2\n assert emissions_unmask.shape[:2] == mask.shape\n assert emissions_unmask.size(2) == self.num_tags\n assert mask[0].all()\n\n device = emissions_unmask.device\n\n seq_length, batch_size, num_tags = emissions_unmask.shape\n num_tags, _ = self.transitions.shape\n\n # add reweight mask to emissions\n list = []\n for tag in self.reweight_ratio.keys():\n list.append(self.reweight_ratio[tag] * torch.ones((seq_length, 
batch_size, 1)))\n e_mask = torch.cat(list, dim=2).to(device)\n\n emissions = e_mask * emissions_unmask\n\n # add reweight mask to transitions\n list = []\n for tag in self.reweight_ratio.keys():\n list.append(self.reweight_ratio[tag] * torch.ones((1, num_tags)))\n t_mask = torch.cat(list, dim=0).to(device)\n transitions_mask = t_mask * self.transitions\n\n # add reweight mask to start_transitions\n reweight_factor = torch.tensor([self.reweight_ratio[tag] for tag in self.reweight_ratio.keys()]).to(device)\n start_transitions = self.start_transitions * reweight_factor\n\n # add reweight mask to end_transitions\n end_transitions = self.end_transitions * reweight_factor\n\n seq_length = emissions.size(0)\n\n # Start transition score and first emission; score has size of\n # (batch_size, num_tags) where for each batch, the j-th column stores\n # the score that the first timestep has tag j\n # shape: (batch_size, num_tags)\n score = start_transitions + emissions[0]\n\n for i in range(1, seq_length):\n # Broadcast score for every possible next tag\n # shape: (batch_size, num_tags, 1)\n broadcast_score = score.unsqueeze(2)\n\n # Broadcast emission score for every possible current tag\n # shape: (batch_size, 1, num_tags)\n broadcast_emissions = emissions[i].unsqueeze(1)\n\n # Compute the score tensor of size (batch_size, num_tags, num_tags) where\n # for each sample, entry at row i and column j stores the sum of scores of all\n # possible tag sequences so far that end with transitioning from tag i to tag j\n # and emitting\n # shape: (batch_size, num_tags, num_tags)\n next_score = broadcast_score + transitions_mask + broadcast_emissions\n\n # Sum over all possible current tags, but we're in score space, so a sum\n # becomes a log-sum-exp: for each sample, entry i stores the sum of scores of\n # all possible tag sequences so far, that end in tag i\n # shape: (batch_size, num_tags)\n next_score = torch.logsumexp(next_score, dim=1)\n\n # Set score to the next score if this timestep is valid (mask == 1)\n # shape: (batch_size, num_tags)\n score = torch.where(mask[i].unsqueeze(1), next_score, score)\n\n # End transition score\n # shape: (batch_size, num_tags)\n score += end_transitions\n\n # Sum (log-sum-exp) over all possible tags\n # shape: (batch_size,)\n return torch.logsumexp(score, dim=1)"
},
{
"identifier": "LSTM_attention",
"path": "Net/LSTM_Attention.py",
"snippet": "class LSTM_attention(nn.Module):\n ''' Compose with two layers '''\n def __init__(self,config):\n super(LSTM_attention, self).__init__()\n data = config[0]\n lstm_config = config[1]\n self.lstm = BLSTM(lstm_config)\n #self.slf_attn = multihead_attention(data.HP_hidden_dim,num_heads = data.num_attention_head, dropout_rate=data.HP_dropout)\n self.label_attn = multihead_attention(data['d_model'], num_heads=data['h'],dropout_rate=data['dropout'])\n self.linear = nn.Linear(data['d_model'], data['d_model'])\n self.act = nn.ReLU()\n self.droplstm = nn.Dropout(data['dropout'])\n self.gpu = data['gpu']\n if self.gpu:\n self.lstm =self.lstm.cuda()\n self.label_attn = self.label_attn.cuda()\n\n def forward(self,lstm_out,label_embs):\n\n lstm_out = self.lstm(lstm_out)\n\n lstm_out = self.droplstm(lstm_out)\n\n # lstm_out (seq_length * batch_size * hidden)\n label_attention_output = self.label_attn(lstm_out, label_embs, label_embs)\n # label_attention_output (batch_size, seq_len, embed_size)\n lstm_out = torch.cat([lstm_out, label_attention_output], -1)\n return lstm_out"
}
] | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from Net.LSTM import *
from Net.CNN import *
from Net.SelfAttentionTorch import MultiHeadAttention
from Net.transformer import TransformerEncoder
from torch.autograd import Variable
from torch.nn import Parameter
from Net.CRF import CRF
from Net.LSTM_Attention import LSTM_attention | 10,222 |
embedding_feature_dim_msa = 768
embedding_feature_dim_pro = 1024
class NormedLinear(nn.Module):
def __init__(self, in_features, out_features):
super(NormedLinear, self).__init__()
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
def forward(self, x):
out = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
return out
class Attention_CRF(nn.Module):
def __init__(self, config, config1, cnn_configs, lstm_lan_config, lstm_config,
use_CRF=False, use_attention=True, reweight_ratio=None):
super(Attention_CRF, self).__init__()
self.num_classes = 20
self.max_len = config1['max_text_len']
self.embedding = nn.Embedding(num_embeddings=config['vocab_size'], embedding_dim=config['embedding_size'])
self.ef1 = 512
self.ef2 = 144
self.ef3 = 32
self.csef = 11
self.ef4 = 256
self.ef5 = 256
self.ef6 = 64
if (use_CRF):
|
embedding_feature_dim_msa = 768
embedding_feature_dim_pro = 1024
class NormedLinear(nn.Module):
def __init__(self, in_features, out_features):
super(NormedLinear, self).__init__()
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
def forward(self, x):
out = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
return out
class Attention_CRF(nn.Module):
def __init__(self, config, config1, cnn_configs, lstm_lan_config, lstm_config,
use_CRF=False, use_attention=True, reweight_ratio=None):
super(Attention_CRF, self).__init__()
self.num_classes = 20
self.max_len = config1['max_text_len']
self.embedding = nn.Embedding(num_embeddings=config['vocab_size'], embedding_dim=config['embedding_size'])
self.ef1 = 512
self.ef2 = 144
self.ef3 = 32
self.csef = 11
self.ef4 = 256
self.ef5 = 256
self.ef6 = 64
if (use_CRF): | self.crf = CRF(num_tags=11, reweight_ratio=reweight_ratio)#original: num_tags=9 | 2 | 2023-11-14 08:19:42+00:00 | 12k |
doodledood/chat-flock | examples/manual_hierarchical_participant.py | [
{
"identifier": "InMemoryChatDataBackingStore",
"path": "chatflock/backing_stores/in_memory.py",
"snippet": "class InMemoryChatDataBackingStore(ChatDataBackingStore):\n messages: List[ChatMessage]\n participants: Dict[str, ChatParticipant]\n last_message_id: Optional[int] = None\n\n def __init__(\n self, messages: Optional[List[ChatMessage]] = None, participants: Optional[List[ChatParticipant]] = None\n ):\n self.messages = messages or []\n self.participants = {participant.name: participant for participant in (participants or [])}\n self.last_message_id = None if len(self.messages) == 0 else self.messages[-1].id\n\n def get_messages(self) -> List[ChatMessage]:\n return self.messages\n\n def add_message(self, sender_name: str, content: str, timestamp: Optional[datetime.datetime] = None) -> ChatMessage:\n self.last_message_id = self.last_message_id + 1 if self.last_message_id is not None else 1\n\n message = ChatMessage(\n id=self.last_message_id,\n sender_name=sender_name,\n content=content,\n timestamp=timestamp or datetime.datetime.now(),\n )\n\n self.messages.append(message)\n\n return message\n\n def clear_messages(self):\n self.messages = []\n self.last_message_id = None\n\n def get_active_participants(self) -> List[ActiveChatParticipant]:\n participants = list(self.participants.values())\n active_participants = [\n participant for participant in participants if isinstance(participant, ActiveChatParticipant)\n ]\n\n return active_participants\n\n def get_non_active_participants(self) -> List[ChatParticipant]:\n participants = list(self.participants.values())\n participants = [\n participant for participant in participants if not isinstance(participant, ActiveChatParticipant)\n ]\n\n return participants\n\n def get_active_participant_by_name(self, name: str) -> Optional[ActiveChatParticipant]:\n if name not in self.participants:\n return None\n\n participant = self.participants[name]\n if not isinstance(participant, ActiveChatParticipant):\n return None\n\n return participant\n\n def get_non_active_participant_by_name(self, name: str) -> Optional[ChatParticipant]:\n if name not in self.participants:\n return None\n\n participant = self.participants[name]\n if isinstance(participant, ActiveChatParticipant):\n return None\n\n return participant\n\n def add_participant(self, participant: ChatParticipant) -> None:\n if participant.name in self.participants:\n raise ChatParticipantAlreadyJoinedToChatError(participant.name)\n\n self.participants[participant.name] = participant\n\n def remove_participant(self, participant: ChatParticipant) -> None:\n if participant.name not in self.participants:\n raise ChatParticipantNotJoinedToChatError(participant.name)\n\n self.participants.pop(participant.name)\n\n def has_active_participant_with_name(self, participant_name: str) -> bool:\n if participant_name in self.participants:\n participant = self.participants[participant_name]\n return isinstance(participant, ActiveChatParticipant)\n\n return False\n\n def has_non_active_participant_with_name(self, participant_name: str) -> bool:\n if participant_name in self.participants:\n participant = self.participants[participant_name]\n return not isinstance(participant, ActiveChatParticipant)\n\n return False"
},
{
"identifier": "Chat",
"path": "chatflock/base.py",
"snippet": "class Chat:\n backing_store: ChatDataBackingStore\n renderer: ChatRenderer\n name: Optional[str] = None\n max_total_messages: Optional[int] = None\n hide_messages: bool = False\n\n def __init__(\n self,\n backing_store: ChatDataBackingStore,\n renderer: ChatRenderer,\n initial_participants: Optional[Sequence[ChatParticipant]] = None,\n name: Optional[str] = None,\n max_total_messages: Optional[int] = None,\n hide_messages: bool = False,\n ):\n if max_total_messages is not None and max_total_messages <= 0:\n raise ValueError(\"Max total messages must be None or greater than 0.\")\n\n self.backing_store = backing_store\n self.renderer = renderer\n self.name = name\n self.hide_messages = hide_messages\n self.max_total_messages = max_total_messages\n\n for i, participant in enumerate(initial_participants or []):\n self.add_participant(participant)\n\n def add_participant(self, participant: ChatParticipant) -> None:\n if self.has_active_participant_with_name(participant.name) or self.has_non_active_participant_with_name(\n participant.name\n ):\n raise ChatParticipantAlreadyJoinedToChatError(participant.name)\n\n self.backing_store.add_participant(participant)\n\n all_participants = (\n self.backing_store.get_active_participants() + self.backing_store.get_non_active_participants()\n )\n for participant in all_participants:\n participant.on_participant_joined_chat(chat=self, participant=participant)\n\n def remove_participant(self, participant: ChatParticipant) -> None:\n self.backing_store.remove_participant(participant)\n\n active_participants = self.backing_store.get_active_participants()\n non_active_participants = self.backing_store.get_non_active_participants()\n all_participants = active_participants + non_active_participants\n\n for participant in all_participants:\n participant.on_participant_left_chat(chat=self, participant=participant)\n\n def add_message(self, sender_name: str, content: str) -> None:\n sender = self.backing_store.get_active_participant_by_name(sender_name)\n if sender is None:\n raise ChatParticipantNotJoinedToChatError(sender_name)\n\n message = self.backing_store.add_message(sender_name=sender_name, content=content)\n\n self.renderer.render_new_chat_message(chat=self, message=message)\n\n active_participants = self.backing_store.get_active_participants()\n non_active_participants = self.backing_store.get_non_active_participants()\n all_participants = active_participants + non_active_participants\n\n for participant in all_participants:\n participant.on_new_chat_message(chat=self, message=message)\n\n def get_messages(self) -> List[ChatMessage]:\n return self.backing_store.get_messages()\n\n def clear_messages(self):\n self.backing_store.clear_messages()\n\n def get_active_participants(self) -> List[ActiveChatParticipant]:\n return self.backing_store.get_active_participants()\n\n def get_non_active_participants(self) -> List[ChatParticipant]:\n return self.backing_store.get_non_active_participants()\n\n def get_active_participant_by_name(self, name: str) -> Optional[ActiveChatParticipant]:\n return self.backing_store.get_active_participant_by_name(name=name)\n\n def get_non_active_participant_by_name(self, name: str) -> Optional[ChatParticipant]:\n return self.backing_store.get_non_active_participant_by_name(name=name)\n\n def has_active_participant_with_name(self, participant_name: str) -> bool:\n return self.backing_store.has_active_participant_with_name(participant_name=participant_name)\n\n def has_non_active_participant_with_name(self, 
participant_name: str) -> bool:\n return self.backing_store.has_non_active_participant_with_name(participant_name=participant_name)\n\n @property\n def active_participants_str(self):\n return \"\\n\\n\".join([participant.detailed_str() for participant in self.get_active_participants()])"
},
{
"identifier": "LangChainBasedAIChatConductor",
"path": "chatflock/conductors/langchain.py",
"snippet": "class LangChainBasedAIChatConductor(ChatConductor):\n def __init__(\n self,\n chat_model: BaseChatModel,\n goal: str = \"No explicit goal provided.\",\n composition_generator: Optional[ChatCompositionGenerator] = None,\n interaction_schema: Optional[str] = None,\n retriever: Optional[BaseRetriever] = None,\n spinner: Optional[Halo] = None,\n tools: Optional[List[BaseTool]] = None,\n chat_model_args: Optional[Dict[str, Any]] = None,\n ):\n self.chat_model = chat_model\n self.chat_model_args = chat_model_args or {}\n self.goal = goal\n self.tools = tools\n self.retriever = retriever\n self.composition_generator = composition_generator\n self.interaction_schema = interaction_schema\n self.spinner = spinner\n\n self.composition_initialized = False\n\n def create_next_speaker_system_prompt(self, chat: \"Chat\") -> str:\n chat_messages = chat.get_messages()\n\n if self.retriever is not None and len(chat_messages) > 0:\n relevant_docs = self.get_relevant_docs(messages=chat_messages)\n else:\n relevant_docs = []\n\n system_message = StructuredString(\n sections=[\n Section(\n name=\"Mission\",\n text=\"Select the next speaker in the conversation based on the previous messages in the \"\n \"conversation and an optional INTERACTION SCHEMA. If it seems to you that the chat \"\n \"should end instead of selecting a next speaker, terminate it.\",\n ),\n Section(name=\"Rules\", list=[\"You can only select one of the participants in the group chat.\"]),\n Section(\n name=\"Process\",\n list=[\n \"Look at the last message in the conversation and determine who should speak next based on the \"\n \"INTERACTION SCHEMA, if provided.\",\n \"If you determine that the chat should end, you should return the \"\n \"string TERMINATE instead of a participant name. For example, when the goal has been achieved, \"\n \", it is impossible to reach, or if the user asks to terminate the chat.\",\n ],\n ),\n Section(\n name=\"Input\",\n list=[\n \"Chat goal\",\n \"Currently active participants in the conversation\",\n \"Speaker interaction schema\",\n \"Previous messages from the conversation\",\n ],\n ),\n Section(\n name=\"Output\",\n text=\"The name of the next speaker in the conversation. Or, TERMINATE if the chat should end, \"\n \"instead.\",\n ),\n Section(name=\"Example Outputs\", list=['\"John\"', '\"TERMINATE\"']),\n Section(\n name=\"Additional Context for Selection\",\n text=\"None\"\n if len(relevant_docs) == 0\n else \"The following documents may be relevant for your selection of the \"\n \"next speaker, only use them for context for a better response, \"\n \"if applicable\",\n sub_sections=[\n Section(name=f\"Document {i + 1}\", text=f\"```{doc.page_content}```\")\n for i, doc in enumerate(relevant_docs)\n ],\n ),\n ]\n )\n\n return str(system_message)\n\n def create_next_speaker_first_human_prompt(self, chat: \"Chat\", goal: str) -> str:\n messages = chat.get_messages()\n messages_list = [f\"- {message.sender_name}: {message.content}\" for message in messages]\n\n participants = chat.get_active_participants()\n\n prompt = StructuredString(\n sections=[\n Section(name=\"Goal\", text=goal or \"No explicit goal provided.\"),\n Section(\n name=\"Currently Active Participants\", list=[f\"{str(participant)}\" for participant in participants]\n ),\n Section(\n name=\"Interaction Schema\",\n text=self.interaction_schema or \"Not provided. 
Use your best judgement.\",\n ),\n Section(\n name=\"Chat Messages\",\n text=\"No messages yet.\" if len(messages_list) == 0 else None,\n list=messages_list if len(messages_list) > 0 else [],\n ),\n ]\n )\n\n return str(prompt)\n\n def prepare_chat(self, chat: \"Chat\", **kwargs: Any) -> None:\n # If a composition generator is provided, generate a new composition for the chat before starting.\n if self.composition_generator is not None and not self.composition_initialized:\n composition_suggestion = kwargs.get(\"composition_suggestion\", None)\n new_composition = self.composition_generator.generate_composition_for_chat(\n chat=chat,\n goal=self.goal,\n composition_suggestion=composition_suggestion,\n interaction_schema=self.interaction_schema,\n )\n\n # Sync participants with the new composition.\n current_active_participants = chat.get_active_participants()\n new_participants_names = {p.name for p in new_composition.participants}\n\n for participant in new_composition.participants:\n # Add missing participants.\n if not chat.has_active_participant_with_name(participant.name):\n chat.add_participant(participant)\n continue\n\n # Remove other participants not mentioned in the new composition.\n if participant.name not in new_participants_names:\n chat.remove_participant(participant)\n\n self.interaction_schema = new_composition.participants_interaction_schema\n\n self.composition_initialized = True\n\n super().prepare_chat(chat=chat, **kwargs)\n\n def select_next_speaker(self, chat: Chat) -> Optional[ActiveChatParticipant]:\n participants = chat.get_active_participants()\n if len(participants) == 0:\n return None\n\n if self.spinner is not None:\n if chat.name is None:\n self.spinner.start(text=\"The Chat Conductor is selecting the next speaker...\")\n else:\n self.spinner.start(text=f\"The Chat Conductor ({chat.name}) is selecting the next speaker...\")\n\n # Ask the AI to select the next speaker.\n messages = [\n SystemMessage(content=self.create_next_speaker_system_prompt(chat=chat)),\n HumanMessage(content=self.create_next_speaker_first_human_prompt(chat=chat, goal=self.goal)),\n ]\n\n result = self.execute_messages(messages=messages)\n next_speaker_name = result.strip()\n\n while not chat.has_active_participant_with_name(next_speaker_name) and next_speaker_name != \"TERMINATE\":\n messages.append(AIMessage(content=next_speaker_name))\n messages.append(\n HumanMessage(\n content=f'Speaker \"{next_speaker_name}\" is not a participant in the chat. 
Choose another one.'\n )\n )\n\n result = self.execute_messages(messages=messages)\n next_speaker_name = result.strip()\n\n if next_speaker_name == \"TERMINATE\":\n if self.spinner is not None:\n if chat.name is None:\n self.spinner.stop_and_persist(\n symbol=\"👥\", text=\"The Chat Conductor has decided to terminate the chat.\"\n )\n else:\n self.spinner.stop_and_persist(\n symbol=\"👥\", text=f\"The Chat Conductor ({chat.name}) has decided to terminate the \" f\"chat.\"\n )\n\n return None\n\n next_speaker = chat.get_active_participant_by_name(next_speaker_name)\n if next_speaker is None:\n raise ChatParticipantNotJoinedToChatError(next_speaker_name)\n\n if self.spinner is not None:\n if chat.name is None:\n self.spinner.succeed(\n text=f'The Chat Conductor has selected \"{str(next_speaker)}\" ' f\"as the next speaker.\"\n )\n else:\n self.spinner.succeed(\n text=f'The Chat Conductor ({chat.name}) has selected \"{str(next_speaker)}\" ' f\"as the next speaker.\"\n )\n\n return next_speaker\n\n def execute_messages(self, messages: Sequence[BaseMessage]) -> str:\n return execute_chat_model_messages(\n messages=messages,\n chat_model=self.chat_model,\n tools=self.tools,\n spinner=self.spinner,\n chat_model_args=self.chat_model_args,\n )\n\n def get_relevant_docs(self, messages: Sequence[ChatMessage]) -> List[Document]:\n if self.retriever is None:\n return []\n\n return self.retriever.get_relevant_documents(query=messages[-1].content)"
},
{
"identifier": "RoundRobinChatConductor",
"path": "chatflock/conductors/round_robin.py",
"snippet": "class RoundRobinChatConductor(ChatConductor):\n def select_next_speaker(self, chat: Chat) -> Optional[ActiveChatParticipant]:\n active_participants = chat.get_active_participants()\n if len(active_participants) <= 0:\n return None\n\n messages = chat.get_messages()\n last_message = messages[-1] if len(messages) > 0 else None\n\n if last_message is not None and self.is_termination_message(last_message):\n return None\n\n last_speaker = last_message.sender_name if last_message is not None else None\n if last_speaker is None:\n return next(iter(active_participants))\n\n # Rotate to the next participant in the list.\n participant_names = [participant.name for participant in active_participants]\n\n if last_speaker not in participant_names:\n next_speaker_name = participant_names[0]\n else:\n last_speaker_index = participant_names.index(last_speaker)\n next_speaker_index = (last_speaker_index + 1) % len(participant_names)\n next_speaker_name = participant_names[next_speaker_index]\n\n next_speaker = chat.get_active_participant_by_name(next_speaker_name)\n if next_speaker is None or not isinstance(next_speaker, ActiveChatParticipant):\n raise ChatParticipantNotJoinedToChatError(next_speaker_name)\n\n return next_speaker\n\n def get_chat_result(self, chat: \"Chat\") -> str:\n result = super().get_chat_result(chat=chat)\n\n try:\n idx = result.rindex(\"TERMINATE\")\n result = result[:idx].strip()\n except ValueError:\n result = result.strip()\n\n return result\n\n def is_termination_message(self, message: ChatMessage) -> bool:\n return message.content.strip().endswith(\"TERMINATE\")"
},
{
"identifier": "GroupBasedChatParticipant",
"path": "chatflock/participants/group.py",
"snippet": "class GroupBasedChatParticipant(ActiveChatParticipant):\n inner_chat_conductor: ChatConductor\n inner_chat: Chat\n mission: str\n spinner: Optional[Halo] = None\n clear_inner_chat_before_responding: bool = False\n\n def __init__(\n self,\n group_name: str,\n chat: Chat,\n mission: str,\n chat_conductor: ChatConductor,\n clear_inner_chat_before_responding: bool = False,\n spinner: Optional[Halo] = None,\n **kwargs: Any,\n ) -> None:\n self.inner_chat = chat\n self.inner_chat_conductor = chat_conductor\n self.clear_inner_chat_before_responding = clear_inner_chat_before_responding\n self.mission = mission\n self.spinner = spinner\n\n # Make sure the inner chat is aligned\n self.inner_chat.name = group_name\n\n super().__init__(name=group_name, **kwargs)\n\n # Make sure the chat & conductor are initialized, as it may be a dynamic chat with\n # no participants yet.\n self.inner_chat_conductor.prepare_chat(chat=self.inner_chat)\n\n def respond_to_chat(self, chat: \"Chat\") -> str:\n if self.clear_inner_chat_before_responding:\n self.inner_chat.clear_messages()\n\n prev_spinner_text = None\n if self.spinner is not None:\n prev_spinner_text = self.spinner.text\n self.spinner.stop_and_persist(symbol=\"👥\", text=f\"{self.name} started a discussion.\")\n self.spinner.start(text=f\"{self.name} is discussing...\")\n\n messages = chat.get_messages()\n conversation_str = \"\\n\".join([f\"- {message.sender_name}: {message.content}\" for message in messages])\n\n leader = self.inner_chat.get_active_participants()[0]\n request_for_group, _ = get_response(\n query=\"Please translate the request for yourself in the external conversation into a collaboration \"\n \"request for your internal group. This is the external conversation:\"\n f\"\\n```{conversation_str}```\\n\\nThe group should understand exactly what to discuss, what to \"\n \"decide on, and how to respond back based on this. \",\n answerer=leader,\n )\n group_response = self.inner_chat_conductor.initiate_dialog(\n chat=self.inner_chat, initial_message=request_for_group\n )\n\n if self.spinner is not None:\n self.spinner.succeed(text=f\"{self.name} concluded their discussion.\")\n if prev_spinner_text is not None:\n self.spinner.start(text=prev_spinner_text)\n messages = self.inner_chat.get_messages()\n group_response_conversation_str = \"\\n\".join(\n [f\"- {message.sender_name}: {message.content}\" for message in messages]\n )\n leader_response_back, _ = get_response(\n query=str(\n StructuredString(\n sections=[\n Section(name=\"External Conversation\", text=conversation_str),\n Section(name=\"Internal Group Conversation\", text=group_response_conversation_str),\n Section(\n name=\"Task\",\n text=\"You are a part of the EXTERNAL CONVERSATION and need to respond back. \"\n \"You and your group have collaborated on a response back for the \"\n \"EXTERNAL CONVERSATION. Please transform the INTERNAL GROUP CONVERSATION into \"\n \"a proper, in-context response back (in your name) for the EXTERNAL CONVERSATION; \"\n \"it should be mainly based on the conclusion of the internal conversation. 
\"\n \"Your response will be sent to the EXTERNAL CONVERSATION verbatim.\",\n ),\n ]\n )\n ),\n answerer=leader,\n )\n\n return leader_response_back\n\n def __str__(self) -> str:\n active_participants = self.inner_chat.get_active_participants()\n\n if len(active_participants) > 0:\n names = [str(p) for p in active_participants]\n return f'{self.name} (Includes: {\", \".join(names)})'\n\n return self.name\n\n def detailed_str(self, level: int = 0) -> str:\n prefix = \" \" * level\n participants = self.inner_chat.get_active_participants()\n members_str = \"\\n\\n\".join([p.detailed_str(level=level + 1) for p in participants])\n\n return (\n f'{prefix}- Name: {self.name}\\n{prefix} Symbol: {self.symbol}\\n{prefix} Mission: \"{self.mission}\"'\n f\"\\n{members_str}\"\n )"
},
{
"identifier": "LangChainBasedAIChatParticipant",
"path": "chatflock/participants/langchain.py",
"snippet": "class LangChainBasedAIChatParticipant(ActiveChatParticipant):\n class Config:\n arbitrary_types_allowed = True\n\n def __init__(\n self,\n name: str,\n chat_model: BaseChatModel,\n symbol: str = \"🤖\",\n role: str = \"AI Assistant\",\n personal_mission: str = \"Be a helpful AI assistant.\",\n other_prompt_sections: Optional[List[Section]] = None,\n retriever: Optional[BaseRetriever] = None,\n tools: Optional[List[BaseTool]] = None,\n chat_model_args: Optional[Dict[str, Any]] = None,\n spinner: Optional[Halo] = None,\n ignore_group_chat_environment: bool = False,\n include_timestamp_in_messages: bool = False,\n **kwargs: Any,\n ):\n super().__init__(name=name, symbol=symbol, **kwargs)\n\n self.role = role\n self.chat_model = chat_model\n self.chat_model_args = chat_model_args or {}\n self.other_prompt_sections = other_prompt_sections or []\n self.ignore_group_chat_environment = ignore_group_chat_environment\n self.include_timestamp_in_messages = include_timestamp_in_messages\n self.retriever = retriever\n self.tools = tools\n self.spinner = spinner\n self.personal_mission = personal_mission\n\n def create_system_message(self, chat: \"Chat\", relevant_docs: Sequence[Document]) -> str:\n now = datetime.now()\n pretty_datetime = now.strftime(\"%m-%d-%Y %H:%M:%S\")\n\n base_sections = [\n Section(name=\"Current Time\", text=pretty_datetime),\n Section(name=\"Name\", text=self.name),\n Section(name=\"Role\", text=self.role),\n Section(name=\"Personal Mission\", text=self.personal_mission),\n Section(\n name=\"Additional Context for Response\",\n text=\"None\"\n if len(relevant_docs) == 0\n else \"The following documents may be relevant for your response, only use \"\n \"them for context for a better response, if applicable\",\n sub_sections=[\n Section(name=f\"Document {i + 1}\", text=f\"```{doc.page_content}```\")\n for i, doc in enumerate(relevant_docs)\n ],\n ),\n Section(\n name=\"Response Message Format\",\n list=[\n \"Your response should be the message you want to send to the group chat as your own name, \"\n \"role, and personal mission.\",\n \"Must not include any prefix (e.g., timestamp, sender name, etc.).\",\n \"Response must be a message as will be shown in the chat (timestamp and sender name are \"\n \"system-generated for you).\",\n ],\n sub_sections=[\n Section(name=\"Well-Formatted Chat Response Examples\", list=['\"Hello, how are you?\"']),\n Section(\n name=\"Badly-Formatted Chat Response Examples\",\n list=[\n (\n '\"[TIMESTAMP] John: Hello, how are you?\"'\n if self.include_timestamp_in_messages\n else '\"John: Hello, how are you?\"'\n ),\n ],\n ),\n ],\n ),\n ]\n\n active_participants = chat.get_active_participants()\n if self.ignore_group_chat_environment:\n system_message = StructuredString(sections=[*base_sections, *self.other_prompt_sections])\n else:\n system_message = StructuredString(\n sections=[\n *base_sections,\n Section(\n name=\"Chat\",\n sub_sections=[\n Section(name=\"Name\", text=chat.name or \"No name provided. Just a general chat.\"),\n Section(\n name=\"Participants\",\n text=\"\\n\".join(\n [\n f'- {str(p)}{\" -> This is you.\" if p.name == self.name else \"\"}'\n for p in active_participants\n ]\n ),\n ),\n Section(\n name=\"Guidelines\",\n list=[\n \"Your personal mission is the most important thing to you. 
You should always \"\n \"prioritize it.\",\n \"If a chat goal is provided, you should still follow your personal mission but \"\n \"in a way that helps the group achieve the chat goal.\",\n \"If you are the only participant in the chat, you should act as if the chat is now \"\n \"a scratch pad for you to write down your thoughts, ideas, and work on your \"\n \"mission by yourself. \"\n \"In the messages do not refer to another entity, but rather to yourself \"\n \"(I instead of You); the messages should read and sound like \"\n \"your internal thoughts and should be succinct, unless they are concrete work \"\n \"(for example, implementing something, calculating things, etc.). \"\n \"You have all the time in the world to build your thoughts, ideas, and do the \"\n \"work needed. The chat is now your place to think and iterate on your mission and \"\n \" achieve it.\",\n ],\n ),\n Section(\n name=\"Rules\",\n list=[\n \"You do not have to respond directly to the one who sent you a message. You can respond \"\n \"to anyone in the group chat.\",\n \"You cannot have private conversations with other participants. Everyone can see all \"\n \"messages sent by all other participants.\",\n ],\n ),\n Section(\n name=\"Previous Chat Messages\",\n list=[\n \"Messages are prefixed by a timestamp and the sender's name (could also be everyone). \",\n \"The prefix is for context only; it's not actually part of the message they sent. \",\n (\n 'Example: \"[TIMESTAMP] John: Hello, how are you?\"'\n if self.include_timestamp_in_messages\n else 'Example: \"John: Hello, how are you?\"'\n ),\n \"Some messages could have been sent by participants who are no longer a part of this \"\n \"conversation. Use their contents for context only; do not talk to them.\",\n \"In your response only include the message without the prefix.\",\n \"If you are the only participant in the chat, the previous chat messages are your \"\n \" memories or internal thoughts instead.\",\n ],\n ),\n ],\n ),\n *self.other_prompt_sections,\n ]\n )\n\n return str(system_message)\n\n def chat_messages_to_chat_model_messages(\n self, chat_messages: Sequence[ChatMessage], active_participants: Sequence[ActiveChatParticipant]\n ) -> List[BaseMessage]:\n messages: List[BaseMessage] = []\n for i, message in enumerate(chat_messages):\n if self.include_timestamp_in_messages:\n pretty_datetime = message.timestamp.strftime(\"%m-%d-%Y %H:%M:%S\")\n content = f\"[{pretty_datetime}] \"\n else:\n content = \"\"\n\n if self.ignore_group_chat_environment:\n content += f\"{message.sender_name}: {message.content}\"\n else:\n content += message.content\n\n if message.sender_name == self.name:\n if len(active_participants) > 1 or i == len(active_participants) - 1:\n messages.append(AIMessage(content=content))\n else:\n messages.append(HumanMessage(content=content))\n else:\n messages.append(HumanMessage(content=content))\n\n if len(messages) == 0:\n messages.append(HumanMessage(content=f\"SYSTEM: The chat has started.\"))\n\n return messages\n\n def respond_to_chat(self, chat: Chat) -> str:\n if self.spinner is not None:\n self.spinner.start(text=f\"{str(self)} is thinking...\")\n\n chat_messages = chat.get_messages()\n\n if self.retriever is not None and len(chat_messages) > 0:\n relevant_docs = self.get_relevant_docs(messages=chat_messages)\n else:\n relevant_docs = []\n\n system_message = self.create_system_message(chat=chat, relevant_docs=relevant_docs)\n\n active_participants = chat.get_active_participants()\n all_messages = 
self.chat_messages_to_chat_model_messages(chat_messages, active_participants)\n all_messages = [SystemMessage(content=system_message), *all_messages]\n\n message_content = self.execute_messages(messages=all_messages)\n\n if self.spinner is not None:\n self.spinner.stop()\n\n potential_prefix = f\"{self.name}:\"\n if message_content.startswith(potential_prefix):\n message_content = message_content[len(potential_prefix) :].strip()\n\n return message_content\n\n def get_relevant_docs(self, messages: Sequence[ChatMessage]) -> List[Document]:\n if self.retriever is None:\n return []\n\n return self.retriever.get_relevant_documents(query=messages[-1].content)\n\n def execute_messages(self, messages: Sequence[BaseMessage]) -> str:\n return execute_chat_model_messages(\n messages=messages,\n chat_model=self.chat_model,\n tools=self.tools,\n spinner=self.spinner,\n chat_model_args=self.chat_model_args,\n )\n\n def __str__(self) -> str:\n return f\"{self.symbol} {self.name} ({self.role})\"\n\n def detailed_str(self, level: int = 0) -> str:\n prefix = \" \" * level\n\n tool_names = \", \".join([tool.name for tool in self.tools or []])\n if tool_names == \"\":\n tool_names = \"None\"\n\n return (\n f\"{prefix}- Name: {self.name}\\n{prefix} Role: {self.role}\\n{prefix} Symbol: {self.symbol}\\n\"\n f'{prefix} Personal Mission: \"{self.personal_mission}\"\\n{prefix} Tools: {tool_names}'\n )"
},
{
"identifier": "UserChatParticipant",
"path": "chatflock/participants/user.py",
"snippet": "class UserChatParticipant(ActiveChatParticipant):\n def __init__(self, name: str = \"User\", role: str = \"User\", symbol: str = \"👤\", **kwargs: Any):\n super().__init__(name, messages_hidden=True, **kwargs)\n\n self.role = role\n self.symbol = symbol\n\n def respond_to_chat(self, chat: Chat) -> str:\n return input(f\"{self.symbol} ({self.name}): \")\n\n def __str__(self) -> str:\n return f\"{self.symbol} {self.name} ({self.role})\"\n\n def detailed_str(self, level: int = 0) -> str:\n prefix = \" \" * level\n return f\"{prefix}- Name: {self.name}\\n{prefix} Role: {self.role}\\n{prefix} Symbol: {self.symbol}\""
},
{
"identifier": "TerminalChatRenderer",
"path": "chatflock/renderers/terminal.py",
"snippet": "class TerminalChatRenderer(ChatRenderer):\n def __init__(self, print_timestamps: bool = False):\n self.print_timestamps = print_timestamps\n\n def render_new_chat_message(self, chat: Chat, message: ChatMessage) -> None:\n if chat.hide_messages:\n return\n\n pretty_timestamp_with_date = message.timestamp.strftime(\"%m-%d-%Y %H:%M:%S\")\n\n sender = chat.get_active_participant_by_name(message.sender_name)\n if sender is None:\n symbol = \"❓\"\n\n if self.print_timestamps:\n print(f\"[{pretty_timestamp_with_date}] {symbol} {message.sender_name}: {message.content}\")\n else:\n print(f\"{symbol} {message.sender_name}: {message.content}\")\n else:\n if sender.messages_hidden:\n return\n\n if chat.name is None:\n if self.print_timestamps:\n print(f\"[{pretty_timestamp_with_date}] {str(sender)}: {message.content}\")\n else:\n print(f\"{str(sender)}: {message.content}\")\n else:\n if self.print_timestamps:\n print(f\"[{pretty_timestamp_with_date}] {chat.name} > {str(sender)}: {message.content}\")\n else:\n print(f\"{chat.name} > {str(sender)}: {message.content}\")"
},
{
"identifier": "create_chat_model",
"path": "examples/common.py",
"snippet": "def create_chat_model(\n model: str = \"gpt-4-1106-preview\",\n temperature: float = 0.0,\n cache_db_file_path: Optional[str] = \"output/llm_cache.db\",\n) -> BaseChatModel:\n if cache_db_file_path is not None:\n Path(cache_db_file_path).parent.mkdir(parents=True, exist_ok=True)\n\n set_llm_cache(SQLiteCache(database_path=cache_db_file_path))\n\n chat_model = ChatOpenAI(temperature=temperature, model=model)\n\n return chat_model"
}
] | import typer
from dotenv import load_dotenv
from halo import Halo
from chatflock.backing_stores import InMemoryChatDataBackingStore
from chatflock.base import Chat
from chatflock.conductors import LangChainBasedAIChatConductor, RoundRobinChatConductor
from chatflock.participants.group import GroupBasedChatParticipant
from chatflock.participants.langchain import LangChainBasedAIChatParticipant
from chatflock.participants.user import UserChatParticipant
from chatflock.renderers import TerminalChatRenderer
from examples.common import create_chat_model | 8,468 |
def manual_hierarchical_participant(model: str = "gpt-4-1106-preview", temperature: float = 0.0) -> None:
chat_model = create_chat_model(model=model, temperature=temperature)
spinner = Halo(spinner="dots")
comedy_team = GroupBasedChatParticipant(
group_name="Comedy Team",
mission="Collaborate on funny humour-filled responses based on the original request for the user",
chat=Chat(
backing_store=InMemoryChatDataBackingStore(),
renderer=TerminalChatRenderer(),
initial_participants=[
|
def manual_hierarchical_participant(model: str = "gpt-4-1106-preview", temperature: float = 0.0) -> None:
chat_model = create_chat_model(model=model, temperature=temperature)
spinner = Halo(spinner="dots")
comedy_team = GroupBasedChatParticipant(
group_name="Comedy Team",
mission="Collaborate on funny humour-filled responses based on the original request for the user",
chat=Chat(
backing_store=InMemoryChatDataBackingStore(),
renderer=TerminalChatRenderer(),
initial_participants=[ | LangChainBasedAIChatParticipant( | 5 | 2023-11-12 11:10:58+00:00 | 12k |
CryptoFuzzPy/cryptofuzz | cryptofuzz/Wallet.py | [
{
"identifier": "Convertor",
"path": "cryptofuzz/utils.py",
"snippet": "class Convertor:\n def __init__(self):\n super().__init__()\n self.gen = Generator()\n \n def double_sha256(self, data):\n return hashlib.sha256(hashlib.sha256(data).digest()).digest()\n \n def mne_to_seed(self, mnemonic, password=\"\"):\n salt = (\"mnemonic\" + password).encode('utf-8')\n seed = hashlib.pbkdf2_hmac('sha512', mnemonic.encode('utf-8'), salt, 2048)\n return seed[:32]\n\n def mne_to_bytes(self, mnemonic):\n return self.mne_to_seed(mnemonic)\n\n def mne_to_hex(self, mnemonic):\n seed = self.mne_to_seed(mnemonic)\n return self.bytes_to_hex(seed)\n\n def mne_to_wif(self, mnemonic, compress: bool = False):\n seed = self.mne_to_seed(mnemonic)\n return self.bytes_to_wif(seed, compress)\n\n def mne_to_int(self, mnemonic):\n seed = self.mne_to_seed(mnemonic)\n return self.bytes_to_int(seed)\n\n def mne_to_xpub(self, mnemonic):\n seed = self.mne_to_seed(mnemonic)\n return self.bytes_to_xpub(seed)\n\n def mne_to_xprv(self, mnemonic):\n seed = self.mne_to_seed(mnemonic)\n return self.bytes_to_xprv(seed)\n\n def mne_to_addr(self, mnemonic, compress: bool = False):\n seed = self.mne_to_seed(mnemonic)\n return self.bytes_to_addr(seed, compress)\n\n def mne_to_binary(self, mnemonic):\n seed = self.mne_to_seed(mnemonic)\n return self.bytes_to_binary(seed)\n\n def bytes_to_mne(self, seed):\n return Mnemonic().to_mnemonic(seed)\n\n def bytes_to_seed(self, seed):\n return hashlib.pbkdf2_hmac('sha512', seed, b'mnemonic', 2048)\n\n def bytes_to_hex(self, seed):\n return binascii.hexlify(self.bytes_to_seed(seed)).decode('utf-8')\n\n def unHexlify(self, h: str):\n return binascii.unhexlify(h)\n \n def hex_to_bytes(self, hexed):\n return binascii.unhexlify(hexed)\n\n def hex_to_mne(self, hexed: str) -> str:\n seed = self.hex_to_bytes(hexed)\n return self.bytes_to_mne(seed)\n\n def hex_to_wif(self, hexed, compress: bool = False) -> str:\n return self.bytes_to_wif(self.hex_to_bytes(hexed), compress)\n\n def hex_to_xprv(self, hexed: str) -> str:\n return self.bytes_to_xprv(self.hex_to_bytes(hexed))\n\n def hex_to_xpub(self, hexed: str) -> str:\n return self.bytes_to_xpub(self.hex_to_bytes(hexed))\n \n def hex_to_int(self, hexed: str) -> int:\n return int(hexed, 16)\n \n def hex_to_pub(self, hexed: str, compress: bool = False) -> bytes:\n if compress:\n return self.bytes_to_public(self.hex_to_bytes(hexed), True)\n else:\n return self.bytes_to_public(self.hex_to_bytes(hexed), False)\n \n def hex_to_addr(self, hexed: str, compress: bool = False) -> str:\n seed = self.hex_to_bytes(hexed)\n if compress:\n return self.bytes_to_addr(seed, True)\n else:\n return self.bytes_to_addr(seed, False)\n\n def hex_to_binary(self, hexed: str) -> str:\n return self.bytes_to_binary(self.hex_to_bytes(hexed))\n \n def bytes_to_hex(self, seed):\n privatekey_int = int.from_bytes(hashlib.sha256(seed).digest(), byteorder='big')\n self.gen.checkValid(privatekey_int)\n pvkByte = privatekey_int.to_bytes(32, byteorder='big')\n return pvkByte.hex()\n \n def bytes_to_int(self, seed) -> int:\n return int.from_bytes(seed, byteorder='big')\n \n def bytes_to_pub(self, seed_bytes: bytes) -> bytes:\n sk = ecdsa.SigningKey.from_string(seed_bytes[:32], curve=ecdsa.SECP256k1)\n vk = sk.get_verifying_key()\n pub = COMPRESSED_PREFIX2 + vk.to_string()[-32:] if vk.to_string()[-1] % 2 == 0 else b'\\x03' + vk.to_string()[-32:]\n return pub\n \n def bytes_to_public(self, seed: bytes, compress: bool = True) -> bytes:\n sk = ecdsa.SigningKey.from_string(seed, curve=ecdsa.SECP256k1)\n vk = sk.get_verifying_key()\n if compress:\n prefix = 
COMPRESSED_PREFIX2 if vk.pubkey.point.y() % 2 == 0 else COMPRESSED_PREFIX\n return prefix + vk.to_string()[:32]\n else:\n return UNCOMPRESSED_PREFIX + vk.to_string()\n \n def bytes_to_xpub(self, seed: bytes, chain_code=None) -> str:\n if chain_code is None:\n chain_code = os.urandom(32) # .hex\n prefix = self.unHexlify(XPUB_PREFIX)\n FINGERPRINT = ZERO_BYTES + ZERO_BYTES\n pub = self.bytes_to_pub(seed)\n xpub = prefix + MAIN_DIGEST + FINGERPRINT + chain_code + pub\n Hash64 = self.double_sha256(xpub)\n xpub += Hash64[:4]\n xpubBase58 = b58encode(xpub)\n return xpubBase58.decode('utf-8')\n \n def bytes_to_mne(self, byte: bytes):\n seed = byte[:32]\n return Mnemonic(\"english\").to_mnemonic(seed)\n \n def bytes_to_binary(self, bytes_: bytes) -> str:\n if len(bytes_) != 32:\n raise ValueError(\"Input bytes should have a length of 32.\")\n \n # Convert each byte to its binary representation and pad with zeros\n return ''.join(format(byte, '08b') for byte in bytes_)\n \n def bytes_to_wif(self, private_key, compress=True):\n if compress:\n EXTENDED_KEY = MAIN_PREFIX + private_key + MAIN_SUFFIX\n else:\n EXTENDED_KEY = MAIN_PREFIX + private_key\n \n DOUBLE_SHA256 = self.double_sha256(EXTENDED_KEY)\n CHECKSUM = DOUBLE_SHA256[:4]\n \n WIF = b58encode(EXTENDED_KEY + CHECKSUM)\n \n return WIF.decode('utf-8')\n \n def bytes_to_xprv(self, bytes_code: bytes) -> str:\n chain_code = bytes.fromhex(ZERO_BASE_NET)\n child_number = struct.pack('>L', 0)\n key = MAIN_DIGEST_RMD160 + bytes_code # 0x00\n \n xprv_main = VERSION_NETWORK + MAIN_DIGEST_RMD160 + FINGERPRINT_RMD160 + child_number + chain_code + key\n decode_main = base58encodeCheck(b\"\", xprv_main)\n return decode_main\n \n def bytes_to_addr(self, seedBytes: bytes, compress: bool = False) -> str:\n if len(seedBytes) != 32:\n seedBytes = seedBytes[:32]\n elif compress:\n pub = self.bytes_to_public(seedBytes, compress=True)\n return self.pub_to_addr(public_key=pub)\n else:\n pub = self.bytes_to_public(seedBytes, compress=False)\n return self.pub_to_addr(public_key=pub)\n \n # ------------------------------------------------------------\n def pass_to_hex(self, passphrase):\n return hashlib.sha256(passphrase.encode()).hexdigest()\n \n def pass_to_bytes(self, passphrase: str) -> bytes:\n return bytes.fromhex(self.pass_to_hex(passphrase))\n \n def pass_to_addr(self, passphrase, compress=False):\n passBytes = self.pass_to_bytes(passphrase)\n sk = ecdsa.SigningKey.from_string(passBytes, curve=ecdsa.SECP256k1)\n vk = sk.verifying_key\n if compress:\n if vk.pubkey.point.y() & 1:\n pub_key = COMPRESSED_PREFIX + vk.to_string()[:32]\n else:\n pub_key = COMPRESSED_PREFIX2 + vk.to_string()[:32]\n else:\n pub_key = UNCOMPRESSED_PREFIX + vk.to_string()\n sha = hashlib.sha256(pub_key).digest()\n ripemd160 = hashlib.new('ripemd160')\n ripemd160.update(sha)\n \n address = base58_check_encode(ripemd160.digest())\n return \"1\" + address\n \n def pass_to_wif(self, passphrase, compress=False):\n passBytes = self.pass_to_bytes(passphrase)\n return self.bytes_to_wif(passBytes, compress)\n \n def pass_to_xprv(self, passphrase):\n return self.bytes_to_xprv(self.pass_to_bytes(passphrase))\n \n # ------------------------------------------------------------\n \n def pub_to_bytes(self, pubkey, compress=True):\n if compress:\n prefix = (COMPRESSED_PREFIX if pubkey.pubkey.point.y() & 1 else COMPRESSED_PREFIX2)\n return prefix + pubkey.pubkey.point.x().to_bytes(32, 'big')\n else:\n point_x = pubkey.pubkey.point.x().to_bytes(32, 'big')\n point_y = pubkey.pubkey.point.y().to_bytes(32, 
'big')\n return UNCOMPRESSED_PREFIX + point_x + point_y\n \n def pub_to_hex(self, pubkey, compress=True):\n return self.pub_to_bytes(pubkey, compress).hex()\n \n def pub_to_addr(self, public_key: bytes) -> str:\n ripemd160 = hashlib.new('ripemd160')\n ripemd160.update(hashlib.sha256(public_key).digest())\n hashed = MAIN_DIGEST_RMD160 + ripemd160.digest()\n checksum = hashlib.sha256(hashlib.sha256(hashed).digest()).digest()[:4]\n address = hashed + checksum\n return b58encode(address).decode('utf-8')\n \n # ------------------------------------------------------------\n \n def wif_to_bytes(self, wif):\n wif_bytes = b58decode(wif)\n isCompress = wif_bytes[-5] == 0x01 if len(wif_bytes) == 38 else False\n return wif_bytes[1:-5] if isCompress else wif_bytes[1:-4]\n\n def wif_to_binary(self, wif: str) -> str:\n pvkBytes = self.wif_to_bytes(wif)\n return self.bytes_to_binary(pvkBytes)\n def wif_to_addr(self, wif: str, compress: bool = False) -> str:\n pvkBytes = self.wif_to_bytes(wif)\n public_key = self.bytes_to_public(pvkBytes, compress)\n address = self.pub_to_addr(public_key)\n return address\n\n def wif_to_int(self, wif): return self.bytes_to_int(self.wif_to_bytes(wif))\n\n def wif_to_hex(self, wif): return self.wif_to_bytes(wif).hex()\n\n def wif_to_mne(self, wif): return self.bytes_to_mne(self.wif_to_bytes(wif))\n\n def wif_to_xprv(self, wif): return self.bytes_to_xprv(self.wif_to_bytes(wif))\n\n def wif_to_xpub(self, wif): return self.bytes_to_xpub(self.wif_to_bytes(wif))\n\n def wif_to_pub(self, wif): return self.bytes_to_public(self.wif_to_bytes(wif)).hex()\n # ------------------------------------------------------------\n \n def xprv_to_bytes(self, xprv: str):\n if not xprv.startswith(\"xprv\") or len(xprv) <= 4:\n raise ValueError(\"Invalid xprv format.\")\n xprv58 = xprv[4:]\n xprvBytes = base58decode(xprv58)\n return xprvBytes[:32]\n \n def xprv_to_addr(self, xprv, compress: bool = False):\n seed = self.xprv_to_bytes(xprv)\n if compress:\n pub = self.bytes_to_public(seed, True)\n return self.pub_to_addr(pub)\n else:\n pub = self.bytes_to_public(seed, False)\n return self.pub_to_addr(pub)\n \n def xprv_to_pub(self, xprv, compress: bool = False):\n seed = self.xprv_to_bytes(xprv)\n if compress:\n return self.bytes_to_public(seed, True)\n else:\n return self.bytes_to_public(seed, False)\n \n def xprv_to_wif(self, xprv, compress: bool = False):\n seed = self.xprv_to_bytes(xprv)\n if compress:\n return self.bytes_to_wif(seed, True)\n else:\n return self.bytes_to_wif(seed, False)\n \n def xprv_to_mne(self, xprv):\n seed = self.xprv_to_bytes(xprv)\n return self.bytes_to_mne(seed)\n \n # ------------------------------------------------------------\n \n def binary_to_bytes(self, bin_str: str) -> bytes:\n if len(bin_str) != 256:\n raise ValueError(\"The binary string must have 256 characters.\")\n chunks = [bin_str[i:i + 8] for i in range(0, len(bin_str), 8)]\n return bytes([int(chunk, 2) for chunk in chunks])\n \n def int_to_bytes(self, int_dec: int) -> bytes:\n return int_dec.to_bytes(32, 'big')\n \n def int_to_hex(self, int_dec: int) -> str:\n return \"%064x\" % int_dec\n\n def int_to_mnemonic(self, int_dec: int) -> str:\n return self.bytes_to_mne(self.int_to_bytes(int_dec))\n\n def int_to_wif(self, int_dec: int, compress: bool = False) -> str:\n return self.bytes_to_wif(self.int_to_bytes(int_dec), compress)\n\n def int_to_xprv(self, int_dec: int) -> str:\n return self.bytes_to_xprv(self.int_to_bytes(int_dec))\n\n def int_to_xpub(self, int_dec: int) -> bytes:\n \"\"\"\n Convert int decimal 
to public key (``bytes``).\n\n :param int_dec:\n :type int_dec: int\n :return:\n :rtype: bytes\n\n \"\"\"\n return self.bytes_to_xpub(self.int_to_bytes(int_dec))\n\n def int_to_addr(self, int_dec: int, compress: bool = False) -> str:\n \"\"\"\n Convert int decimal to compress & uncompress address (``str``).\n\n :param int_dec:\n :type int_dec: int\n :param compress:\n :type compress: bool\n :return:\n :rtype: str\n \"\"\"\n return self.bytes_to_addr(self.int_to_bytes(int_dec), compress)\n\n def int_to_binary(self, int_dec: int) -> str:\n return self.bytes_to_binary(self.int_to_bytes(int_dec))\n # ------------------------------------------------------------"
},
{
"identifier": "Generator",
"path": "cryptofuzz/utils.py",
"snippet": "class Generator:\n def __init__(self):\n super().__init__()\n \n def checkValid(self, key: int) -> bool:\n if 0 < key < MAX_PRIVATE_KEY:\n return True\n else:\n raise ValueError(f\"Secret Scalar Must be greater than 0 and less than {MAX_PRIVATE_KEY}.\")\n \n def generate_private_key(self) -> str:\n randkey = \"\".join(random.choice(\"0123456789abcdef\") for _ in range(64))\n if self.checkValid(int(randkey, 16)):\n return randkey\n else:\n return self.generate_private_key()\n \n def generate_xprv(self):\n return \"xprv\" + binascii.hexlify(os.urandom(32)).decode('utf-8')\n \n def generate_decimal(self) -> int: return random.randint(0, MAX_PRIVATE_KEY)\n def generate_binary(self) -> str:\n return \"\".join(random.choice(\"01\") for _ in range(256))\n \n def generate_entropy(self, entropy_bits=256):\n entropy = os.urandom(entropy_bits // 8)\n checksum = hashlib.sha256(entropy).digest()[0]\n entropy_with_checksum = entropy + bytes([checksum])\n return entropy_with_checksum\n \n def generate_mnemonic(self, size: int) -> str:\n characters = re.findall('[A-Z][a-z]+', BIP39)\n return \" \".join(random.choices(characters, k=size)).lower()"
},
{
"identifier": "Axe",
"path": "cryptofuzz/hd.py",
"snippet": "class Axe:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str) -> str:\n \"\"\"\n Convert Private key Hex To All Axe Format Type Addresses.\n :param hexed:\n :type hexed:\n :rtype str:\n :return: Str - address\n\n -------------------------------------------------------------\n\n >>> Axe_ = Axe()\n >>> privatekey = \"e3b0c44298fc1c149...................\"\n >>> Axe_address = Axe_.hex_addr(privatekey)\n\n -------------------------------------------------------------\n \n \"\"\"\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(AXE)\n hd.from_private_key(hexed)\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "Bitcoin",
"path": "cryptofuzz/hd.py",
"snippet": "class Bitcoin:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str, Type: str = 'p2pkh') -> str:\n \"\"\"\n Convert Private key Hex To All Bitcoin Format Type Addresses, Type: `p2pkh`, `p2sh`, `p2wpkh`, `p2wsh`, `p2wpkh_p2sh`, `p2wsh_p2sh`.\n :param hexed:\n :param Type:\n :rtype str:\n :return address:\n\n\n -----------------------------------------------------------------------------------------------\n\n >>> btc = Bitcoin()\n >>> privatekey = \"0A97965.........0102F6A45517\" # example Private Key\n >>> p2pkh_addr = btc.hex_addr(privatekey, Type='p2pkh')\n >>> p2sh_addr = btc.hex_addr(privatekey, Type='p2sh')\n >>> p2wpkh_addr = btc.hex_addr(privatekey, Type='p2wpkh')\n >>> p2wsh_addr = btc.hex_addr(privatekey, Type='p2wsh')\n >>> p2wpkh_p2sh_addr = btc.hex_addr(privatekey, Type='p2wpkh_p2sh')\n >>> p2wsh_p2sh_addr = btc.hex_addr(privatekey, Type='p2wsh_p2sh')\n\n ---------------------------------------------------------------------------------------------\n\n\n \"\"\"\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(BTC)\n hd.from_private_key(hexed)\n if Type == 'p2pkh':\n return hd.p2pkh_address()\n elif Type == 'p2sh':\n return hd.p2sh_address()\n elif Type == 'p2wpkh':\n return hd.p2wpkh_address()\n elif Type == 'p2wsh':\n return hd.p2wsh_address()\n elif Type == 'p2wpkh_p2sh':\n return hd.p2wpkh_in_p2sh_address()\n elif Type == 'p2wsh_p2sh':\n return hd.p2wsh_in_p2sh_address()\n else:\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "BitcoinGold",
"path": "cryptofuzz/hd.py",
"snippet": "class BitcoinGold:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str, Type: str = \"p2pkh\") -> str:\n \"\"\"\n\n Convert Private key Hex To All BitcoinGold Format Type Address , Type: `p2pkh`, `p2sh`, `p2wpkh`, `p2wsh`, `p2wpkh_p2sh`, `p2wsh_p2sh`.\n\n :param hexed:\n :type hexed: Str.\n :param Type:\n :type Type: Str.\n :rtype: Str.\n :return address:\n\n\n --------------------------------------------------------------\n\n >>> btg = BitcoinGold()\n >>> privatekey = \"0A9796542F1030931E317...............960DC79C48D20102F6A45517\"\n >>> p2pkh_address = btg.hex_addr(privatekey, \"p2pkh\")\n >>> p2sh_address = btg.hex_addr(privatekey, \"p2sh\")\n >>> p2wpkh_address = btg.hex_addr(privatekey, \"p2wpkh\")\n >>> p2wsh_address = btg.hex_addr(privatekey, \"p2wsh\")\n >>> p2wpkh_in_p2sh_address = btg.hex_addr(privatekey, \"p2wpkh_p2sh\")\n >>> p2wsh_in_p2sh_address = btg.hex_addr(privatekey, \"p2wsh_p2sh\")\n\n --------------------------------------------------------------\n\n\n \"\"\"\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(BTG)\n hd.from_private_key(hexed)\n if Type == \"p2pkh\":\n return hd.p2pkh_address()\n elif Type == \"p2sh\":\n return hd.p2sh_address()\n elif Type == \"p2wpkh\":\n return hd.p2wpkh_address()\n elif Type == \"p2wsh\":\n return hd.p2wsh_address()\n elif Type == \"p2wpkh_p2sh\":\n return hd.p2wpkh_in_p2sh_address()\n elif Type == \"p2wsh_p2sh\":\n return hd.p2wsh_in_p2sh_address()\n else:\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "Dash",
"path": "cryptofuzz/hd.py",
"snippet": "class Dash:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str) -> str:\n \"\"\"\n Convert Private key Hex To All Dash Address .\n :param hexed:\n :rtype str:\n :return: Str - address\n \"\"\"\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(DASH)\n hd.from_private_key(hexed)\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "DigiByte",
"path": "cryptofuzz/hd.py",
"snippet": "class DigiByte:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str) -> str:\n \"\"\"\n Convert Private key Hex To DigiByte Address.\n\n :param hexed:\n :rtype str:\n :return: Str - address\n\n\n --------------------------------------------------------------\n\n >>> dgb = DigiByte()\n >>> privatekey = \"0A97965...A45517\" # example Private Key\n >>> digibyte_address = dgb.hex_addr(privatekey)\n\n --------------------------------------------------------------\n\n \"\"\"\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(DGB)\n hd.from_private_key(hexed)\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "Dogecoin",
"path": "cryptofuzz/hd.py",
"snippet": "class Dogecoin:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str, Type: str = 'p2pkh') -> str:\n \"\"\"\n Generate Private key Hex Address To All Dogecoin Format Type Address , Type: `p2pkh`, `p2sh`.\n\n :param hexed:\n :type hexed: str\n :param Type:\n :type Type: str\n :rtype: str\n :return: str - address\n\n\n --------------------------------------------------------------\n\n >>> doge = Dogecoin()\n >>> privatekey = \"0A9796542F1030...02F6A45517\" # example Private Key\n >>> p2pkh_doge_addr = doge.hex_addr(privatekey, 'p2pkh')\n >>> p2sh_doge_addr = doge.hex_addr(privatekey, 'p2sh')\n\n --------------------------------------------------------------\n\n \"\"\"\n\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(DOGE)\n hd.from_private_key(hexed)\n if Type == 'p2pkh':\n return hd.p2pkh_address()\n elif Type == 'p2sh':\n return hd.p2sh_address()\n else:\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "Ethereum",
"path": "cryptofuzz/hd.py",
"snippet": "class Ethereum:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str) -> str:\n \"\"\"\n Convert Private key Hex To All Ethereum Format Type Address .\n :param hexed:\n :rtype str:\n :return: str - address\n \"\"\"\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(ETH)\n hd.from_private_key(hexed)\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "Litecoin",
"path": "cryptofuzz/hd.py",
"snippet": "class Litecoin:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str, Type: str = 'p2pkh') -> str:\n \"\"\"\n\n ------------------------------------------\n Convert Private key Hex To All Litecoin Format Type Address , Type: `p2pkh`, `p2sh`, `p2wpkh`, `p2wsh`, `p2wpkh_p2sh`, `p2wsh_p2sh`.\n :param hexed:\n :type hexed: str.\n :param Type:\n :type Type: str.\n :returns: address.\n\n ------------------------------------------\n\n >>> ltc = Litecoin()\n >>> privatekey = \"e3b0c44298fc1c149...................\"\n >>> p2pkh_address = ltc.hex_addr(privatekey, 'p2pkh')\n >>> p2sh_address = ltc.hex_addr(privatekey, 'p2sh')\n >>> p2wpkh_address = ltc.hex_addr(privatekey, 'p2wpkh')\n >>> p2wsh_address = ltc.hex_addr(privatekey, 'p2wsh')\n >>> p2wpkh_p2sh_address = ltc.hex_addr(privatekey, 'p2wpkh_p2sh')\n >>> p2wsh_p2sh_address = ltc.hex_addr(privatekey, 'p2wsh_p2sh')\n\n ------------------------------------------\n\n\n\n \"\"\"\n\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(LTC)\n hd.from_private_key(hexed)\n if Type == 'p2pkh':\n return hd.p2pkh_address()\n elif Type == 'p2sh':\n return hd.p2sh_address()\n elif Type == 'p2wpkh':\n return hd.p2wpkh_address()\n elif Type == 'p2wsh':\n return hd.p2wsh_address()\n elif Type == 'p2wpkh_p2sh':\n return hd.p2wpkh_in_p2sh_address()\n elif Type == 'p2wsh_p2sh':\n return hd.p2wsh_in_p2sh_address()\n else:\n return hd.p2pkh_address()\n\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "Qtum",
"path": "cryptofuzz/hd.py",
"snippet": "class Qtum:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str) -> str:\n \"\"\"\n Convert Private key Hex To All Qtum Format Type Address .\n :param hexed:\n :rtype str:\n :return: str - address\n \"\"\"\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(QTUM)\n hd.from_private_key(hexed)\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "Ravencoin",
"path": "cryptofuzz/hd.py",
"snippet": "class Ravencoin:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str) -> str:\n \"\"\"\n Convert Private key Hex To All Ravencoin Format Type Address .\n :param hexed:\n :rtype str:\n :return: str - address\n \"\"\"\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(RVN)\n hd.from_private_key(hexed)\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "Tron",
"path": "cryptofuzz/hd.py",
"snippet": "class Tron:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str) -> str:\n\n \"\"\"\n Convert Private key Hex To All Tron Format Type Address .\n :param hexed:\n :rtype str:\n :return: str - address\n \"\"\"\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(TRX)\n hd.from_private_key(hexed)\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "Zcash",
"path": "cryptofuzz/hd.py",
"snippet": "class Zcash:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str) -> str:\n \"\"\"\n Convert Private key Hex To All Zcash Format Type Address .\n :param hexed:\n :rtype str:\n :return: str - address\n \"\"\"\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(ZEC)\n hd.from_private_key(hexed)\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
}
] | import os
from . import Generator, Convertor
from . import (
Bitcoin, BitcoinGold, Dash, DigiByte, Dogecoin, Ethereum, Litecoin, Qtum, Ravencoin, Tron, Zcash, Axe
) | 9,565 | >>> p2wpkh_p2sh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wpkh_p2sh')
>>> p2wsh_p2sh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wsh_p2sh')
--------------------------------------------------------
"""
BTC = Bitcoin()
if Type == 'p2pkh':
return BTC.hex_addr(privatekey, 'p2pkh')
elif Type == 'p2sh':
return BTC.hex_addr(privatekey, 'p2sh')
elif Type == 'p2wpkh':
return BTC.hex_addr(privatekey, 'p2wpkh')
elif Type == 'p2wsh':
return BTC.hex_addr(privatekey, 'p2wsh')
elif Type == 'p2wpkh_p2sh':
return BTC.hex_addr(privatekey, 'p2wpkh_p2sh')
elif Type == 'p2wsh_p2sh':
return BTC.hex_addr(privatekey, 'p2wsh_p2sh')
else:
return BTC.hex_addr(privatekey, 'p2pkh')
# ----------------------------------------------------------
def PrivateKey_To_Ethereum_Addr(privatekey: str) -> str:
"""
Convert Private Key To Ethereum Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_Ethereum_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_Ethereum_Addr(Privatekey)
--------------------------------------------------------
"""
ETH = Ethereum()
return ETH.hex_addr(privatekey)
# ----------------------------------------------------------
def PrivateKey_To_BitcoinGold_Addr(privatekey: str) -> str:
"""
Convert Private Key To Bitcoin Gold Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_BitcoinGold_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_BitcoinGold_Addr(Privatekey)
--------------------------------------------------------
"""
BTG = BitcoinGold()
return BTG.hex_addr(privatekey)
# ----------------------------------------------------------
def PrivateKey_To_Dash_Addr(privatekey: str) -> str:
"""
Convert Private Key To Dash Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_Dash_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_Dash_Addr(Privatekey)
--------------------------------------------------------
"""
DASH = Dash()
return DASH.hex_addr(privatekey)
# ----------------------------------------------------------
def PrivateKey_To_DigiByte_Addr(privatekey: str) -> str:
"""
    Convert Private Key To DigiByte Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
    >>> from cryptofuzz.Wallet import PrivateKey_To_DigiByte_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_DigiByte_Addr(Privatekey)
--------------------------------------------------------
"""
| # programmer and owner mmdrza.com
# ----------------------------------------------------------
convertor = Convertor()
generator = Generator()
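# Module-level singletons: the helper functions below all reuse these Convertor/Generator instances.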
# ----------------------------------------------------------
def getPrivateKey() -> str:
"""
    Generate a random private key as a 64-character hex string.
:return private key:
:rtype str:
---------------------------------------------------
>>> Privatekey = getPrivateKey()
---------------------------------------------------
"""
return generator.generate_private_key()
# ----------------------------------------------------------
def getMnemonic(size: int = 12) -> str:
"""
Generate Random Standard Mnemonic BIP39.
:param size:
:type size: Int
:return mnemonic:
:rtype str:
--------------------------------------------------
>>> Mnemonic = getMnemonic()
--------------------------------------------------
"""
return generator.generate_mnemonic(size=size)
# ----------------------------------------------------------
def getBinary() -> str:
"""
Generate random Binary With Length 256 (256 bits).
:rtype str:
:return binary:
-------------------------------------------------
>>> Binary = getBinary()
------------------------------------------------
"""
return generator.generate_binary()
# ----------------------------------------------------------
def getRootKey() -> str:
"""
Generate Root Key.
:rtype str:
:return root key:
------------------------------------------------
>>> RootKey = getRootKey()
------------------------------------------------
"""
return generator.generate_xprv()
# -------------------------------------------------------------------
def getBytes() -> bytes: return os.urandom(32)
# -------------------------------------------------------------------
def getDecimal() -> int: return generator.generate_decimal()
# -------------------------------------------------------------------
def PrivateKey_To_Addr(hexed: str, compress: bool = False) -> str:
"""
Convert Private key Hex To Compress and UnCompress Address.
:param hexed:
:type hexed: str
:param compress:
:type compress: bool
:return address:
:rtype str:
----------------------------------------------------------
>>> privatekey = "0A97965...A45517" # example Private Key
>>> address_compress = PrivateKey_To_Addr(privatekey, True)
>>> address_uncompress = PrivateKey_To_Addr(privatekey, False)
----------------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
if compress:
return convertor.bytes_to_addr(seed, True)
else:
return convertor.bytes_to_addr(seed, False)
# ----------------------------------------------------------
def PrivateKey_To_Wif(hexed: str, compress: bool = False) -> str:
"""
Convert Private key Hex To Compress and UnCompress WIF.
:param hexed:
:type hexed: str
:param compress:
:type compress: bool
:return wif:
:rtype str:
------------------------------------------------------------
>>> privatekey = "0A97965...A45517" # example Private Key
>>> wif_compress = PrivateKey_To_Wif(privatekey, True)
>>> wif_uncompress = PrivateKey_To_Wif(privatekey, False)
------------------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
if compress:
return convertor.bytes_to_wif(seed, True)
else:
return convertor.bytes_to_wif(seed, False)
# ----------------------------------------------------------
def PrivateKey_To_PublicKey(hexed: str, compress: bool = False) -> str:
"""
Convert Private key Hex To compress and uncompress Public Key.
:param hexed:
:type hexed: str
:param compress:
:type compress: bool
:return public key:
:rtype str:
------------------------------------------------
>>> privatekey = "0A97965...A45517" # example Private Key
>>> publickey_compress = PrivateKey_To_PublicKey(privatekey, True)
>>> publickey_uncompress = PrivateKey_To_PublicKey(privatekey, False)
------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
if compress:
return convertor.bytes_to_public(seed, True).hex()
else:
return convertor.bytes_to_public(seed, False).hex()
# ----------------------------------------------------------
def PrivateKey_To_Mnemonic(hexed: str) -> str:
"""
Convert Private key Hex To Mnemonic.
:param hexed:
:type hexed: str
:return mnemonic:
:rtype str:
--------------------------------------------------------
>>> privatekey = "0A97965...A45517" # example Private Key
>>> mnemonic = PrivateKey_To_Mnemonic(privatekey)
--------------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
return convertor.bytes_to_mne(seed)
# ----------------------------------------------------------
def PrivateKey_To_Byte(hexed: str) -> bytes:
"""
Convert Private key Hex To Byte.
:param hexed:
:type hexed: Str.
:return byte:
:rtype bytes:
--------------------------------------------------------
>>> Privatekey = "0A97965...A45517" # example Private Key
>>> byte = PrivateKey_To_Byte(Privatekey)
--------------------------------------------------------
"""
return convertor.hex_to_bytes(hexed)
# ----------------------------------------------------------
def PrivateKey_To_Binary(hexed: str) -> str:
"""
Convert Private key Hex To Binary.
:param hexed:
:type hexed: Str
:return binary:
:rtype str:
--------------------------------------------------------
>>> Privatekey = "0A97965...A45517" # example Private Key
>>> binary = PrivateKey_To_Binary(Privatekey)
--------------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
return convertor.bytes_to_binary(seed)
# ----------------------------------------------------------
def PrivateKey_To_Decimal(hexed: str) -> int:
"""
Convert Private key Hex To Decimal.
:param hexed:
:type hexed: Str
:return decimal:
:rtype int:
--------------------------------------------------------
>>> Privatekey = "0A97965...A45517" # example Private Key
>>> decimal = PrivateKey_To_Decimal(Privatekey)
--------------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
return convertor.bytes_to_int(seed)
# ----------------------------------------------------------
def PrivateKey_To_XPRV(hexed: str) -> str:
"""
Convert Private key Hex To XPRV.
:param hexed:
:type hexed: Str
:return xprv:
:rtype str:
--------------------------------------------------------
>>> Privatekey = "0A97965...A45517" # example Private Key
>>> xprv = PrivateKey_To_XPRV(Privatekey)
--------------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
return convertor.bytes_to_xprv(seed)
# ----------------------------------------------------------
def PrivateKey_To_CompressAddr(hexed: str) -> str:
"""
Convert Private key Hex To Compress Address.
:param hexed:
:type hexed: Str
:return address:
:rtype str:
--------------------------------------------------------
>>> Privatekey = "0A97965...A45517" # example Private Key
>>> address_compress = PrivateKey_To_CompressAddr(Privatekey)
--------------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
return convertor.bytes_to_addr(seed, True)
# ----------------------------------------------------------
def PrivateKey_To_UncompressAddr(hexed: str) -> str:
"""
Convert Private key Hex To UnCompress Address.
:param hexed:
:type hexed: Str
:return address:
:rtype str:
--------------------------------------------------------
>>> Privatekey = "0A97965...A45517" # example Private Key
>>> address_uncompress = PrivateKey_To_UncompressAddr(Privatekey)
--------------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
return convertor.bytes_to_addr(seed, False)
# ----------------------------------------------------------
def PrivateKey_To_XPUB(hexed: str) -> str:
"""
Convert Private key Hex To XPUB.
:param hexed:
:type hexed: Str
:return xpub:
:rtype str:
--------------------------------------------------------
>>> Privatekey = "0A97965...A45517" # example Private Key
>>> xpub = PrivateKey_To_XPUB(Privatekey)
--------------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
return convertor.bytes_to_xpub(seed)
# ----------------------------------------------------------
def Bytes_To_PrivateKey(byte: bytes) -> str:
"""
Convert Byte To Private Key.
:param byte:
:type byte: Bytes
:return private key:
:rtype str:
--------------------------------------------------------
>>> Privatekey = "0A97965...A45517" # example Private Key
>>> privatekey = Bytes_To_PrivateKey(Privatekey)
--------------------------------------------------------
"""
return convertor.bytes_to_hex(byte)
# ----------------------------------------------------------
def Bytes_To_Address(seed: bytes, compress: bool = False):
"""
Convert Bytes To Compressed and Uncompressed Address.
:param seed:
:type seed: Bytes
:param compress:
:type compress: bool
:return address:
:rtype str:
--------------------------------------------------------
>>> seedBytes = b"\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00\x00" # example seed
>>> address_compress = Bytes_To_Address(seedBytes, True)
>>> address_uncompress = Bytes_To_Address(seedBytes, False)
--------------------------------------------------------
"""
if compress:
return convertor.bytes_to_addr(seedBytes=seed, compress=True)
else:
return convertor.bytes_to_addr(seedBytes=seed, compress=False)
# ----------------------------------------------------------
def Bytes_To_Mnemonic(seed: bytes) -> str:
"""
Convert Bytes To Mnemonic.
:param seed:
:type seed: Bytes
:return mnemonic:
:rtype str:
--------------------------------------------------------
>>> seedBytes = b"\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00\x00" # example seed
>>> mnemonic = Bytes_To_Mnemonic(seedBytes)
--------------------------------------------------------
"""
return convertor.bytes_to_mne(seed)
# ----------------------------------------------------------
def Bytes_To_XPRV(seed: bytes) -> str:
"""
Convert Bytes To XPRV.
:param seed:
:type seed: Bytes
:return xprv:
:rtype str:
--------------------------------------------------------
>>> seedBytes = b"\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00\x00" # example seed
>>> xprv = Bytes_To_XPRV(seedBytes)
--------------------------------------------------------
"""
return convertor.bytes_to_xprv(seed)
# ----------------------------------------------------------
def Bytes_To_Binary(seed: bytes):
"""
Convert Bytes To Binary.
:param seed:
:type seed: Bytes
:return binary:
:rtype str:
--------------------------------------------------------
>>> seedBytes = b"\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00\x00" # example seed
>>> binary = Bytes_To_Binary(seedBytes)
--------------------------------------------------------
"""
return convertor.bytes_to_binary(seed)
# ----------------------------------------------------------
def Bytes_To_PublicKey(seed: bytes, compress: bool = False):
"""
Convert Bytes To Public Key Compressed and Uncompressed.
:param seed:
:type seed: Bytes
:param compress:
:type compress: bool
:return public:
:rtype str:
--------------------------------------------------------
>>> seedBytes = b"\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00\x00" # example seed
>>> public_compress = Bytes_To_PublicKey(seedBytes, True)
>>> public_uncompress = Bytes_To_PublicKey(seedBytes, False)
--------------------------------------------------------
"""
if compress:
return convertor.bytes_to_public(seed, True).hex()
else:
return convertor.bytes_to_public(seed, False).hex()
# ----------------------------------------------------------
def Bytes_To_Compress_Addr(seed: bytes) -> str:
"""
Convert Bytes To Compressed Address.
:param seed:
:type seed: Bytes
:return address:
:rtype str:
--------------------------------------------------------
>>> seedBytes = b"\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00\x00" # example seed
>>> address_compress = Bytes_To_Compress_Addr(seedBytes)
--------------------------------------------------------
"""
return convertor.bytes_to_addr(seed, True)
# ----------------------------------------------------------
def Bytes_To_Uncompress_Addr(seed: bytes) -> str:
"""
Convert Bytes To Uncompressed Address.
:param seed:
:type seed: Bytes
:return address:
:rtype str:
--------------------------------------------------------
>>> seedBytes = b"\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00\x00" # example seed
>>> address_uncompress = Bytes_To_Uncompress_Addr(seedBytes)
--------------------------------------------------------
"""
return convertor.bytes_to_addr(seed, False)
# ----------------------------------------------------------
def Bytes_To_Decimal(seed: bytes):
"""
Convert Bytes To Decimal.
:param seed:
:type seed: Bytes
:return decimal:
:rtype int:
--------------------------------------------------------
>>> seedBytes = b"\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00\x00" # example seed
>>> decimal = Bytes_To_Decimal(seedBytes)
--------------------------------------------------------
"""
return convertor.bytes_to_int(seed)
# ----------------------------------------------------------
def Bytes_To_XPUB(seed: bytes) -> str:
return convertor.bytes_to_xpub(seed)
# ----------------------------------------------------------
def Bytes_To_Wif(seed: bytes, compress: bool = False) -> str:
"""
Convert Bytes To Wif Compressed and UnCompressed.
:param seed:
:type seed: Bytes
:param compress:
:type compress: bool
:return wif:
:rtype str:
--------------------------------------------------------
>>> seedBytes = b"\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00\x00" # example seed
>>> wif_compress = Bytes_To_Wif(seedBytes, True)
>>> wif_uncompress = Bytes_To_Wif(seedBytes, False)
--------------------------------------------------------
"""
if compress:
return convertor.bytes_to_wif(seed, True)
else:
return convertor.bytes_to_wif(seed, False)
# ----------------------------------------------------------
def Mnemonic_To_Bytes(mnemonic: str) -> bytes:
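    # Derives the BIP39 PBKDF2-HMAC-SHA512 seed for the mnemonic and returns its first 32 bytes (see Convertor.mne_to_seed).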
return convertor.mne_to_seed(mnemonic=mnemonic)
# ----------------------------------------------------------
def Mnemonic_To_PrivateKey(mnemonic: str) -> str:
seed = convertor.mne_to_seed(mnemonic=mnemonic)
return convertor.bytes_to_hex(seed=seed)
# ----------------------------------------------------------
def Mnemonic_To_PublicKey(mnemonic: str, compress: bool = False) -> str:
    seed = convertor.mne_to_seed(mnemonic=mnemonic)
    if compress:
        return convertor.bytes_to_public(seed, True).hex()
    else:
        return convertor.bytes_to_public(seed, False).hex()
# ----------------------------------------------------------
def Mnemonic_To_Decimal(mnemonic: str):
seed = convertor.mne_to_seed(mnemonic=mnemonic)
return convertor.bytes_to_int(seed)
# ----------------------------------------------------------
def Mnemonic_To_Binary(mnemonic: str):
seed = convertor.mne_to_seed(mnemonic=mnemonic)
return convertor.bytes_to_binary(seed)
# ----------------------------------------------------------
def Mnemonic_To_XPRV(mnemonic: str):
seedBytes = convertor.mne_to_seed(mnemonic)
return convertor.bytes_to_xprv(seedBytes)
# ----------------------------------------------------------
def Mnemonic_To_Addr(mnemonic: str, compress: bool = False) -> str:
seedBytes = convertor.mne_to_seed(mnemonic)
if compress:
return convertor.bytes_to_addr(seedBytes, True)
else:
return convertor.bytes_to_addr(seedBytes, False)
# ----------------------------------------------------------
def Mnemonic_To_XPUB(mnemonic: str):
seedBytes = convertor.mne_to_seed(mnemonic)
return convertor.bytes_to_xpub(seedBytes)
# ----------------------------------------------------------
def Mnemonic_To_Wif(mnemonic: str, compress: bool = False) -> str:
seedBytes = convertor.mne_to_seed(mnemonic)
if compress:
return convertor.bytes_to_wif(seedBytes, True)
else:
return convertor.bytes_to_wif(seedBytes, False)
# ----------------------------------------------------------
def Passphrase_To_Addr(passphrase: str, compress: bool = False) -> str:
if compress:
return convertor.pass_to_addr(passphrase, True)
else:
return convertor.pass_to_addr(passphrase, False)
# ----------------------------------------------------------
def Passphrase_To_Bytes(passphrase: str) -> bytes:
return convertor.pass_to_bytes(passphrase)
# ----------------------------------------------------------
def Passphrase_To_PrivateKey(passphrase: str) -> str:
return convertor.bytes_to_hex(convertor.pass_to_bytes(passphrase))
# ----------------------------------------------------------
def Passphrase_To_PublicKey(passphrase: str, compress: bool = False) -> str:
seed = convertor.pass_to_bytes(passphrase)
if compress:
return convertor.bytes_to_public(seed, True).hex()
else:
return convertor.bytes_to_public(seed, False).hex()
# ----------------------------------------------------------
def Passphrase_To_Wif(passphrase: str, compress: bool = False) -> str:
seed = convertor.pass_to_bytes(passphrase)
if compress:
return convertor.bytes_to_wif(seed, True)
else:
return convertor.bytes_to_wif(seed, False)
# ----------------------------------------------------------
def Passphrase_To_RootKey(passphrase: str) -> str:
seed = convertor.pass_to_bytes(passphrase)
return convertor.bytes_to_xprv(seed)
# ----------------------------------------------------------
def Passphrase_To_XPUB(passphrase: str) -> str:
seed = convertor.pass_to_bytes(passphrase)
return convertor.bytes_to_xpub(seed)
# ----------------------------------------------------------
def Passphrase_To_Decimal(passphrase: str) -> int:
seed = convertor.pass_to_bytes(passphrase)
return convertor.bytes_to_int(seed)
# ----------------------------------------------------------
def Wif_To_Bytes(wif: str) -> bytes:
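    # Base58Check-decodes the WIF string and strips the version byte and checksum, returning the raw private-key bytes.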
return convertor.wif_to_bytes(wif)
# ----------------------------------------------------------
def Wif_To_Addr(wif: str, compress: bool = False) -> str:
return convertor.wif_to_addr(wif, compress)
# ----------------------------------------------------------
def Wif_To_PrivateKey(wif: str) -> str:
return convertor.bytes_to_hex(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_Mnemonic(wif: str) -> str:
return convertor.bytes_to_mne(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_Decimal(wif: str) -> int:
return convertor.bytes_to_int(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_Binary(wif: str) -> str:
return convertor.bytes_to_binary(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_XPRV(wif: str) -> str:
return convertor.bytes_to_xprv(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_XPUB(wif: str) -> str: return convertor.bytes_to_xpub(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_RootKey(wif: str) -> str:
return Wif_To_XPRV(wif)
# ----------------------------------------------------------
def Wif_To_PublicKey(wif: str, compress: bool = False):
seed = convertor.wif_to_bytes(wif)
if compress:
return convertor.bytes_to_public(seed, True).hex()
else:
return convertor.bytes_to_public(seed, False).hex()
# ----------------------------------------------------------
def Decimal_To_PrivateKey(dec: int) -> str:
return "%064x" % dec
# ----------------------------------------------------------
def Decimal_To_Bytes(dec: int) -> bytes:
return convertor.int_to_bytes(dec)
# ----------------------------------------------------------
def Decimal_To_PublicKey(dec: int, compress: bool = False) -> str:
seed = Decimal_To_Bytes(dec)
if compress:
return convertor.bytes_to_public(seed, True).hex()
else:
return convertor.bytes_to_public(seed, False).hex()
# ----------------------------------------------------------
def Decimal_To_Address(dec: int, compress: bool = False) -> str:
seed = Decimal_To_Bytes(dec)
if compress:
return convertor.bytes_to_addr(seed, True)
else:
return convertor.bytes_to_addr(seed, False)
# ----------------------------------------------------------
def Decimal_To_Mnemonic(dec: int) -> str:
seed = convertor.int_to_bytes(dec)
return convertor.bytes_to_mne(seed)
# ----------------------------------------------------------
def Decimal_To_XPRV(dec: int) -> str:
seed = convertor.int_to_bytes(dec)
return convertor.bytes_to_xprv(seed)
# ----------------------------------------------------------
def Decimal_To_XPUB(dec: int) -> str:
seed = convertor.int_to_bytes(dec)
return convertor.bytes_to_xpub(seed)
# ----------------------------------------------------------
def Decimal_To_Binary(dec: int) -> str:
    seed = convertor.int_to_bytes(dec)
    return convertor.bytes_to_binary(seed)
# ----------------------------------------------------------
def Decimal_To_Wif(dec: int, compress: bool = False) -> str:
seed = convertor.int_to_bytes(dec)
if compress:
return convertor.bytes_to_wif(seed, True)
else:
return convertor.bytes_to_wif(seed, False)
# ----------------------------------------------------------
def Binary_To_Bytes(binary_str: str) -> bytes:
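    # Expects a 256-character string of '0'/'1'; Convertor.binary_to_bytes raises ValueError for any other length.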
return convertor.binary_to_bytes(binary_str)
# ----------------------------------------------------------
def Binary_To_Address(binary_str: str, compress: bool = False) -> str:
seed = convertor.binary_to_bytes(binary_str)
if compress:
return convertor.bytes_to_addr(seed, True)
else:
return convertor.bytes_to_addr(seed, False)
# ----------------------------------------------------------
def Binary_To_PrivateKey(binary_str: str) -> str: return convertor.bytes_to_hex(convertor.binary_to_bytes(binary_str))
# ----------------------------------------------------------
def Binary_To_Mnemonic(binary_str: str) -> str: return convertor.bytes_to_mne(convertor.binary_to_bytes(binary_str))
# ----------------------------------------------------------
def Binary_To_XPRV(binary_str: str) -> str: return convertor.bytes_to_xprv(convertor.binary_to_bytes(binary_str))
# ----------------------------------------------------------
def Binary_To_XPUB(binary_str: str) -> str: return convertor.bytes_to_xpub(convertor.binary_to_bytes(binary_str))
# ----------------------------------------------------------
def Binary_To_Wif(binary_str: str, compress: bool = False) -> str:
    return convertor.bytes_to_wif(convertor.binary_to_bytes(binary_str), compress)
# ----------------------------------------------------------
def Binary_To_PublicKey(binary_str: str, compress: bool = False) -> str:
    return convertor.bytes_to_public(convertor.binary_to_bytes(binary_str), compress).hex()
# ----------------------------------------------------------
def Binary_To_Decimal(binary_str: str) -> int: return convertor.bytes_to_int(convertor.binary_to_bytes(binary_str))
# ----------------------------------------------------------
def XPRV_To_Bytes(xprv: str) -> bytes: return convertor.xprv_to_bytes(xprv)
def XPRV_To_PrivateKey(xprv: str) -> str: return convertor.bytes_to_hex(convertor.xprv_to_bytes(xprv))
def XPRV_To_PublicKey(xprv: str, compress: bool = False) -> str:
    return convertor.bytes_to_public(convertor.xprv_to_bytes(xprv), compress).hex()
def XPRV_To_Wif(xprv: str, compress: bool = False) -> str:
    return convertor.bytes_to_wif(convertor.xprv_to_bytes(xprv), compress)
def XPRV_To_Address(xprv: str, compress: bool = False) -> str:
    return convertor.bytes_to_addr(convertor.xprv_to_bytes(xprv), compress)
def XPRV_To_Mnemonic(xprv: str) -> str: return convertor.bytes_to_mne(convertor.xprv_to_bytes(xprv))
def XPRV_To_XPUB(xprv: str) -> str: return convertor.bytes_to_xpub(convertor.xprv_to_bytes(xprv))
def XPRV_To_Decimal(xprv: str) -> int: return convertor.bytes_to_int(convertor.xprv_to_bytes(xprv))
# ----------------------------------------------------------
def PrivateKey_To_Bitcoin_Addr(privatekey: str, Type: str = 'p2pkh') -> str:
"""
Convert Private Key To Bitcoin All Type Address, Type: p2pkh, p2sh, p2wpkh, p2wsh, p2wpkh_p2sh, p2wsh_p2sh.
:param privatekey:
:type privatekey: str
:param Type:
:type Type: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_Bitcoin_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> p2pkh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2pkh')
>>> p2sh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2sh')
>>> p2wpkh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wpkh')
>>> p2wsh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wsh')
>>> p2wpkh_p2sh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wpkh_p2sh')
>>> p2wsh_p2sh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wsh_p2sh')
--------------------------------------------------------
"""
BTC = Bitcoin()
if Type == 'p2pkh':
return BTC.hex_addr(privatekey, 'p2pkh')
elif Type == 'p2sh':
return BTC.hex_addr(privatekey, 'p2sh')
elif Type == 'p2wpkh':
return BTC.hex_addr(privatekey, 'p2wpkh')
elif Type == 'p2wsh':
return BTC.hex_addr(privatekey, 'p2wsh')
elif Type == 'p2wpkh_p2sh':
return BTC.hex_addr(privatekey, 'p2wpkh_p2sh')
elif Type == 'p2wsh_p2sh':
return BTC.hex_addr(privatekey, 'p2wsh_p2sh')
else:
return BTC.hex_addr(privatekey, 'p2pkh')
# ----------------------------------------------------------
def PrivateKey_To_Ethereum_Addr(privatekey: str) -> str:
"""
Convert Private Key To Ethereum Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_Ethereum_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_Ethereum_Addr(Privatekey)
--------------------------------------------------------
"""
ETH = Ethereum()
return ETH.hex_addr(privatekey)
# ----------------------------------------------------------
def PrivateKey_To_BitcoinGold_Addr(privatekey: str) -> str:
"""
Convert Private Key To Bitcoin Gold Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_BitcoinGold_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_BitcoinGold_Addr(Privatekey)
--------------------------------------------------------
"""
BTG = BitcoinGold()
return BTG.hex_addr(privatekey)
# ----------------------------------------------------------
def PrivateKey_To_Dash_Addr(privatekey: str) -> str:
"""
Convert Private Key To Dash Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_Dash_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_Dash_Addr(Privatekey)
--------------------------------------------------------
"""
DASH = Dash()
return DASH.hex_addr(privatekey)
# ----------------------------------------------------------
def PrivateKey_To_DigiByte_Addr(privatekey: str) -> str:
"""
    Convert Private Key To DigiByte Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
    >>> from cryptofuzz.Wallet import PrivateKey_To_DigiByte_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_DigiByte_Addr(Privatekey)
--------------------------------------------------------
""" | DGB = DigiByte() | 6 | 2023-11-10 14:51:41+00:00 | 12k |
atlantic-quantum/Shipyard | shipyard/passes/timing_constraints.py | [
{
"identifier": "ActivationRecord",
"path": "shipyard/call_stack.py",
"snippet": "class ActivationRecord:\n \"\"\"Activation Records for shipyard\"\"\"\n\n def __init__(\n self,\n name: str,\n ar_type: ARType,\n nesting_level: int,\n ):\n self.name = name\n self.type = ar_type\n self.nesting_level = nesting_level\n self.members = {}\n\n def __setitem__(self, key, value):\n self.members[key] = value\n LOGGER.debug(\"%s: %s\", key, value)\n\n def __getitem__(self, key):\n return self.members[key]\n\n def get(self, key, default=None):\n \"\"\"Gets a member of the activation record by key\"\"\"\n return self.members.get(key, default)\n\n def __str__(self):\n lines = [f\"{self.nesting_level}: {self.type.value} {self.name}\"]\n for name, val in self.members.items():\n lines.append(f\" {name:<20}: {val}\")\n\n return \"\\n\".join(lines)\n\n def __repr__(self):\n return self.__str__()"
},
{
"identifier": "ARType",
"path": "shipyard/call_stack.py",
"snippet": "class ARType(Enum):\n \"\"\"\n Enumeration of Acivation Record Types\n \"\"\"\n\n PROGRAM = \"PROGRAM\"\n EXTERN = \"EXTERN\"\n SUBROUTINE = \"SUBROUTINE\"\n CALIBRATION = \"CALIBRATION\"\n DEFCAL = \"DEFCAL\"\n GATE = \"GATE\"\n LOOP = \"LOOP\""
},
{
"identifier": "Error",
"path": "shipyard/compiler_error.py",
"snippet": "class Error(Exception):\n \"\"\"Base Error Class for shipyard\"\"\"\n\n def __init__(self, error_code=None, message=None):\n self.error_code = error_code\n # self.token = token\n # add exception class name before the message\n class_name = self.__class__.__name__.rsplit(\".\", maxsplit=1)[-1]\n self.message = f\"{class_name}: ({self.error_code}) {message}\"\n super().__init__(self.message)"
},
{
"identifier": "ErrorCode",
"path": "shipyard/compiler_error.py",
"snippet": "class ErrorCode(Enum):\n \"\"\"Class to enumerate error codes of the shipyard\"\"\"\n\n ID_NOT_FOUND = \"Identifier not found\"\n DUPLICATE_ID = \"Duplicate id found\"\n NOT_IN_GLOBAL_SCOPE = \"Not in global scope\"\n INVALID_DEFCAL_ARGUMENT = \"Invalid defcal argument\"\n EXPRESSION_IN_DEFCAL = \"Expression in defcal signature, unhandled\"\n INVALID_GATECALL_ARGUMENT = \"Invalid gatecall argument\"\n UNHANDLED = \"Unhandled case\"\n UNDETERMINED_CALL = \"Unable to determine a unique function for function call\"\n NO_SEQC_STATEMENT = \"No equivalent SEQC statement\"\n COMPILE_OUT = \"Statement should be compiled out before printing SEQC code\"\n PORT_NOT_FOUND = \"Port was not found within setup\"\n INSTRUMENT_NOT_FOUND = \"Instrument was not found within setup\"\n INPUT_NOT_FOUND = \"Input value was not found\"\n OUTPUT_NOT_SUPPORTED = \"Output type not supported\"\n INPUT_TYPE_NOT_SUPPORTED = \"Input type not supported\"\n INVALID_ARGUMENT = \"Invalid argument\"\n INVALID_WAVEFORM = \"Waveform does not meet timing constraints\"\n INCLUDE_ERROR = \"Error in include statement\""
},
{
"identifier": "SetupInternal",
"path": "shipyard/setup/internal.py",
"snippet": "class SetupInternal(BaseModel):\n\n \"\"\"\n A Pydantic model containing the information required to compile an openQASM program\n to instrument level instructions.\n\n It is recommended to instanciate this object from a configuration file\n (json (future yml?))\n \"\"\"\n\n # todo validation\n\n # todo move to own module\n instruments: dict[str, Instrument]\n ports: dict[str, Port]\n frames: dict[str, Frame]\n\n @classmethod\n def from_dict(cls, setup: dict[str, dict[str, dict]]) -> \"SetupInternal\":\n \"\"\"Creates a Setup object from a dictionary\n\n Args:\n setup (dict[str, dict[str, dict]]): dictionary to create a Setup object from\n\n Returns:\n Setup: created from dictionary\n \"\"\"\n instruments = {\n k: Instrument(name=k, **v) for k, v in setup[\"Instruments\"].items()\n }\n ports = {}\n for k, val in setup[\"Ports\"].items():\n val[\"instrument\"] = instruments[val[\"instrument\"]]\n val[\"core\"] = Port.Core(**val[\"core\"])\n ports[k] = Port(name=k, **val)\n frames = {}\n for k, val in setup[\"Frames\"].items():\n val[\"port\"] = ports[val[\"port\"]]\n frames[k] = Frame(name=k, **val)\n return cls(instruments=instruments, ports=ports, frames=frames)\n\n def to_dict(self) -> dict[str, dict[str, dict]]:\n \"\"\"Creates a dictionary from a Setup object\n\n Args:\n filename (Path | str, optional):\n path to save dictionary to. Defaults to None.\n\n Returns:\n dict[str, dict[str, dict]]: dictionary created from Setup object\n \"\"\"\n setup = {\n \"Instruments\": {\n k: {\n \"type\": v.type,\n \"serial\": v.serial,\n }\n for k, v in self.instruments.items()\n },\n \"Ports\": {\n k: {\n \"instrument\": v.instrument.name,\n \"core\": {\n \"type\": v.core.type.value,\n \"index\": v.core.index,\n \"channels\": v.core.channels,\n },\n }\n for k, v in self.ports.items()\n },\n \"Frames\": {\n k: {\n \"port\": v.port.name,\n \"frequency\": v.frequency,\n \"phase\": v.phase,\n }\n for k, v in self.frames.items()\n },\n }\n return setup\n\n @classmethod\n def from_json(cls, filename: str | Path) -> \"SetupInternal\":\n \"\"\"Creates a Setup object from a json file\n\n Args:\n filename (str | Path): path to json file\n\n Returns:\n Setup: created from json file\n \"\"\"\n with open(filename, encoding=\"utf-8\") as file:\n data = json.load(file)\n return cls.from_dict(data)\n\n def to_json(self, filename: str | Path) -> Path:\n \"\"\"Writes a Setup object to a json file\n\n Args:\n filename (str | Path): path to json file to create\n\n Returns:\n Path: path to json file\n \"\"\"\n data = self.to_dict()\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n json.dump(data, file, indent=4)\n return Path(filename)\n\n @classmethod\n def from_yml(cls, filename: str | Path) -> \"SetupInternal\":\n \"\"\"Creates a Setup object from a yml file\n\n Args:\n filename (str | Path): path to yml file\n\n Returns:\n Setup: created from yml file\n \"\"\"\n with open(filename, \"r\", encoding=\"utf-8\") as file:\n data = yaml.safe_load(file)\n return cls.from_dict(data)\n\n def to_yml(self, filename: str | Path) -> Path:\n \"\"\"Writes a Setup object to a yml file\n\n Args:\n filename (str | Path): path to yml file to create\n\n Returns:\n Path: path to yml file\n \"\"\"\n data = self.to_dict()\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n yaml.dump(data, file)\n return Path(filename)\n\n def cores(self) -> set[tuple[str, int, str]]:\n \"\"\"Gets all the AWG Cores used in the setup\n\n Returns:\n set[tuple[str, int, str]]:\n a Set of tuples, each tuple has a string 
representing the instruement\n name, a integer representing the index of the awg core of the\n instrument and a string representing the type of the awg core.\n \"\"\"\n return set(\n (port.instrument.name, port.core.index, port.core.type.value)\n for port in self.ports.values()\n )"
},
{
"identifier": "Interpreter",
"path": "shipyard/passes/interpreter.py",
"snippet": "class Interpreter(QASMVisitor):\n \"\"\"AST-visitor for evaluating OpenQASM code.\n\n Class maintains a call stack of activation records, which hold variable/literals\n information. Also maintains record of external functions, subroutines, and\n quantum gates.\n\n If subclassing, generally only the specialised ``visit_*`` methods need to be\n overridden. These are derived from the base class, and use the name of the\n relevant :mod:`AST node <.ast>` verbatim after ``visit_``.\n\n Based on the openQASM3 Printer\"\"\"\n\n def __init__(\n self,\n setup: SetupInternal = None,\n external_funcs: dict = None,\n visit_loops: bool = True,\n ):\n self.call_stack = CallStack()\n self.setup = setup\n self.external_funcs = external_funcs\n self.calibration_scope = {}\n self.defcal_nodes = {}\n self.defcal_names = []\n self.subroutines = {}\n self.visit_loops = visit_loops\n\n def visit_Program(self, node: ast.Program) -> None:\n activation_record = ActivationRecord(\n name=\"main\", ar_type=ARType.PROGRAM, nesting_level=1\n )\n with self.ar_context_manager(activation_record):\n for statement in node.statements:\n self.visit(statement)\n\n @_maybe_annotated\n def visit_Include(self, node: ast.Include) -> None:\n \"\"\"Include statements should be resolved at this point\"\"\"\n raise self.compile_out(node)\n\n @_maybe_annotated\n def visit_QubitDeclaration(self, node: ast.QubitDeclaration) -> None:\n \"\"\"Qubit declarations not supported\"\"\"\n activation_record = self.call_stack.peek()\n if node.size is not None:\n size = self.visit(node.size)\n activation_record[node.qubit.name] = [f\"${x}\" for x in range(size)]\n\n def visit_SubroutineDefinition(self, node: ast.SubroutineDefinition) -> None:\n \"\"\"Add subroutine to subroutines dict\"\"\"\n self.subroutines[node.name.name] = node\n\n @_maybe_annotated\n def visit_QuantumGateDefinition(self, node: ast.QuantumGateDefinition) -> None:\n \"\"\"Not supporting quantum gate definitions\"\"\"\n raise self.compile_out(node)\n\n @_maybe_annotated\n def visit_ExternDeclaration(self, node: ast.ExternDeclaration) -> None:\n \"\"\"Pass over extern declarations\"\"\"\n\n def visit_Identifier(self, node: ast.Identifier) -> None:\n \"\"\"Return the value associated with a given identifier\"\"\"\n try:\n activation_record = self.call_stack.down_stack(node.name)\n return activation_record[node.name]\n except KeyError as exc:\n raise SemanticError(\n ErrorCode.ID_NOT_FOUND,\n f\"Identifier: {node.name} not found in call stack\",\n ) from exc\n\n def visit_BooleanLiteral(self, node: ast.BooleanLiteral) -> bool:\n \"\"\"Return the value of a boolean literal\"\"\"\n return node.value\n\n def visit_BinaryExpression(self, node: ast.BinaryExpression) -> None:\n \"\"\"Evaluate and return the binary expression\"\"\"\n left = self.visit(node.lhs)\n right = self.visit(node.rhs)\n op = node.op\n return binary_ops[op.value](left, right)\n\n def visit_UnaryExpression(self, node: ast.UnaryExpression) -> None:\n \"\"\"Evaluate and return the unary expression\"\"\"\n op = node.op\n return unary_ops[op.value](self.visit(node.expression))\n\n def visit_FloatLiteral(self, node: ast.FloatLiteral) -> None:\n \"\"\"Return the value of a float literal\"\"\"\n return node.value\n\n def visit_ImaginaryLiteral(self, node: ast.ImaginaryLiteral) -> None:\n \"\"\"Return the value of an imaginary literal\"\"\"\n return complex(0, node.value)\n\n def visit_DurationLiteral(self, node: ast.DurationLiteral) -> None:\n \"\"\"Return the value of a duration literal\"\"\"\n return 
node.value\n\n def visit_IntegerLiteral(self, node: ast.IntegerLiteral) -> None:\n \"\"\"Return the value of an integer literal\"\"\"\n return node.value\n\n def visit_ArrayLiteral(self, node: ast.ArrayLiteral) -> None:\n \"\"\"Return the value of an array literal\"\"\"\n return np.array([self.visit(val) for val in node.values])\n\n def visit_IndexExpression(self, node: ast.IndexExpression) -> None:\n \"\"\"Return the value of an index expression. Assumes the IndexExpression\n is a discrete set (ex. arr[{0, 1, 2}]), range (ex. arr[0:3:1]), or\n list of expressions (ex. arr[0:2, 4])\"\"\"\n activation_record = self.call_stack.down_stack(node.collection.name)\n if isinstance(node.index, ast.DiscreteSet):\n return activation_record[node.collection.name][self.visit(node.index)]\n if isinstance(node.index, ast.RangeDefinition):\n start, end, step = self.visit(node.index)\n return activation_record[node.collection.name][start:end:step]\n # assume list of expressions\n indices = [self.visit(index) for index in node.index]\n return activation_record[node.collection.name][indices]\n\n def visit_ReturnStatement(self, node: ast.ReturnStatement) -> None:\n \"\"\"Return the value of a return statement\"\"\"\n return self.visit(node.expression)\n\n def visit_Concatenation(self, node: ast.Concatenation) -> None:\n \"\"\"\n Concatenation node visitor:\n joins elements in OpenQASM concatenation statement\n\n example:\n qasm: 'a ++ b ++ c;'\n\n Args:\n node (ast.Concatenation): openQASM concatenation AST node\n \"\"\"\n return np.concatenate([self.visit(node.lhs), self.visit(node.rhs)])\n\n def quantum_gate_helper(\n self, node: ast.QuantumMeasurementStatement | ast.QuantumReset | ast.QuantumGate\n ) -> None:\n \"\"\"\n Helper function for QuantumGate, QuantumMeasurementStatement, and QuantumReset.\n Puts the calibration dictionary onto the stack and then adds a new activation\n record for the quantum gate, measurement, or reset. 
In the case of a\n QuantumGate, the function first adds the arguments to the activation record,\n then the statements in the measurement, reset, or gate body are visited.\n \"\"\"\n curr_nesting = self.call_stack.peek().nesting_level\n outer_activation_record = ActivationRecord(\n name=\"calibration\",\n ar_type=ARType.CALIBRATION,\n nesting_level=curr_nesting + 1,\n )\n outer_activation_record.members = self.calibration_scope\n with self.ar_context_manager(outer_activation_record):\n inner_activation_record = ActivationRecord(\n name=\"defcal\", ar_type=ARType.DEFCAL, nesting_level=curr_nesting + 2\n )\n with self.ar_context_manager(inner_activation_record):\n signature = Mangler(node).signature()\n mangled_name = signature.match(self.defcal_names)[0]\n\n if isinstance(node, ast.QuantumGate):\n if node.modifiers:\n raise self.compile_out(node.modifiers)\n args = [self.visit(arg) for arg in node.arguments]\n node = self.defcal_nodes[mangled_name]\n inner_activation_record = self.call_stack.peek()\n for arg, val in zip(\n node.arguments, args\n ): # ignores Integer arguments\n if isinstance(arg, ast.ClassicalArgument):\n inner_activation_record[arg.name.name] = val\n for statement in self.defcal_nodes[mangled_name].body:\n if isinstance(statement, ast.ReturnStatement):\n returnval = self.visit(statement)\n return returnval\n self.visit(statement)\n\n @_maybe_annotated\n def visit_QuantumGate(self, node: ast.QuantumGate) -> None:\n \"\"\"\n QuantumGate node visitor:\n Visits and evaluates quantum gate call, at this point the gate operation\n should have a calibration definition (defcal).\n\n Example:\n qasm:\n defcal x90 $0 {...}\n >>x90 $0;\n -> ^^^^^^^\n Args:\n node (ast.QuantumGate): openQASM QuantumGate AST node\n\n Optionally returns elements based on gate definition\n \"\"\"\n self.quantum_gate_helper(node)\n\n @_maybe_annotated\n def visit_QuantumMeasurementStatement(\n self, node: ast.QuantumMeasurementStatement\n ) -> None:\n \"\"\"\n QuantumMeasurementStatement node visitor:\n Visits and evaluates quantum measurement call, at this point the quantum\n measurement statement should have a calibration definition (defcal)\n\n Example:\n qasm:\n defcal measure $0 -> bit {...}\n >>b1 = measure $0;\n -> ^^^^^^^^^^^\n Args:\n node (ast.QuantumMeasurementStatement): openQASM\n QuantumMeasurementStatement AST node\n Optionally allows for returns based on quantum measurement definition\n (gate definition)\n \"\"\"\n match node.target:\n case ast.Identifier():\n name = node.target.name\n activation_record = self.call_stack.down_stack(name)\n activation_record[name] = self.quantum_gate_helper(node)\n case ast.IndexedIdentifier():\n activation_record = self.call_stack.down_stack(node.target.name.name)\n activation_record[node.target.name.name][\n [self.visit(index) for index in node.target.indices[0]]\n ] = self.quantum_gate_helper(node)\n case _:\n self.quantum_gate_helper(node)\n\n @_maybe_annotated\n def visit_QuantumReset(self, node: ast.QuantumReset) -> None:\n \"\"\"\n QuantumReset node visitor:\n Visits and evaluates quantum reset call, at this point the quantum reset\n should have a calibration definition (defcal)\n\n Example:\n qasm:\n defcal reset $0 {...}\n >>reset $0;\n -> ^^^^^^^^^\n\n Args:\n node (ast.QuantumReset): openQASM QuantumReset AST node\n \"\"\"\n self.quantum_gate_helper(node)\n\n def visit_QuantumMeasurement(self, node: ast.QuantumMeasurement) -> None:\n \"\"\"\n QuantumMeasurement node visitor:\n Visits and evaluates quantum measurement call, at this point the 
quantum\n measurement statement should have a calibration definition (defcal). Differs\n from QuantumMeasurementStatement in that it does not allow for returns\n\n Example:\n qasm:\n defcal measure $0 -> bit {...}\n >>measure $0;\n ^^^^^^^^^^^\n Args:\n node (ast.QuantumMeasurement): openQASM QuantumMeasurement AST node\n Optionally allows for returns based on quantum measurement definition\n (gate definition)\n \"\"\"\n self.quantum_gate_helper(node)\n\n def visit_ExternArgument(self, node: ast.ExternArgument) -> None:\n \"\"\"Passes extern argument call\"\"\"\n\n def visit_DiscreteSet(self, node: ast.DiscreteSet) -> None:\n \"\"\"Returns a set of discrete values\"\"\"\n discrete_set = []\n for i in node.values:\n discrete_set.append(self.visit(i))\n return set(discrete_set)\n\n def visit_RangeDefinition(self, node: ast.RangeDefinition) -> None:\n \"\"\"Returns tuple of (start,end,step) or default values\"\"\"\n start = self.visit(node.start) if node.start else 0\n end = self.visit(node.end) if node.end else None\n step = self.visit(node.step) if node.step else 1\n return (start, end, step)\n\n def visit_ExpressionStatement(self, node: ast.ExpressionStatement) -> None:\n \"\"\"Visits expression statement\"\"\"\n return self.visit(node.expression)\n\n def generic_visit(self, node: ast.QASMNode) -> None:\n LOGGER.debug(\"Generic visit: %s\", node)\n\n @_maybe_annotated\n def visit_ClassicalDeclaration(self, node: ast.ClassicalDeclaration) -> None:\n \"\"\"Saves classical declaration to activation record\"\"\"\n activation_record = self.call_stack.peek()\n match node:\n case ast.ClassicalDeclaration(type=ast.PortType()):\n name = node.identifier.name\n # activation_record = self.call_stack.peek()\n activation_record[name] = self.setup.ports[name]\n case ast.ClassicalDeclaration(\n type=ast.FrameType(),\n init_expression=ast.FunctionCall(name=ast.Identifier(\"newframe\")),\n ):\n call = node.init_expression\n assert isinstance(call, ast.FunctionCall)\n assert len(call.arguments) == 3\n port = call.arguments[0].name\n frequency = self.visit(call.arguments[1])\n phase = self.visit(call.arguments[2])\n frame = Frame(\n name=node.identifier.name,\n port=activation_record[port],\n frequency=frequency,\n phase=phase,\n )\n\n activation_record[frame.name] = frame\n case ast.ClassicalDeclaration(type=ast.ArrayType()):\n if node.init_expression is None:\n shapes = [self.visit(dim) for dim in node.type.dimensions]\n activation_record[node.identifier.name] = np.zeros(shape=shapes)\n else:\n activation_record[node.identifier.name] = self.visit(\n node.init_expression\n )\n case ast.ClassicalDeclaration(type=ast.BitType()):\n if node.init_expression is None:\n size = self.visit(node.type.size) or 1\n activation_record[node.identifier.name] = np.zeros(shape=size)\n else:\n activation_record[node.identifier.name] = self.visit(\n node.init_expression\n )\n case ast.ClassicalDeclaration(type=ast.WaveformType()):\n if node.init_expression is None:\n activation_record[node.identifier.name] = None\n else:\n activation_record[node.identifier.name] = self.visit(\n node.init_expression\n )\n case _:\n if node.init_expression is not None:\n activation_record[node.identifier.name] = self.visit(\n node.init_expression\n )\n else:\n activation_record[node.identifier.name] = None\n\n @_maybe_annotated\n def visit_IODeclaration(self, node: ast.IODeclaration) -> None:\n \"\"\"IO Declaration should be resolved\"\"\"\n raise self.compile_out(node)\n\n @_maybe_annotated\n def visit_ConstantDeclaration(self, node: 
ast.ConstantDeclaration) -> None:\n \"\"\"Saves constant declaration to activation record\"\"\"\n activation_record = self.call_stack.peek()\n activation_record[node.identifier.name] = self.visit(node.init_expression)\n\n @_maybe_annotated\n def visit_CalibrationDefinition(self, node: ast.CalibrationDefinition) -> None:\n \"\"\"\n CalibrationDefinition (defcal) node visitor:\n Saves defcal defintions to self.defcal_nodes dictionary with a\n mangled name. These mangled names are also saved to a list of\n defcal names (self.defcal_names)\n\n Args:\n node (ast.CalibrationDefinition): defcal node to visit\n \"\"\"\n\n mangled_name = Mangler(node).signature().mangle()\n self.defcal_names.append(mangled_name)\n self.defcal_nodes[mangled_name] = node\n\n @_maybe_annotated\n def visit_CalibrationStatement(self, node: ast.CalibrationStatement) -> None:\n \"\"\"\n CalibrationStatement node visitor:\n Evaluates each line in a calibration block. Updates the\n self.calibration_scope dictionary which maintains a\n dictionary of values/variables in calibration scope.\n\n Args:\n node (ast.CalibrationStatement): openQASM CalibrationStatement AST node\n \"\"\"\n curr_nesting = self.call_stack.peek().nesting_level\n outer_activation_record = ActivationRecord(\n name=\"outer_calibration\",\n ar_type=ARType.CALIBRATION,\n nesting_level=curr_nesting + 1,\n )\n outer_activation_record.members = self.calibration_scope\n with self.ar_context_manager(outer_activation_record):\n inner_activation_record = ActivationRecord(\n name=\"new_calibration\",\n ar_type=ARType.CALIBRATION,\n nesting_level=curr_nesting + 2,\n )\n with self.ar_context_manager(inner_activation_record):\n for statement in node.body:\n self.visit(statement)\n self.calibration_scope.update(self.call_stack.peek().members)\n\n def visit_QuantumArgument(self, node: ast.QuantumArgument) -> None:\n \"\"\"Raises error\"\"\"\n self.visit(node.name)\n\n @_maybe_annotated\n def visit_BreakStatement(self, node: ast.BreakStatement) -> None:\n \"\"\"Raises error\"\"\"\n raise NotImplementedError\n\n @_maybe_annotated\n def visit_ContinueStatement(self, node: ast.ContinueStatement) -> None:\n \"\"\"Raises error\"\"\"\n raise NotImplementedError\n\n @_maybe_annotated\n def visit_EndStatement(self, node: ast.EndStatement) -> None:\n \"\"\"Raises error\"\"\"\n raise NotImplementedError\n\n @_maybe_annotated\n def visit_WhileLoop(self, node: ast.WhileLoop) -> None:\n \"\"\"\n WhileLoop node visitor:\n Prints out a while loop in SEQC format (which happens to be identical to\n openQASM format) All the statements in the block of the while loop are\n visited\n\n Example:\n qasm:\n while (int i < 10) {...; i=i+1;}\n ->\n seqc:\n while (cvar i < 10) {...; i=i+1;}\n\n Args:\n node (ast.WhileLoop): openQASM WhileLoop AST node\n context (PrinterState): state of the printer (e.g. indentation)\n \"\"\"\n if not self.visit_loops:\n return\n activation_record = ActivationRecord(\n name=\"while loop\",\n ar_type=ARType.LOOP,\n nesting_level=self.call_stack.nesting_level + 1,\n )\n with self.ar_context_manager(activation_record):\n # todo break if while_condition is just True (i.e. 
infiinite loop)\n while self.visit(node.while_condition):\n for statement in node.block:\n self.visit(statement)\n\n @_maybe_annotated\n def visit_ForInLoop(self, node: ast.ForInLoop) -> None:\n \"\"\"\n ForInLoop node visitor:\n Evaluates iteration range of for loop and then evaluates the body of the\n for loop for each iteration.\n Args:\n node (ast.ForInLoop): openQASM ForInLoop AST node\n\n Raises:\n Error: ErrorCode.UNHANDLED\n If the SET iterated over by the ForInLoop is incorrectly defined or not\n created using a RangeDefinition\n \"\"\"\n if not self.visit_loops:\n return\n name = node.identifier.name\n activation_record = ActivationRecord(\n name=f\"for_loop_{self.call_stack.nesting_level+1}\",\n ar_type=ARType.LOOP,\n nesting_level=self.call_stack.nesting_level + 1,\n )\n with self.ar_context_manager(activation_record):\n start, end, step = self.visit(node.set_declaration)\n if end is None:\n raise Error(\n ErrorCode.UNHANDLED,\n f\"unsupported set declaration in for loop: {node.set_declaration}\",\n )\n activation_record = self.call_stack.peek()\n activation_record[name] = start\n for i in range(start, end, step):\n activation_record[name] = i\n for statement in node.block:\n self.visit(statement)\n\n def visit_DelayInstruction(self, node: ast.DelayInstruction) -> None:\n \"\"\"Passes over delay instructions\"\"\"\n\n def visit_DurationOf(self, node: ast.DurationOf) -> None:\n \"\"\"DurationOf function not implemented\"\"\"\n raise self.compile_out(node)\n\n def visit_SizeOf(self, node: ast.SizeOf) -> None:\n \"\"\"SizeOf function not implemented\"\"\"\n raise self.compile_out(node)\n\n @_maybe_annotated\n def visit_AliasStatement(self, node: ast.AliasStatement) -> None:\n \"\"\"Saves alias statement to activation record, including name and value\"\"\"\n match node:\n case ast.AliasStatement(target=ast.Identifier(), value=ast.Concatenation()):\n activation_record = self.call_stack.peek()\n activation_record[node.target.name] = self.visit(node.value)\n case ast.AliasStatement(\n ast.Identifier(alias),\n ast.IndexExpression(ast.Identifier(name), [ast.RangeDefinition()]),\n ):\n start, end, step = self.visit_RangeDefinition(node.value.index[0])\n activation_record = self.call_stack.peek()\n activation_record[alias] = self.call_stack.down_stack(name)[name][\n start:end:step\n ]\n case _:\n raise self.compile_out(node)\n\n def _visit_IndexElement(self, node: IndexElement) -> None:\n match node:\n case ast.DiscreteSet():\n return self.visit(node)\n case list():\n return [self.visit(index) for index in node]\n\n @_maybe_annotated\n def visit_ClassicalAssignment(self, node: ast.ClassicalAssignment) -> None:\n \"\"\"Evaluate and save classical assignment to activation record\"\"\"\n match node:\n case ast.ClassicalAssignment(lvalue=ast.Identifier()):\n activation_record = self.call_stack.down_stack(node.lvalue.name)\n activation_record[node.lvalue.name] = self.visit(node.rvalue)\n case ast.ClassicalAssignment(lvalue=ast.IndexedIdentifier()):\n activation_record = self.call_stack.down_stack(node.lvalue.name.name)\n indices = [\n self._visit_IndexElement(index) for index in node.lvalue.indices\n ]\n activation_record[node.lvalue.name.name][indices] = self.visit(\n node.rvalue\n )\n case _:\n raise Error(\n ErrorCode.UNHANDLED, f\"unhandled classical assignment: {node}\"\n )\n\n def evaluate_function(self, func_name: str, arg_vals: list):\n \"\"\"Helper function to evaluate subroutine calls. 
Either from external\n functional definitions or from subroutines defined in the program.\n Adds arguments to the activation record and evaluates the body of the\n subroutine.\"\"\"\n if func_name in self.external_funcs:\n return self.external_funcs[func_name](*arg_vals)\n if func_name in self.subroutines:\n activation_record = self.call_stack.peek()\n node = self.subroutines[func_name]\n for arg, val in zip(node.arguments, arg_vals):\n activation_record[arg.name.name] = val\n for statement in node.body:\n if isinstance(statement, ast.ReturnStatement):\n return self.visit(statement)\n self.visit(statement)\n raise Error(ErrorCode.UNHANDLED, f\"function {func_name} not found\")\n\n def visit_FunctionCall(self, node: ast.FunctionCall) -> None:\n \"\"\"\n FunctionCall node visitor:\n Evaluates function calls. Either from external functional definitions\n or from subroutines defined in the program.\n\n Args:\n node (ast.FunctionCall): openQASM FunctionCall AST node\n \"\"\"\n curr_nesting = self.call_stack.peek().nesting_level\n activation_record = ActivationRecord(\n name=f\"{node.name.name}\",\n ar_type=ARType.SUBROUTINE,\n nesting_level=curr_nesting + 1,\n )\n with self.ar_context_manager(activation_record):\n match node:\n case ast.FunctionCall(name=ast.Identifier(\"play\")) | ast.FunctionCall(\n name=ast.Identifier(\"capture_v1\")\n ) | ast.FunctionCall(\n name=ast.Identifier(\"capture_v2\")\n ) | ast.FunctionCall(\n name=ast.Identifier(\"capture_v3\")\n ) | ast.FunctionCall(\n name=ast.Identifier(\"capture_v1_spectrum\")\n ):\n self.visit_play(node)\n case ast.FunctionCall(\n name=ast.Identifier(\"set_phase\"),\n arguments=[ast.Identifier(frame_name), _],\n ):\n frame: Frame = self.call_stack.down_stack(frame_name)[frame_name]\n phase_val = self.visit(node.arguments[1])\n phase_val = (phase_val + np.pi) % (2 * np.pi) - np.pi\n frame.set_phase(phase_val)\n case ast.FunctionCall(\n name=ast.Identifier(\"shift_phase\"),\n arguments=[ast.Identifier(frame_name), _],\n ):\n frame: Frame = self.call_stack.down_stack(frame_name)[frame_name]\n phase_val = self.visit(node.arguments[1]) + frame.phase\n phase_val = (phase_val + np.pi) % (2 * np.pi) - np.pi\n frame.set_phase(phase_val)\n case ast.FunctionCall(\n name=ast.Identifier(\"set_frequency\"),\n arguments=[ast.Identifier(frame_name), _],\n ):\n frame: Frame = self.call_stack.down_stack(frame_name)[frame_name]\n frame.set_frequency(self.visit(node.arguments[1]))\n case ast.FunctionCall(\n name=ast.Identifier(\"shift_frequency\"),\n arguments=[ast.Identifier(frame_name), _],\n ):\n frame: Frame = self.call_stack.down_stack(frame_name)[frame_name]\n frame.shift_frequency(self.visit(node.arguments[1]))\n case ast.FunctionCall(name=ast.Identifier(\"executeTableEntry\")):\n pass\n case ast.FunctionCall(name=ast.Identifier(\"assignWaveIndex\")):\n pass\n case _:\n args = [self.visit(arg) for arg in node.arguments]\n return_val = self.evaluate_function(node.name.name, args)\n return return_val\n\n def visit_play(self, node: ast.FunctionCall) -> None:\n \"\"\"Passes over visit_play function (see PulseVisualizer)\"\"\"\n\n @contextmanager\n def ar_context_manager(\n self,\n activation_record: ActivationRecord,\n ):\n \"\"\"\n Context manager for activation records / call stack,\n the activation record tracks ports and frames declared in the program\n to make sure frames can be replaced with appropriate channels\n\n Args:\n activation_record (ActivationRecord): activation record to activate\n \"\"\"\n self.call_stack.push(activation_record)\n 
LOGGER.debug(\"ENTER: ACTIVATION RECORD %s\", activation_record.name)\n LOGGER.debug(self.call_stack)\n try:\n yield\n finally:\n LOGGER.debug(\"LEAVE: ACTIVATION RECORD %s\", activation_record.name)\n LOGGER.debug(self.call_stack)\n self.call_stack.pop()\n\n def compile_out(self, node: ast.QASMNode) -> Error:\n \"\"\"\n Method for standartizing raising errors when Intepreter is asked to visit\n nodes that should be compiled out of the AST before the Interpreter is used.\n\n Args:\n node (ast.QASMNode):\n Should have been removed from the AST by prior compilation steps\n\n Returns:\n Error: should be raised immediately after this method returns\n \"\"\"\n return Error(ErrorCode.COMPILE_OUT, f\"{node}\")"
}
] | from contextlib import contextmanager
from openpulse import ast
from openpulse.printer import dumps
from ..call_stack import ActivationRecord, ARType
from ..compiler_error import Error, ErrorCode
from ..setup.internal import SetupInternal
from .interpreter import Interpreter
import numpy as np | 8,836 | """Check that waveforms meet ZI timing constraints"""
class TimingConstraints(Interpreter):
    """
    Analyzes the waveforms played or captured in the program to make sure they meet
    the timing constraints of the ZI hardware.

    Args:
        minimum_length (int | None):
            minimum length of the waveform in samples (default: 32)
        granularity (int | None):
            granularity of the waveform in samples (default: 16)
    """

    def __init__(
        self,
        setup: SetupInternal = None,
        external_funcs: dict = None,
        minimum_length: int = 32,
        granularity: int = 16,
    ) -> None:
        self.minimum_length = minimum_length
        self.granularity = granularity
        self.flagged_wfs = []
        super().__init__(setup=setup, external_funcs=external_funcs)

    def check_timing_constraints(self, node, delay_flag=False) -> tuple[bool, int]:
        """
        Checks the timing constraints of a waveform

        Args:
            node
                can be various types

        Returns:
            bool: True if the waveform meets the timing constraints
            int: length of the waveform
        """
        dur_val = self.visit(node)
        if isinstance(dur_val, np.ndarray):
            dur_val = len(dur_val)
        elif dur_val is None: # should occur during ExecuteTableEntry
            return True, -1
        return (
            dur_val >= self.minimum_length and dur_val % self.granularity == 0
        ), dur_val

    def visit_Program(self, node: ast.Program) -> None:
        activation_record = ActivationRecord(
            name="main", ar_type=ARType.PROGRAM, nesting_level=1
        )
        for extern in self.external_funcs:
            activation_record[extern] = "external"
        with self.ar_context_manager(activation_record):
            for statement in node.statements:
                self.visit(statement)
        if self.flagged_wfs:
            total_message = self.construct_warning_message()
| """Check that waveforms meet ZI timing constraints"""
class TimingConstraints(Interpreter):
    """
    Analyzes the waveforms played or captured in the program to make sure they meet
    the timing constraints of the ZI hardware.

    Args:
        minimum_length (int | None):
            minimum length of the waveform in samples (default: 32)
        granularity (int | None):
            granularity of the waveform in samples (default: 16)
    """

    def __init__(
        self,
        setup: SetupInternal = None,
        external_funcs: dict = None,
        minimum_length: int = 32,
        granularity: int = 16,
    ) -> None:
        self.minimum_length = minimum_length
        self.granularity = granularity
        self.flagged_wfs = []
        super().__init__(setup=setup, external_funcs=external_funcs)

    def check_timing_constraints(self, node, delay_flag=False) -> tuple[bool, int]:
        """
        Checks the timing constraints of a waveform

        Args:
            node
                can be various types

        Returns:
            bool: True if the waveform meets the timing constraints
            int: length of the waveform
        """
        dur_val = self.visit(node)
        if isinstance(dur_val, np.ndarray):
            dur_val = len(dur_val)
        elif dur_val is None: # should occur during ExecuteTableEntry
            return True, -1
        return (
            dur_val >= self.minimum_length and dur_val % self.granularity == 0
        ), dur_val

    def visit_Program(self, node: ast.Program) -> None:
        activation_record = ActivationRecord(
            name="main", ar_type=ARType.PROGRAM, nesting_level=1
        )
        for extern in self.external_funcs:
            activation_record[extern] = "external"
        with self.ar_context_manager(activation_record):
            for statement in node.statements:
                self.visit(statement)
        if self.flagged_wfs:
            total_message = self.construct_warning_message() | raise Error( | 2 | 2023-11-16 17:37:29+00:00 | 8k
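The TimingConstraints record above reduces to a simple per-waveform length test: a minimum sample count and a granularity that the length must divide evenly. Below is a minimal, self-contained sketch of that test; the constant and function names are illustrative assumptions, not identifiers from the repository.

import numpy as np

MIN_SAMPLES = 32   # assumed minimum waveform length in samples (the record's default)
GRANULARITY = 16   # assumed granularity: length must be a multiple of this

def waveform_is_valid(waveform: np.ndarray) -> tuple[bool, int]:
    """Return (meets_constraints, length) for a sampled waveform."""
    length = len(waveform)
    return (length >= MIN_SAMPLES and length % GRANULARITY == 0), length

print(waveform_is_valid(np.zeros(48)))  # (True, 48): 48 >= 32 and 48 % 16 == 0
print(waveform_is_valid(np.zeros(40)))  # (False, 40): 40 is not a multiple of 16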
KevinXu02/ControlledDreamGaussian | frankmocap/bodymocap/body_mocap_api.py | [
{
"identifier": "hmr",
"path": "frankmocap/bodymocap/models/hmr.py",
"snippet": "def hmr(smpl_mean_params, pretrained=True, **kwargs):\n \"\"\" Constructs an HMR model with ResNet50 backbone.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = HMR(Bottleneck, [3, 4, 6, 3], smpl_mean_params, **kwargs)\n if pretrained:\n resnet_imagenet = resnet.resnet50(pretrained=True)\n model.load_state_dict(resnet_imagenet.state_dict(),strict=False)\n return model"
},
{
"identifier": "SMPL",
"path": "frankmocap/bodymocap/models/smpl.py",
"snippet": "class SMPL(_SMPL):\n \"\"\" Extension of the official SMPL implementation to support more joints \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(SMPL, self).__init__(*args, **kwargs)\n joints = [constants.JOINT_MAP[i] for i in constants.JOINT_NAMES]\n JOINT_REGRESSOR_TRAIN_EXTRA = './frankmocap/extra_data/body_module/data_from_spin//J_regressor_extra.npy'\n J_regressor_extra = np.load(JOINT_REGRESSOR_TRAIN_EXTRA)\n self.register_buffer('J_regressor_extra', torch.tensor(J_regressor_extra, dtype=torch.float32))\n self.joint_map = torch.tensor(joints, dtype=torch.long)\n\n def forward(self, *args, **kwargs):\n kwargs['get_skin'] = True\n smpl_output = super(SMPL, self).forward(*args, **kwargs)\n extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices) #Additional 9 joints #Check doc/J_regressor_extra.png\n joints = torch.cat([smpl_output.joints, extra_joints], dim=1) #[N, 24 + 21, 3] + [N, 9, 3]\n joints = joints[:, self.joint_map, :]\n output = ModelOutput(vertices=smpl_output.vertices,\n global_orient=smpl_output.global_orient,\n body_pose=smpl_output.body_pose,\n joints=joints,\n betas=smpl_output.betas,\n full_pose=smpl_output.full_pose)\n return output"
},
{
"identifier": "SMPLX",
"path": "frankmocap/bodymocap/models/smpl.py",
"snippet": "class SMPLX(_SMPLX):\n \"\"\" Extension of the official SMPL implementation to support more joints \"\"\"\n\n def __init__(self, *args, **kwargs):\n kwargs['ext'] = 'pkl' #We have pkl file\n super(SMPLX, self).__init__(*args, **kwargs)\n joints = [constants.JOINT_MAP[i] for i in constants.JOINT_NAMES]\n JOINT_REGRESSOR_TRAIN_EXTRA_SMPLX = 'extra_data/body_module/J_regressor_extra_smplx.npy'\n J_regressor_extra = np.load(JOINT_REGRESSOR_TRAIN_EXTRA_SMPLX) #(9, 10475)\n self.register_buffer('J_regressor_extra', torch.tensor(J_regressor_extra, dtype=torch.float32))\n self.joint_map = torch.tensor(joints, dtype=torch.long)\n\n def forward(self, *args, **kwargs):\n kwargs['get_skin'] = True\n\n #if pose parameter is for SMPL with 21 joints (ignoring root)\n if(kwargs['body_pose'].shape[1]==69):\n kwargs['body_pose'] = kwargs['body_pose'][:,:-2*3] #Ignore the last two joints (which are on the palm. Not used)\n\n if(kwargs['body_pose'].shape[1]==23):\n kwargs['body_pose'] = kwargs['body_pose'][:,:-2] #Ignore the last two joints (which are on the palm. Not used)\n\n smpl_output = super(SMPLX, self).forward(*args, **kwargs)\n extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices)\n # extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices[:,:6890]) *0 #TODO: implement this correctly\n\n #SMPL-X Joint order: https://docs.google.com/spreadsheets/d/1_1dLdaX-sbMkCKr_JzJW_RZCpwBwd7rcKkWT_VgAQ_0/edit#gid=0\n smplx_to_smpl = list(range(0,22)) + [28,43] + list(range(55,76)) # 28 left middle finger , 43: right middle finger 1\n smpl_joints = smpl_output.joints[:,smplx_to_smpl,:] # Convert SMPL-X to SMPL 127 ->45\n joints = torch.cat([smpl_joints, extra_joints], dim=1) # [N, 127, 3]->[N, 45, 3] + [N, 9, 3] # SMPL-X has more joints. should convert 45\n joints = joints[:, self.joint_map, :] \n\n # Hand joints\n smplx_hand_to_panoptic = [0, 13,14,15,16, 1,2,3,17, 4,5,6,18, 10,11,12,19, 7,8,9,20] #Wrist Thumb to Pinky\n\n smplx_lhand = [20] + list(range(25,40)) + list(range(66, 71)) #20 for left wrist. 20 finger joints\n lhand_joints = smpl_output.joints[:,smplx_lhand, :] #(N,21,3)\n lhand_joints = lhand_joints[:, smplx_hand_to_panoptic, :] #Convert SMPL-X hand order to paonptic hand order\n\n smplx_rhand = [21] + list(range(40,55)) + list(range(71, 76)) #21 for right wrist. 20 finger joints\n rhand_joints = smpl_output.joints[:, smplx_rhand, :] #(N,21,3)\n rhand_joints = rhand_joints[:,smplx_hand_to_panoptic,:] #Convert SMPL-X hand order to paonptic hand order\n\n output = ModelOutput(vertices=smpl_output.vertices,\n global_orient=smpl_output.global_orient,\n body_pose=smpl_output.body_pose,\n joints=joints,\n right_hand_joints=rhand_joints, #N,21,3\n left_hand_joints=lhand_joints, #N,21,3\n betas=smpl_output.betas,\n full_pose=smpl_output.full_pose)\n return output"
},
{
"identifier": "constants",
"path": "frankmocap/bodymocap/constants.py",
"snippet": "FOCAL_LENGTH = 5000.\nIMG_RES = 224\nIMG_NORM_MEAN = [0.485, 0.456, 0.406]\nIMG_NORM_STD = [0.229, 0.224, 0.225]\nJOINT_NAMES = [\n'OP Nose', 'OP Neck', 'OP RShoulder', #0,1,2\n'OP RElbow', 'OP RWrist', 'OP LShoulder', #3,4,5\n'OP LElbow', 'OP LWrist', 'OP MidHip', #6, 7,8\n'OP RHip', 'OP RKnee', 'OP RAnkle', #9,10,11\n'OP LHip', 'OP LKnee', 'OP LAnkle', #12,13,14\n'OP REye', 'OP LEye', 'OP REar', #15,16,17\n'OP LEar', 'OP LBigToe', 'OP LSmallToe', #18,19,20\n'OP LHeel', 'OP RBigToe', 'OP RSmallToe', 'OP RHeel', #21, 22, 23, 24 ##Total 25 joints for openpose\n'Right Ankle', 'Right Knee', 'Right Hip', #0,1,2\n'Left Hip', 'Left Knee', 'Left Ankle', #3, 4, 5\n'Right Wrist', 'Right Elbow', 'Right Shoulder', #6\n'Left Shoulder', 'Left Elbow', 'Left Wrist', #9\n'Neck (LSP)', 'Top of Head (LSP)', #12, 13\n'Pelvis (MPII)', 'Thorax (MPII)', #14, 15\n'Spine (H36M)', 'Jaw (H36M)', #16, 17\n'Head (H36M)', 'Nose', 'Left Eye', #18, 19, 20\n'Right Eye', 'Left Ear', 'Right Ear' #21,22,23 (Total 24 joints)\n]\nJOINT_IDS = {JOINT_NAMES[i]: i for i in range(len(JOINT_NAMES))}\nJOINT_MAP = {\n'OP Nose': 24, 'OP Neck': 12, 'OP RShoulder': 17,\n'OP RElbow': 19, 'OP RWrist': 21, 'OP LShoulder': 16,\n'OP LElbow': 18, 'OP LWrist': 20, 'OP MidHip': 0,\n'OP RHip': 2, 'OP RKnee': 5, 'OP RAnkle': 8,\n'OP LHip': 1, 'OP LKnee': 4, 'OP LAnkle': 7,\n'OP REye': 25, 'OP LEye': 26, 'OP REar': 27,\n'OP LEar': 28, 'OP LBigToe': 29, 'OP LSmallToe': 30,\n'OP LHeel': 31, 'OP RBigToe': 32, 'OP RSmallToe': 33, 'OP RHeel': 34,\n'Right Ankle': 8, 'Right Knee': 5, 'Right Hip': 45,\n'Left Hip': 46, 'Left Knee': 4, 'Left Ankle': 7,\n'Right Wrist': 21, 'Right Elbow': 19, 'Right Shoulder': 17,\n'Left Shoulder': 16, 'Left Elbow': 18, 'Left Wrist': 20,\n'Neck (LSP)': 47, 'Top of Head (LSP)': 48,\n'Pelvis (MPII)': 49, 'Thorax (MPII)': 50,\n'Spine (H36M)': 51, 'Jaw (H36M)': 52,\n'Head (H36M)': 53, 'Nose': 24, 'Left Eye': 26,\n'Right Eye': 25, 'Left Ear': 28, 'Right Ear': 27\n}\nH36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]\nH36M_TO_J14 = H36M_TO_J17[:14]\nJ24_TO_J17 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18, 14, 16, 17]\nJ24_TO_J14 = J24_TO_J17[:14]\nSMPL_JOINTS_FLIP_PERM = [0, 2, 1, 3, 5, 4, 6, 8, 7, 9, 11, 10, 12, 14, 13, 15, 17, 16, 19, 18, 21, 20, 23, 22]\nSMPL_POSE_FLIP_PERM = []\nJ24_FLIP_PERM = [5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13, 14, 15, 16, 17, 18, 19, 21, 20, 23, 22]\nJ49_FLIP_PERM = [0, 1, 5, 6, 7, 2, 3, 4, 8, 12, 13, 14, 9, 10, 11, 16, 15, 18, 17, 22, 23, 24, 19, 20, 21]\\\n + [25+i for i in J24_FLIP_PERM]"
},
{
"identifier": "crop",
"path": "frankmocap/bodymocap/utils/imutils.py",
"snippet": "def crop(img, center, scale, res, rot=0):\n \"\"\"Crop image according to the supplied bounding box.\"\"\"\n # Upper left point\n ul = np.array(transform([1, 1], center, scale, res, invert=1))-1\n # Bottom right point\n br = np.array(transform([res[0]+1,\n res[1]+1], center, scale, res, invert=1))-1\n\n # Padding so that when rotated proper amount of context is included\n pad = int(np.linalg.norm(br - ul) / 2 - float(br[1] - ul[1]) / 2)\n if not rot == 0:\n ul -= pad\n br += pad\n\n new_shape = [br[1] - ul[1], br[0] - ul[0]]\n if new_shape[0]>15000 or new_shape[1]>15000:\n print(\"Image Size Too Big! scale{}, new_shape{} br{}, ul{}\".format(scale, new_shape, br, ul))\n return None\n\n\n if len(img.shape) > 2:\n new_shape += [img.shape[2]]\n\n\n new_img = np.zeros(new_shape, dtype=np.uint8)\n\n # #Compute bbox for Han's format\n # bboxScale_o2n = 224/new_img.shape[0]\n # bboxTopLeft = ul *bboxScale_o2n\n\n\n # Range to fill new array\n new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0]\n new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1]\n # Range to sample from original image\n old_x = max(0, ul[0]), min(len(img[0]), br[0])\n old_y = max(0, ul[1]), min(len(img), br[1])\n # print(\"{} vs {} || {} vs {}\".format(new_y[1] - new_y[0] , old_y[1] - old_y[0], new_x[1] - new_x[0], old_x[1] -old_x[0] ) )\n if new_y[1] - new_y[0] != old_y[1] - old_y[0] or new_x[1] - new_x[0] != old_x[1] -old_x[0] or new_y[1] - new_y[0] <0 or new_x[1] - new_x[0] <0:\n print(\"Warning: maybe person is out of image boundary!\")\n return None\n new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1],\n old_x[0]:old_x[1]]\n\n if not rot == 0:\n # Remove padding\n new_img = scipy.misc.imrotate(new_img, rot)\n new_img = new_img[pad:-pad, pad:-pad]\n\n new_img = cv2.resize(new_img, tuple(res))\n # new_img = scipy.misc.imresize(new_img, res) #Need this to get the same number with the old model (trained with this resize)\n\n return new_img#, bboxScale_o2n, bboxTopLeft"
},
{
"identifier": "crop_bboxInfo",
"path": "frankmocap/bodymocap/utils/imutils.py",
"snippet": "def crop_bboxInfo(img, center, scale, res =(224,224)):\n \"\"\"Crop image according to the supplied bounding box.\"\"\"\n # Upper left point\n ul = np.array(transform([1, 1], center, scale, res, invert=1))-1\n # Bottom right point\n br = np.array(transform([res[0]+1,\n res[1]+1], center, scale, res, invert=1))-1\n\n\n # Padding so that when rotated proper amount of context is included\n pad = int(np.linalg.norm(br - ul) / 2 - float(br[1] - ul[1]) / 2)\n\n new_shape = [br[1] - ul[1], br[0] - ul[0]]\n if len(img.shape) > 2:\n new_shape += [img.shape[2]]\n # new_img = np.zeros(new_shape)\n if new_shape[0] <1 or new_shape[1] <1:\n return None, None, None\n new_img = np.zeros(new_shape, dtype=np.uint8)\n\n if new_img.shape[0] ==0:\n return None, None, None\n\n #Compute bbox for Han's format\n bboxScale_o2n = res[0]/new_img.shape[0] #224/ 531\n\n # Range to fill new array\n new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0]\n new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1]\n # Range to sample from original image\n old_x = max(0, ul[0]), min(len(img[0]), br[0])\n old_y = max(0, ul[1]), min(len(img), br[1])\n\n if new_y[0] <0 or new_y[1]<0 or new_x[0] <0 or new_x[1]<0 :\n return None, None, None\n\n new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1],\n old_x[0]:old_x[1]]\n\n bboxTopLeft_inOriginal = (ul[0], ul[1] )\n\n if new_img.shape[0] <20 or new_img.shape[1]<20:\n return None, None, None\n # print(bboxTopLeft_inOriginal)\n # from renderer import viewer2D\n # viewer2D.ImShow(new_img.astype(np.uint8),name='cropped')\n\n new_img = cv2.resize(new_img, res)\n\n # viewer2D.ImShow(new_img.astype(np.uint8),name='original')\n\n return new_img, bboxScale_o2n, np.array(bboxTopLeft_inOriginal)"
},
{
"identifier": "process_image_bbox",
"path": "frankmocap/bodymocap/utils/imutils.py",
"snippet": "def process_image_bbox(img_original, bbox_XYWH, input_res=224):\n \"\"\"Read image, do preprocessing and possibly crop it according to the bounding box.\n If there are bounding box annotations, use them to crop the image.\n If no bounding box is specified but openpose detections are available, use them to get the bounding box.\n \"\"\"\n normalize_img = Normalize(mean=constants.IMG_NORM_MEAN, std=constants.IMG_NORM_STD)\n img_original = img_original[:,:,::-1].copy() # PyTorch does not support negative stride at the moment\n img = img_original.copy()\n\n center, scale = bbox_from_bbr(bbox_XYWH, imageHeight = img.shape[0])\n if center is None:\n return None, None, None, None, None\n\n img, boxScale_o2n, bboxTopLeft = crop_bboxInfo(img, center, scale, (input_res, input_res))\n\n # viewer2D.ImShow(img, name='cropped', waitTime=1) #224,224,3\n\n if img is None:\n return None, None, None, None, None\n\n\n # unCropped = uncrop(img, center, scale, (input_res, input_res))\n\n # if True:\n # viewer2D.ImShow(img)\n norm_img = (img.copy()).astype(np.float32) / 255.\n norm_img = torch.from_numpy(norm_img).permute(2,0,1)\n norm_img = normalize_img(norm_img.clone())[None]\n\n bboxInfo ={\"center\": center, \"scale\": scale, \"bboxXYWH\":bbox_XYWH}\n return img, norm_img, boxScale_o2n, bboxTopLeft, bboxInfo"
},
{
"identifier": "process_image_keypoints",
"path": "frankmocap/bodymocap/utils/imutils.py",
"snippet": "def process_image_keypoints(img, keypoints, input_res=224):\n \"\"\"Read image, do preprocessing and possibly crop it according to the bounding box.\n If there are bounding box annotations, use them to crop the image.\n If no bounding box is specified but openpose detections are available, use them to get the bounding box.\n \"\"\"\n normalize_img = Normalize(mean=constants.IMG_NORM_MEAN, std=constants.IMG_NORM_STD)\n img = img[:,:,::-1].copy() # PyTorch does not support negative stride at the moment\n\n center, scale, bbox = bbox_from_keypoints(keypoints, imageHeight = img.shape[0])\n if center is None:\n return None, None, None, None, None\n\n img, boxScale_o2n, bboxTopLeft = crop_bboxInfo(img, center, scale, (input_res, input_res))\n\n # viewer2D.ImShow(img, name='cropped', waitTime=1) #224,224,3\n\n\n if img is None:\n return None, None, None, None, None\n\n\n # unCropped = uncrop(img, center, scale, (input_res, input_res))\n\n # if True:\n # viewer2D.ImShow(img)\n img = img.astype(np.float32) / 255.\n img = torch.from_numpy(img).permute(2,0,1)\n norm_img = normalize_img(img.clone())[None]\n # return img, norm_img, img_original, boxScale_o2n, bboxTopLeft, bbox\n bboxInfo ={\"center\": center, \"scale\": scale, \"bboxXYWH\":bbox}\n return img, norm_img, boxScale_o2n, bboxTopLeft, bboxInfo"
},
{
"identifier": "bbox_from_keypoints",
"path": "frankmocap/bodymocap/utils/imutils.py",
"snippet": "def bbox_from_keypoints(keypoints, rescale=1.2, detection_thresh=0.2, imageHeight= None):\n \"\"\"Get center and scale for bounding box from openpose detections.\"\"\"\n # with open(openpose_file, 'r') as f:\n # data = json.load(f)\n # if 'people' not in data or len(data['people'])==0:\n # return None, None\n # # keypoints = json.load(f)['people'][0]['pose_keypoints_2d']\n # keypoints = data['people'][0]['pose_keypoints_2d']\n keypoints = np.reshape(np.array(keypoints), (-1,3))\n valid = keypoints[:,-1] > detection_thresh\n\n # if g_debugUpperBodyOnly: #Intentionally remove lower bodies\n # valid[ [ 9,10,11,12,13,14, 22,23,24, 19,20,21] ] = False\n\n valid_keypoints = keypoints[valid][:,:-1] #(25,2)\n\n if len(valid_keypoints)<2:\n return None, None, None\n\n\n if False: #Should have all limbs and nose\n if np.sum(valid[ [ 2,3,4, 5,6,7, 9,10, 12,13,1,0] ]) <12:\n return None, None, None\n\n min_pt = np.min(valid_keypoints, axis=0)\n max_pt = np.max(valid_keypoints, axis=0)\n\n \n bbox= [ min_pt[0], min_pt[1], max_pt[0] - min_pt[0], max_pt[1] - min_pt[1]]\n\n\n\n # print(valid_keypoints)\n # print(valid)\n print(bbox)\n\n if imageHeight is not None:\n\n if valid[10]==False and valid[13] == False: # No knees ub ioeb\n max_pt[1] = min(max_pt[1] + (max_pt[1]- min_pt[1]), imageHeight )\n bbox= [ min_pt[0], min_pt[1], max_pt[0] - min_pt[0], max_pt[1] - min_pt[1]]\n valid_keypoints = np.vstack( (valid_keypoints, np.array(max_pt)) )\n\n\n elif valid[11]==False and valid[14] == False: #No foot\n max_pt[1] = min(max_pt[1] + (max_pt[1]- min_pt[1])*0.2, imageHeight )\n bbox= [ min_pt[0], min_pt[1], max_pt[0] - min_pt[0], max_pt[1] - min_pt[1]]\n\n valid_keypoints = np.vstack( (valid_keypoints, np.array(max_pt)) )\n\n\n center = valid_keypoints.mean(axis=0)\n bbox_size = (valid_keypoints.max(axis=0) - valid_keypoints.min(axis=0)).max()\n # adjust bounding box tightness\n scale = bbox_size / 200.0\n scale *= rescale\n return center, scale, bbox"
},
{
"identifier": "convert_smpl_to_bbox",
"path": "frankmocap/mocap_utils/coordconv.py",
"snippet": "def convert_smpl_to_bbox(data3D, scale, trans, bAppTransFirst=False):\n data3D = data3D.copy()\n resnet_input_size_half = 224 *0.5\n if bAppTransFirst: # Hand model\n data3D[:,0:2] += trans\n data3D *= scale # apply scaling\n else:\n data3D *= scale # apply scaling\n data3D[:,0:2] += trans\n \n data3D*= resnet_input_size_half # 112 is originated from hrm's input size (224,24)\n # data3D[:,:2]*= resnet_input_size_half # 112 is originated from hrm's input size (224,24)\n return data3D"
},
{
"identifier": "convert_bbox_to_oriIm",
"path": "frankmocap/mocap_utils/coordconv.py",
"snippet": "def convert_bbox_to_oriIm(data3D, boxScale_o2n, bboxTopLeft, imgSizeW, imgSizeH):\n data3D = data3D.copy()\n resnet_input_size_half = 224 *0.5\n imgSize = np.array([imgSizeW,imgSizeH])\n\n data3D /= boxScale_o2n\n\n if not isinstance(bboxTopLeft, np.ndarray):\n assert isinstance(bboxTopLeft, tuple)\n assert len(bboxTopLeft) == 2\n bboxTopLeft = np.array(bboxTopLeft)\n\n data3D[:,:2] += (bboxTopLeft + resnet_input_size_half/boxScale_o2n)\n\n return data3D"
}
] | import cv2
import sys
import torch
import numpy as np
import pickle
import frankmocap.mocap_utils.geometry_utils as gu
from torchvision.transforms import Normalize
from frankmocap.bodymocap.models import hmr, SMPL, SMPLX
from frankmocap.bodymocap import constants
from frankmocap.bodymocap.utils.imutils import crop, crop_bboxInfo, process_image_bbox, process_image_keypoints, \
bbox_from_keypoints
from frankmocap.mocap_utils.coordconv import convert_smpl_to_bbox, convert_bbox_to_oriIm | 7,441 | # Copyright (c) Facebook, Inc. and its affiliates.
class BodyMocap(object):
    def __init__(self, regressor_checkpoint, smpl_dir, device=torch.device('cuda'), use_smplx=False):
        self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

        # Load parametric model (SMPLX or SMPL)
        if use_smplx:
            smplModelPath = smpl_dir + '/SMPLX_NEUTRAL.pkl'
            self.smpl = SMPLX(smpl_dir,
                              batch_size=1,
                              num_betas=10,
                              use_pca=False,
                              create_transl=False).to(self.device)
            self.use_smplx = True
        else:
            smplModelPath = smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
            self.smpl = SMPL(smplModelPath, batch_size=1, create_transl=False).to(self.device)
            self.use_smplx = False

        # Load pre-trained neural network
        SMPL_MEAN_PARAMS = './frankmocap/extra_data/body_module/data_from_spin/smpl_mean_params.npz'
        self.model_regressor = hmr(SMPL_MEAN_PARAMS).to(self.device)
        checkpoint = torch.load(regressor_checkpoint)
        self.model_regressor.load_state_dict(checkpoint['model'], strict=False)
        self.model_regressor.eval()

    def regress(self, img_original, body_bbox_list):
        """
        args:
            img_original: original raw image (BGR order by using cv2.imread)
            body_bbox: bounding box around the target: (minX, minY, width, height)
        outputs:
            pred_vertices_img:
            pred_joints_vis_img:
            pred_rotmat
            pred_betas
            pred_camera
            bbox: [bbr[0], bbr[1],bbr[0]+bbr[2], bbr[1]+bbr[3]])
            bboxTopLeft: bbox top left (redundant)
            boxScale_o2n: bbox scaling factor (redundant)
        """
        pred_output_list = list()

        for body_bbox in body_bbox_list:
            img, norm_img, boxScale_o2n, bboxTopLeft, bbox = process_image_bbox(
                img_original, body_bbox, input_res=constants.IMG_RES)
            bboxTopLeft = np.array(bboxTopLeft)
            # bboxTopLeft = bbox['bboxXYWH'][:2]

            if img is None:
                pred_output_list.append(None)
                continue

            with torch.no_grad():
                # model forward
                pred_rotmat, pred_betas, pred_camera = self.model_regressor(norm_img.to(self.device))

                # Convert rot_mat to aa since hands are always in aa
                # pred_aa = rotmat3x3_to_angle_axis(pred_rotmat)
                pred_aa = gu.rotation_matrix_to_angle_axis(pred_rotmat).cuda()
                pred_aa = pred_aa.reshape(pred_aa.shape[0], 72)
                # remove global rotation
                pred_aa[:, :3] = 0
                smpl_output = self.smpl(
                    betas=pred_betas,
                    body_pose=pred_aa[:, 3:],
                    global_orient=pred_aa[:, :3],
                    pose2rot=True)
                pred_vertices = smpl_output.vertices
                pred_joints_3d = smpl_output.joints

                pred_vertices = pred_vertices[0].cpu().numpy()
                pred_camera = pred_camera.cpu().numpy().ravel()
                camScale = pred_camera[0] # *1.15
                camTrans = pred_camera[1:]

                pred_output = dict()
                # Convert mesh to original image space (X,Y are aligned to image)
                # 1. SMPL -> 2D bbox
                # 2. 2D bbox -> original 2D image
| # Copyright (c) Facebook, Inc. and its affiliates.
class BodyMocap(object):
    def __init__(self, regressor_checkpoint, smpl_dir, device=torch.device('cuda'), use_smplx=False):
        self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

        # Load parametric model (SMPLX or SMPL)
        if use_smplx:
            smplModelPath = smpl_dir + '/SMPLX_NEUTRAL.pkl'
            self.smpl = SMPLX(smpl_dir,
                              batch_size=1,
                              num_betas=10,
                              use_pca=False,
                              create_transl=False).to(self.device)
            self.use_smplx = True
        else:
            smplModelPath = smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
            self.smpl = SMPL(smplModelPath, batch_size=1, create_transl=False).to(self.device)
            self.use_smplx = False

        # Load pre-trained neural network
        SMPL_MEAN_PARAMS = './frankmocap/extra_data/body_module/data_from_spin/smpl_mean_params.npz'
        self.model_regressor = hmr(SMPL_MEAN_PARAMS).to(self.device)
        checkpoint = torch.load(regressor_checkpoint)
        self.model_regressor.load_state_dict(checkpoint['model'], strict=False)
        self.model_regressor.eval()

    def regress(self, img_original, body_bbox_list):
        """
        args:
            img_original: original raw image (BGR order by using cv2.imread)
            body_bbox: bounding box around the target: (minX, minY, width, height)
        outputs:
            pred_vertices_img:
            pred_joints_vis_img:
            pred_rotmat
            pred_betas
            pred_camera
            bbox: [bbr[0], bbr[1],bbr[0]+bbr[2], bbr[1]+bbr[3]])
            bboxTopLeft: bbox top left (redundant)
            boxScale_o2n: bbox scaling factor (redundant)
        """
        pred_output_list = list()

        for body_bbox in body_bbox_list:
            img, norm_img, boxScale_o2n, bboxTopLeft, bbox = process_image_bbox(
                img_original, body_bbox, input_res=constants.IMG_RES)
            bboxTopLeft = np.array(bboxTopLeft)
            # bboxTopLeft = bbox['bboxXYWH'][:2]

            if img is None:
                pred_output_list.append(None)
                continue

            with torch.no_grad():
                # model forward
                pred_rotmat, pred_betas, pred_camera = self.model_regressor(norm_img.to(self.device))

                # Convert rot_mat to aa since hands are always in aa
                # pred_aa = rotmat3x3_to_angle_axis(pred_rotmat)
                pred_aa = gu.rotation_matrix_to_angle_axis(pred_rotmat).cuda()
                pred_aa = pred_aa.reshape(pred_aa.shape[0], 72)
                # remove global rotation
                pred_aa[:, :3] = 0
                smpl_output = self.smpl(
                    betas=pred_betas,
                    body_pose=pred_aa[:, 3:],
                    global_orient=pred_aa[:, :3],
                    pose2rot=True)
                pred_vertices = smpl_output.vertices
                pred_joints_3d = smpl_output.joints

                pred_vertices = pred_vertices[0].cpu().numpy()
                pred_camera = pred_camera.cpu().numpy().ravel()
                camScale = pred_camera[0] # *1.15
                camTrans = pred_camera[1:]

                pred_output = dict()
                # Convert mesh to original image space (X,Y are aligned to image)
                # 1. SMPL -> 2D bbox
                # 2. 2D bbox -> original 2D image | pred_vertices_bbox = convert_smpl_to_bbox(pred_vertices, camScale, camTrans) | 9 | 2023-11-17 05:21:26+00:00 | 12k
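The regress() code above stops just before projecting the SMPL vertices back to image coordinates; the record's next line and its coordconv context snippets define that projection. The sketch below restates the two-step conversion (SMPL output -> bbox crop space -> original image space) using only the formulas from those snippets; the function names and input values here are illustrative, not part of the repository.

import numpy as np

def smpl_to_bbox(points, cam_scale, cam_trans, half_input=112.0):
    pts = points.copy()
    pts *= cam_scale             # weak-perspective camera scale
    pts[:, 0:2] += cam_trans     # 2D camera translation
    return pts * half_input      # 112 = half of the 224 px network input

def bbox_to_original(points, box_scale_o2n, bbox_top_left, half_input=112.0):
    pts = points.copy()
    pts /= box_scale_o2n         # undo the crop-to-224 resize
    pts[:, :2] += np.asarray(bbox_top_left) + half_input / box_scale_o2n
    return pts

verts = np.random.rand(6890, 3)                              # dummy SMPL vertices
in_bbox = smpl_to_bbox(verts, 1.1, np.array([0.02, -0.01]))  # bbox crop space
in_img = bbox_to_original(in_bbox, 0.5, (120.0, 80.0))       # original image pixels
print(in_img.shape)  # (6890, 3)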
dazhangyu123/OCL | train_source.py | [
{
"identifier": "Eval",
"path": "utils/eval.py",
"snippet": "class Eval():\n def __init__(self, num_class):\n self.num_class = num_class\n self.confusion_matrix = np.zeros((self.num_class,)*2)\n self.ignore_index = None\n self.synthia = True if num_class == 16 else False\n\n\n def Pixel_Accuracy(self):\n if np.sum(self.confusion_matrix) == 0:\n print(\"Attention: pixel_total is zero!!!\")\n PA = 0\n else:\n PA = np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()\n\n return PA\n\n def Mean_Pixel_Accuracy(self, out_16_13=False):\n MPA = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1)\n if self.synthia:\n MPA_16 = np.nanmean(MPA[:self.ignore_index])\n MPA_13 = np.nanmean(MPA[synthia_set_16_to_13])\n return MPA_16, MPA_13\n if out_16_13:\n MPA_16 = np.nanmean(MPA[synthia_set_16])\n MPA_13 = np.nanmean(MPA[synthia_set_13])\n return MPA_16, MPA_13\n MPA = np.nanmean(MPA[:self.ignore_index])\n\n return MPA\n\n def Mean_Intersection_over_Union(self, out_16_13=False):\n MIoU = np.diag(self.confusion_matrix) / (\n np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -\n np.diag(self.confusion_matrix))\n if self.synthia:\n MIoU_16 = np.nanmean(MIoU[:self.ignore_index])\n MIoU_13 = np.nanmean(MIoU[synthia_set_16_to_13])\n return MIoU_16, MIoU_13\n if out_16_13:\n MIoU_16 = np.nanmean(MIoU[synthia_set_16])\n MIoU_13 = np.nanmean(MIoU[synthia_set_13])\n return MIoU_16, MIoU_13\n MIoU = np.nanmean(MIoU[:self.ignore_index])\n\n return MIoU\n\n def Frequency_Weighted_Intersection_over_Union(self, out_16_13=False):\n FWIoU = np.multiply(np.sum(self.confusion_matrix, axis=1), np.diag(self.confusion_matrix))\n FWIoU = FWIoU / (np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -\n np.diag(self.confusion_matrix))\n if self.synthia:\n FWIoU_16 = np.sum(i for i in FWIoU if not np.isnan(i)) / np.sum(self.confusion_matrix)\n FWIoU_13 = np.sum(i for i in FWIoU[synthia_set_16_to_13] if not np.isnan(i)) / np.sum(self.confusion_matrix)\n return FWIoU_16, FWIoU_13\n if out_16_13:\n FWIoU_16 = np.sum(i for i in FWIoU[synthia_set_16] if not np.isnan(i)) / np.sum(self.confusion_matrix)\n FWIoU_13 = np.sum(i for i in FWIoU[synthia_set_13] if not np.isnan(i)) / np.sum(self.confusion_matrix)\n return FWIoU_16, FWIoU_13\n FWIoU = sum(i for i in FWIoU if not np.isnan(i)) / np.sum(self.confusion_matrix)\n\n return FWIoU\n\n def Mean_Precision(self, out_16_13=False):\n Precision = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=0)\n if self.synthia:\n Precision_16 = np.nanmean(Precision[:self.ignore_index])\n Precision_13 = np.nanmean(Precision[synthia_set_16_to_13])\n return Precision_16, Precision_13\n if out_16_13:\n Precision_16 = np.nanmean(Precision[synthia_set_16])\n Precision_13 = np.nanmean(Precision[synthia_set_13])\n return Precision_16, Precision_13\n Precision = np.nanmean(Precision[:self.ignore_index])\n return Precision\n \n def Print_Every_class_Eval(self, out_16_13=False):\n MIoU = np.diag(self.confusion_matrix) / (\n np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -\n np.diag(self.confusion_matrix))\n MPA = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1)\n Precision = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=0)\n Class_ratio = np.sum(self.confusion_matrix, axis=1) / np.sum(self.confusion_matrix)\n Pred_retio = np.sum(self.confusion_matrix, axis=0) / np.sum(self.confusion_matrix)\n print('===>Everyclass:\\t' + 'MPA\\t' + 'MIoU\\t' + 'PC\\t' + 'Ratio\\t' + 'Pred_Retio')\n if out_16_13: 
MIoU = MIoU[synthia_set_16]\n for ind_class in range(len(MIoU)):\n pa = str(round(MPA[ind_class] * 100, 2)) if not np.isnan(MPA[ind_class]) else 'nan'\n iou = str(round(MIoU[ind_class] * 100, 2)) if not np.isnan(MIoU[ind_class]) else 'nan'\n pc = str(round(Precision[ind_class] * 100, 2)) if not np.isnan(Precision[ind_class]) else 'nan'\n cr = str(round(Class_ratio[ind_class] * 100, 2)) if not np.isnan(Class_ratio[ind_class]) else 'nan'\n pr = str(round(Pred_retio[ind_class] * 100, 2)) if not np.isnan(Pred_retio[ind_class]) else 'nan'\n print('===>' + name_classes[ind_class] + ':\\t' + pa + '\\t' + iou + '\\t' + pc + '\\t' + cr + '\\t' + pr)\n\n def Get_class_ratio(self):\n MIoU = np.diag(self.confusion_matrix) / (\n np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -\n np.diag(self.confusion_matrix))\n Class_ratio = np.sum(self.confusion_matrix, axis=1) / np.sum(self.confusion_matrix)\n Pred_retio = np.sum(self.confusion_matrix, axis=0) / np.sum(self.confusion_matrix)\n return MIoU, Class_ratio, Pred_retio\n\n # generate confusion matrix\n def __generate_matrix(self, gt_image, pre_image):\n\n mask = (gt_image >= 0) & (gt_image < self.num_class)\n label = self.num_class * gt_image[mask].astype('int') + pre_image[mask]\n count = np.bincount(label, minlength=self.num_class**2)\n confusion_matrix = count.reshape(self.num_class, self.num_class)\n return confusion_matrix\n\n def add_batch(self, gt_image, pre_image):\n # assert the size of two images are same\n assert gt_image.shape == pre_image.shape\n\n self.confusion_matrix += self.__generate_matrix(gt_image, pre_image)\n\n def reset(self):\n self.confusion_matrix = np.zeros((self.num_class,) * 2)"
},
{
"identifier": "get_model",
"path": "utils/train_helper.py",
"snippet": "def get_model(args):\n if args.backbone == \"deeplabv2_multi\":\n model = DeeplabMulti(num_classes=args.num_classes,\n pretrained=args.imagenet_pretrained)\n params = model.optim_parameters(args)\n args.numpy_transform = True\n return model, params"
},
{
"identifier": "City_Dataset",
"path": "datasets/cityscapes_Dataset.py",
"snippet": "class City_Dataset(data.Dataset):\n def __init__(self,\n args,\n data_root_path='/data/zyl/dataset/cityscapes',\n list_path=os.path.abspath('./datasets/city_list'),\n split='train',\n base_size=769,\n crop_size=769,\n training=True,\n class_16=False,\n class_13=False):\n \"\"\"\n\n :param root_path:\n :param dataset:\n :param base_size:\n :param is_trainging:\n :param transforms:\n \"\"\"\n self.args = args\n self.data_path=data_root_path\n self.list_path=list_path\n self.split=split\n self.base_size=base_size\n self.crop_size=crop_size\n\n self.base_size = self.base_size if isinstance(self.base_size, tuple) else (self.base_size, self.base_size)\n self.crop_size = self.crop_size if isinstance(self.crop_size, tuple) else (self.crop_size, self.crop_size)\n self.training = training\n\n self.random_mirror = args.random_mirror\n self.random_crop = args.random_crop\n self.resize = args.resize\n self.gaussian_blur = args.gaussian_blur\n\n item_list_filepath = os.path.join(self.list_path, self.split+\".txt\")\n if not os.path.exists(item_list_filepath):\n raise Warning(\"split must be train/val/trainval\")\n\n self.image_filepath = os.path.join(self.data_path, \"leftImg8bit\")\n\n self.gt_filepath = os.path.join(self.data_path, \"gtFine\")\n\n self.items = [id.strip() for id in open(item_list_filepath)]\n\n ignore_label = -1\n self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,\n 3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,\n 7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,\n 14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,\n 18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,\n 28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}\n # In SYNTHIA-to-Cityscapes case, only consider 16 shared classes\n self.class_16 = class_16\n synthia_set_16 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 15, 17, 18]\n self.trainid_to_16id = {id:i for i,id in enumerate(synthia_set_16)}\n # In Cityscapes-to-NTHU case, only consider 13 shared classes\n self.class_13 = class_13\n synthia_set_13 = [0, 1, 2, 6, 7, 8, 10, 11, 12, 13, 15, 17, 18]\n self.trainid_to_13id = {id:i for i,id in enumerate(synthia_set_13)}\n \n print(\"{} num images in Cityscapes {} set have been loaded.\".format(len(self.items), self.split))\n if self.args.numpy_transform:\n print(\"use numpy_transform, instead of tensor transform!\")\n\n def id2trainId(self, label, reverse=False, ignore_label=-1):\n label_copy = ignore_label * np.ones(label.shape, dtype=np.float32)\n for k, v in self.id_to_trainid.items():\n label_copy[label == k] = v\n if self.class_16:\n label_copy_16 = ignore_label * np.ones(label.shape, dtype=np.float32)\n for k, v in self.trainid_to_16id.items():\n label_copy_16[label_copy == k] = v\n label_copy = label_copy_16\n if self.class_13:\n label_copy_13 = ignore_label * np.ones(label.shape, dtype=np.float32)\n for k, v in self.trainid_to_13id.items():\n label_copy_13[label_copy == k] = v\n label_copy = label_copy_13\n return label_copy\n\n def __getitem__(self, item):\n id = self.items[item]\n filename = id.split(\"train_\")[-1].split(\"val_\")[-1].split(\"test_\")[-1]\n image_filepath = os.path.join(self.image_filepath, id.split(\"_\")[0], id.split(\"_\")[1])\n image_filename = filename + \"_leftImg8bit.png\"\n image_path = os.path.join(image_filepath, image_filename)\n image = Image.open(image_path).convert(\"RGB\")\n\n gt_filepath = os.path.join(self.gt_filepath, 
id.split(\"_\")[0], id.split(\"_\")[1])\n gt_filename = filename + \"_gtFine_labelIds.png\"\n gt_image_path = os.path.join(gt_filepath, gt_filename)\n gt_image = Image.open(gt_image_path)\n\n if (self.split == \"train\" or self.split == \"trainval\") and self.training:\n image, gt_image = self._train_sync_transform(image, gt_image)\n else:\n image, gt_image = self._val_sync_transform(image, gt_image)\n\n return image, gt_image, item\n\n def _train_sync_transform(self, img, mask):\n '''\n :param image: PIL input image\n :param gt_image: PIL input gt_image\n :return:\n '''\n if self.random_mirror:\n # random mirror\n if random.random() < 0.5:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n if mask: mask = mask.transpose(Image.FLIP_LEFT_RIGHT)\n crop_w, crop_h = self.crop_size\n\n if self.random_crop:\n # random scale\n base_w , base_h = self.base_size\n w, h = img.size\n assert w >= h\n if (base_w / w) > (base_h / h):\n base_size = base_w \n short_size = random.randint(int(base_size * 0.5), int(base_size * 2.0))\n ow = short_size\n oh = int(1.0 * h * ow / w)\n else:\n base_size = base_h\n short_size = random.randint(int(base_size * 0.5), int(base_size * 2.0))\n oh = short_size\n ow = int(1.0 * w * oh / h)\n\n img = img.resize((ow, oh), Image.BICUBIC)\n if mask: mask = mask.resize((ow, oh), Image.NEAREST)\n # pad crop\n if ow < crop_w or oh < crop_h:\n padh = crop_h - oh if oh < crop_h else 0\n padw = crop_w - ow if ow < crop_w else 0\n img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)\n if mask: mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)\n # random crop crop_size\n w, h = img.size\n x1 = random.randint(0, w - crop_w)\n y1 = random.randint(0, h - crop_h)\n img = img.crop((x1, y1, x1 + crop_w, y1 + crop_h))\n if mask: mask = mask.crop((x1, y1, x1 + crop_w, y1 + crop_h))\n elif self.resize:\n img = img.resize(self.crop_size, Image.BICUBIC)\n if mask: mask = mask.resize(self.crop_size, Image.NEAREST)\n \n if self.gaussian_blur:\n # gaussian blur as in PSP\n if random.random() < 0.5:\n img = img.filter(ImageFilter.GaussianBlur(\n radius=random.random()))\n # final transform\n if mask: \n img, mask = self._img_transform(img), self._mask_transform(mask)\n return img, mask\n else:\n img = self._img_transform(img)\n return img\n\n def _val_sync_transform(self, img, mask):\n if self.random_crop:\n crop_w, crop_h = self.crop_size\n w, h = img.size\n if crop_w / w < crop_h / h:\n oh = crop_h\n ow = int(1.0 * w * oh / h)\n else:\n ow = crop_w\n oh = int(1.0 * h * ow / w)\n img = img.resize((ow, oh), Image.BICUBIC)\n mask = mask.resize((ow, oh), Image.NEAREST)\n # center crop\n w, h = img.size\n x1 = int(round((w - crop_w) / 2.))\n y1 = int(round((h - crop_h) / 2.))\n img = img.crop((x1, y1, x1 + crop_w, y1 + crop_h))\n mask = mask.crop((x1, y1, x1 + crop_w, y1 + crop_h))\n elif self.resize:\n img = img.resize(self.crop_size, Image.BICUBIC)\n mask = mask.resize(self.crop_size, Image.NEAREST)\n\n # final transform\n img, mask = self._img_transform(img), self._mask_transform(mask)\n return img, mask\n\n def _img_transform(self, image):\n if self.args.numpy_transform:\n image = np.asarray(image, np.float32)\n image = image[:, :, ::-1] # change to BGR\n image -= IMG_MEAN\n image = image.transpose((2, 0, 1)).copy() # (C x H x W)\n new_image = torch.from_numpy(image)\n else:\n image_transforms = ttransforms.Compose([\n ttransforms.ToTensor(),\n ttransforms.Normalize([.485, .456, .406], [.229, .224, .225]),\n ])\n new_image = image_transforms(image)\n return new_image\n\n def 
_mask_transform(self, gt_image):\n target = np.asarray(gt_image, np.float32)\n target = self.id2trainId(target).copy()\n target = torch.from_numpy(target)\n\n return target\n\n def __len__(self):\n return len(self.items)"
},
{
"identifier": "City_DataLoader",
"path": "datasets/cityscapes_Dataset.py",
"snippet": "class City_DataLoader():\n def __init__(self, args, training=True):\n\n self.args = args\n\n data_set = City_Dataset(args, \n data_root_path='/mnt/Xsky/zyl/dataset/cityscapes',\n list_path='./datasets/city_list',\n split=args.split,\n base_size=args.base_size,\n crop_size=args.crop_size,\n training=training,\n class_16=args.class_16,\n class_13=args.class_13)\n\n if (self.args.split == \"train\" or self.args.split == \"trainval\") and training:\n self.data_loader = data.DataLoader(data_set,\n batch_size=self.args.batch_size,\n shuffle=True,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n else:\n self.data_loader = data.DataLoader(data_set,\n batch_size=self.args.batch_size,\n shuffle=False,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n\n val_set = City_Dataset(args, \n data_root_path='./datasets/Cityscapes',\n list_path='./datasets/city_list',\n split='val',\n base_size=args.base_size,\n crop_size=args.crop_size,\n training=False,\n class_16=args.class_16,\n class_13=args.class_13)\n self.val_loader = data.DataLoader(val_set,\n batch_size=self.args.batch_size,\n shuffle=False,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n self.valid_iterations = (len(val_set) + self.args.batch_size) // self.args.batch_size\n\n self.num_iterations = (len(data_set) + self.args.batch_size) // self.args.batch_size"
},
{
"identifier": "inv_preprocess",
"path": "datasets/cityscapes_Dataset.py",
"snippet": "def inv_preprocess(imgs, num_images=1, img_mean=IMG_MEAN, numpy_transform=False):\n \"\"\"Inverse preprocessing of the batch of images.\n \n Args:\n imgs: batch of input images.\n num_images: number of images to apply the inverse transformations on.\n img_mean: vector of mean colour values.\n numpy_transform: whether change RGB to BGR during img_transform.\n \n Returns:\n The batch of the size num_images with the same spatial dimensions as the input.\n \"\"\"\n if numpy_transform:\n imgs = flip(imgs, 1)\n def norm_ip(img, min, max):\n img.clamp_(min=min, max=max)\n img.add_(-min).div_(max - min + 1e-5)\n norm_ip(imgs, float(imgs.min()), float(imgs.max()))\n return imgs"
},
{
"identifier": "decode_labels",
"path": "datasets/cityscapes_Dataset.py",
"snippet": "def decode_labels(mask, num_images=1, num_classes=NUM_CLASSES):\n \"\"\"Decode batch of segmentation masks.\n \n Args:\n mask: result of inference after taking argmax.\n num_images: number of images to decode from the batch.\n num_classes: number of classes to predict.\n \n Returns:\n A batch with num_images RGB images of the same size as the input. \n \"\"\"\n if isinstance(mask, torch.Tensor):\n mask = mask.data.cpu().numpy()\n n, h, w = mask.shape\n if n < num_images:\n num_images = n\n outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)\n for i in range(num_images):\n img = Image.new('RGB', (len(mask[i, 0]), len(mask[i])))\n pixels = img.load()\n for j_, j in enumerate(mask[i, :, :]):\n for k_, k in enumerate(j):\n if k < num_classes:\n pixels[k_,j_] = label_colours[k]\n outputs[i] = np.array(img)\n return torch.from_numpy(outputs.transpose([0, 3, 1, 2]).astype('float32')).div_(255.0)"
},
{
"identifier": "GTA5_DataLoader",
"path": "datasets/gta5_Dataset.py",
"snippet": "class GTA5_DataLoader():\n def __init__(self, args, training=True):\n\n self.args = args\n\n data_set = GTA5_Dataset(args, \n data_root_path=args.data_root_path,\n list_path=args.list_path,\n split=args.split,\n base_size=args.base_size,\n crop_size=args.crop_size,\n training=training)\n\n if self.args.split == \"train\" or self.args.split == \"trainval\" or self.args.split ==\"all\":\n self.data_loader = data.DataLoader(data_set,\n batch_size=self.args.batch_size,\n shuffle=True,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n elif self.args.split ==\"val\" or self.args.split == \"test\":\n self.data_loader = data.DataLoader(data_set,\n batch_size=self.args.batch_size,\n shuffle=False,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n else:\n raise Warning(\"split must be train/val/trainavl/test/all\")\n\n val_split = 'val' if self.args.split == \"train\" else 'test'\n val_set = GTA5_Dataset(args, \n data_root_path=args.data_root_path,\n list_path=args.list_path,\n split=val_split,\n base_size=args.base_size,\n crop_size=args.crop_size,\n training=False)\n self.val_loader = data.DataLoader(val_set,\n batch_size=self.args.batch_size,\n shuffle=False,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n self.valid_iterations = (len(val_set) + self.args.batch_size) // self.args.batch_size\n\n self.num_iterations = (len(data_set) + self.args.batch_size) // self.args.batch_size"
},
{
"identifier": "SYNTHIA_DataLoader",
"path": "datasets/synthia_Dataset.py",
"snippet": "class SYNTHIA_DataLoader():\n def __init__(self, args, training=True):\n\n self.args = args\n\n data_set = SYNTHIA_Dataset(args, \n data_root_path=args.data_root_path,\n list_path=args.list_path,\n split=args.split,\n base_size=args.base_size,\n crop_size=args.crop_size,\n training=training,\n class_16=args.class_16)\n\n if self.args.split == \"train\" or self.args.split == \"trainval\" or self.args.split ==\"all\":\n self.data_loader = data.DataLoader(data_set,\n batch_size=self.args.batch_size,\n shuffle=True,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n elif self.args.split ==\"val\" or self.args.split == \"test\":\n self.data_loader = data.DataLoader(data_set,\n batch_size=self.args.batch_size,\n shuffle=False,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n else:\n raise Warning(\"split must be train/val/trainavl/test/all\")\n\n val_split = 'val' if self.args.split == \"train\" else 'test'\n val_set = SYNTHIA_Dataset(args, \n data_root_path=args.data_root_path,\n list_path=args.list_path,\n split=val_split,\n base_size=args.base_size,\n crop_size=args.crop_size,\n training=False,\n class_16=args.class_16)\n self.val_loader = data.DataLoader(val_set,\n batch_size=self.args.batch_size,\n shuffle=False,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n self.valid_iterations = (len(val_set) + self.args.batch_size) // self.args.batch_size\n\n self.num_iterations = (len(data_set) + self.args.batch_size) // self.args.batch_size"
}
] | import os
import random
import logging
import argparse
import torch
import torch.nn as nn
import torch.utils.data as data
import torch.nn.functional as F
import numpy as np
import sys
import shutil
from tqdm import tqdm
from math import ceil
from distutils.version import LooseVersion
from tensorboardX import SummaryWriter
from torchvision.utils import make_grid
from utils.eval import Eval
from utils.train_helper import get_model
from datasets.cityscapes_Dataset import City_Dataset, City_DataLoader, inv_preprocess, decode_labels
from datasets.gta5_Dataset import GTA5_DataLoader
from datasets.synthia_Dataset import SYNTHIA_DataLoader | 8,602 | # validate
PA, MPA, MIoU, FWIoU = self.validate()
self.writer.add_scalar('PA', PA, self.current_epoch)
self.writer.add_scalar('MPA', MPA, self.current_epoch)
self.writer.add_scalar('MIoU', MIoU, self.current_epoch)
self.writer.add_scalar('FWIoU', FWIoU, self.current_epoch)
self.current_MIoU = MIoU
is_best = MIoU > self.best_MIou
if is_best:
self.best_MIou = MIoU
self.best_iter = self.current_iter
self.logger.info("=>saving a new best checkpoint...")
self.save_checkpoint(self.train_id+'best.pth')
else:
self.logger.info("=> The MIoU of val doesn't improve.")
self.logger.info("=> The best MIoU of val is {} at {}".format(self.best_MIou, self.best_iter))
self.current_epoch += 1
state = {
'epoch': self.current_epoch + 1,
'iteration': self.current_iter,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_MIou': self.current_MIoU
}
self.logger.info("=>best_MIou {} at {}".format(self.best_MIou, self.best_iter))
self.logger.info("=>saving the final checkpoint to " + os.path.join(self.args.checkpoint_dir, self.train_id+'final.pth'))
self.save_checkpoint(self.train_id+'final.pth')
def train_one_epoch(self):
tqdm_epoch = tqdm(self.dataloader.data_loader, total=self.dataloader.num_iterations,
desc="Train Epoch-{}-total-{}".format(self.current_epoch+1, self.epoch_num))
self.logger.info("Training one epoch...")
self.Eval.reset()
train_loss = []
loss_seg_value_2 = 0
iter_num = self.dataloader.num_iterations
if self.args.freeze_bn:
self.model.eval()
self.logger.info("freeze batch normalization successfully!")
else:
self.model.train()
# Initialize your average meters
batch_idx = 0
for x, y, _ in tqdm_epoch:
self.poly_lr_scheduler(
optimizer=self.optimizer,
init_lr=self.args.lr,
iter=self.current_iter,
max_iter=self.args.iter_max,
power=self.args.poly_power,
)
if self.args.iter_stop is not None and self.current_iter >= self.args.iter_stop:
self.logger.info("iteration reached {} (early stop)/{} (total steps)!".format(self.args.iter_stop, self.args.iter_max))
break
if self.current_iter >= self.args.iter_max:
self.logger.info("iteration reached {}!".format(self.args.iter_max))
break
self.writer.add_scalar('learning_rate', self.optimizer.param_groups[0]["lr"], self.current_iter)
if self.cuda:
x, y = x.to(self.device), y.to(device=self.device, dtype=torch.long)
y = torch.squeeze(y, 1)
self.optimizer.zero_grad()
# model
pred = self.model(x)
if isinstance(pred, tuple):
pred_2 = pred[1]
pred = pred[0]
pred = F.interpolate(pred, size=x.size()[2:], mode='bilinear', align_corners=True)
# loss
cur_loss = self.loss(pred, y)
if self.args.multi:
loss_2 = self.args.lambda_seg * self.loss(pred_2, y)
cur_loss += loss_2
loss_seg_value_2 += loss_2.cpu().item() / iter_num
# optimizer
cur_loss.backward()
self.optimizer.step()
train_loss.append(cur_loss.item())
if batch_idx % 1000 == 0:
if self.args.multi:
self.logger.info("The train loss of epoch{}-batch-{}:{};{}".format(self.current_epoch,
batch_idx, cur_loss.item(), loss_2.item()))
else:
self.logger.info("The train loss of epoch{}-batch-{}:{}".format(self.current_epoch,
batch_idx, cur_loss.item()))
batch_idx += 1
self.current_iter += 1
if np.isnan(float(cur_loss.item())):
raise ValueError('Loss is nan during training...')
pred = pred.data.cpu().numpy()
label = y.cpu().numpy()
argpred = np.argmax(pred, axis=1)
self.Eval.add_batch(label, argpred)
if batch_idx==self.dataloader.num_iterations:
break
self.log_one_train_epoch(x, label, argpred, train_loss)
tqdm_epoch.close()
def log_one_train_epoch(self, x, label, argpred, train_loss):
#show train image on tensorboard
images_inv = inv_preprocess(x.clone().cpu(), self.args.show_num_images, numpy_transform=self.args.numpy_transform)
|
sys.path.append(os.path.abspath('tools'))
datasets_path={
'cityscapes': {'data_root_path': '/mnt/Xsky/zyl/dataset/dataset/Cityscapes', 'list_path': './datasets/city_list',
'image_path':'/mnt/Xsky/zyl/dataset/Cityscapes/leftImg8bit',
'gt_path': './datasets/Cityscapes/gtFine'},
'gta5': {'data_root_path': '/mnt/Xsky/zyl/dataset/GTA5', 'list_path': './datasets/gta5_list',
'image_path':'/mnt/Xsky/zyl/dataset/GTA5/images',
'gt_path': './datasets/GTA5/labels'},
'synthia': {'data_root_path': '/mnt/Xsky/zyl/dataset/RAND_CITYSCAPES', 'list_path': './datasets/synthia_list',
'image_path':'/mnt/Xsky/zyl/dataset/RAND_CITYSCAPES/RGB',
'gt_path': './datasets/SYNTHIA/GT/LABELS'},
'NTHU': {'data_root_path': './datasets/NTHU_Datasets', 'list_path': './datasets/NTHU_list'}
}
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Unsupported value encountered.')
ITER_MAX = 5000
class Trainer():
def __init__(self, args, cuda=None, train_id="None", logger=None):
self.args = args
os.environ["CUDA_VISIBLE_DEVICES"] = self.args.gpu
self.cuda = cuda and torch.cuda.is_available()
self.device = torch.device('cuda' if self.cuda else 'cpu')
self.train_id = train_id
self.logger = logger
self.current_MIoU = 0
self.best_MIou = 0
self.best_source_MIou = 0
self.current_epoch = 0
self.current_iter = 0
self.second_best_MIou = 0
# set TensorboardX
self.writer = SummaryWriter(self.args.checkpoint_dir)
# Metric definition
self.Eval = Eval(self.args.num_classes)
# loss definition
self.loss = nn.CrossEntropyLoss(weight=None, ignore_index= -1)
self.loss.to(self.device)
# model
self.model, params = get_model(self.args)
self.model = nn.DataParallel(self.model, device_ids=[0])
self.model.to(self.device)
if self.args.optim == "SGD":
self.optimizer = torch.optim.SGD(
params=params,
momentum=self.args.momentum,
weight_decay=self.args.weight_decay
)
elif self.args.optim == "Adam":
self.optimizer = torch.optim.Adam(params, betas=(0.9, 0.99), weight_decay=self.args.weight_decay)
# dataloader
if self.args.dataset=="cityscapes":
self.dataloader = City_DataLoader(self.args)
elif self.args.dataset=="gta5":
self.dataloader = GTA5_DataLoader(self.args)
else:
self.dataloader = SYNTHIA_DataLoader(self.args)
self.dataloader.num_iterations = min(self.dataloader.num_iterations, ITER_MAX)
print(self.args.iter_max, self.dataloader.num_iterations)
self.epoch_num = ceil(self.args.iter_max / self.dataloader.num_iterations) if self.args.iter_stop is None else \
ceil(self.args.iter_stop / self.dataloader.num_iterations)
def main(self):
# display args details
self.logger.info("Global configuration as follows:")
for key, val in vars(self.args).items():
self.logger.info("{:16} {}".format(key, val))
# choose cuda
if self.cuda:
current_device = torch.cuda.current_device()
self.logger.info("This model will run on {}".format(torch.cuda.get_device_name(current_device)))
else:
self.logger.info("This model will run on CPU")
# load pretrained checkpoint
if self.args.pretrained_ckpt_file is not None:
if os.path.isdir(self.args.pretrained_ckpt_file):
self.args.pretrained_ckpt_file = os.path.join(self.args.checkpoint_dir, self.train_id + 'best.pth')
self.load_checkpoint(self.args.pretrained_ckpt_file)
if self.args.continue_training:
self.load_checkpoint(os.path.join(self.args.checkpoint_dir, self.train_id + 'best.pth'))
self.best_iter = self.current_iter
self.best_source_iter = self.current_iter
else:
self.current_epoch = 0
# train
self.train()
self.writer.close()
def train(self):
# self.validate() # check image summary
for epoch in tqdm(range(self.current_epoch, self.epoch_num),
desc="Total {} epochs".format(self.epoch_num)):
self.train_one_epoch()
# validate
PA, MPA, MIoU, FWIoU = self.validate()
self.writer.add_scalar('PA', PA, self.current_epoch)
self.writer.add_scalar('MPA', MPA, self.current_epoch)
self.writer.add_scalar('MIoU', MIoU, self.current_epoch)
self.writer.add_scalar('FWIoU', FWIoU, self.current_epoch)
self.current_MIoU = MIoU
is_best = MIoU > self.best_MIou
if is_best:
self.best_MIou = MIoU
self.best_iter = self.current_iter
self.logger.info("=>saving a new best checkpoint...")
self.save_checkpoint(self.train_id+'best.pth')
else:
self.logger.info("=> The MIoU of val doesn't improve.")
self.logger.info("=> The best MIoU of val is {} at {}".format(self.best_MIou, self.best_iter))
self.current_epoch += 1
state = {
'epoch': self.current_epoch + 1,
'iteration': self.current_iter,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_MIou': self.current_MIoU
}
self.logger.info("=>best_MIou {} at {}".format(self.best_MIou, self.best_iter))
self.logger.info("=>saving the final checkpoint to " + os.path.join(self.args.checkpoint_dir, self.train_id+'final.pth'))
self.save_checkpoint(self.train_id+'final.pth')
def train_one_epoch(self):
tqdm_epoch = tqdm(self.dataloader.data_loader, total=self.dataloader.num_iterations,
desc="Train Epoch-{}-total-{}".format(self.current_epoch+1, self.epoch_num))
self.logger.info("Training one epoch...")
self.Eval.reset()
train_loss = []
loss_seg_value_2 = 0
iter_num = self.dataloader.num_iterations
if self.args.freeze_bn:
self.model.eval()
self.logger.info("freeze batch normalization successfully!")
else:
self.model.train()
# Initialize your average meters
batch_idx = 0
for x, y, _ in tqdm_epoch:
self.poly_lr_scheduler(
optimizer=self.optimizer,
init_lr=self.args.lr,
iter=self.current_iter,
max_iter=self.args.iter_max,
power=self.args.poly_power,
)
if self.args.iter_stop is not None and self.current_iter >= self.args.iter_stop:
self.logger.info("iteration reached {} (early stop)/{} (total steps)!".format(self.args.iter_stop, self.args.iter_max))
break
if self.current_iter >= self.args.iter_max:
self.logger.info("iteration reached {}!".format(self.args.iter_max))
break
self.writer.add_scalar('learning_rate', self.optimizer.param_groups[0]["lr"], self.current_iter)
if self.cuda:
x, y = x.to(self.device), y.to(device=self.device, dtype=torch.long)
y = torch.squeeze(y, 1)
self.optimizer.zero_grad()
# model
pred = self.model(x)
if isinstance(pred, tuple):
pred_2 = pred[1]
pred = pred[0]
pred = F.interpolate(pred, size=x.size()[2:], mode='bilinear', align_corners=True)
# loss
cur_loss = self.loss(pred, y)
if self.args.multi:
loss_2 = self.args.lambda_seg * self.loss(pred_2, y)
cur_loss += loss_2
loss_seg_value_2 += loss_2.cpu().item() / iter_num
# optimizer
cur_loss.backward()
self.optimizer.step()
train_loss.append(cur_loss.item())
if batch_idx % 1000 == 0:
if self.args.multi:
self.logger.info("The train loss of epoch{}-batch-{}:{};{}".format(self.current_epoch,
batch_idx, cur_loss.item(), loss_2.item()))
else:
self.logger.info("The train loss of epoch{}-batch-{}:{}".format(self.current_epoch,
batch_idx, cur_loss.item()))
batch_idx += 1
self.current_iter += 1
if np.isnan(float(cur_loss.item())):
raise ValueError('Loss is nan during training...')
pred = pred.data.cpu().numpy()
label = y.cpu().numpy()
argpred = np.argmax(pred, axis=1)
self.Eval.add_batch(label, argpred)
if batch_idx==self.dataloader.num_iterations:
break
self.log_one_train_epoch(x, label, argpred, train_loss)
tqdm_epoch.close()
def log_one_train_epoch(self, x, label, argpred, train_loss):
#show train image on tensorboard
images_inv = inv_preprocess(x.clone().cpu(), self.args.show_num_images, numpy_transform=self.args.numpy_transform) | labels_colors = decode_labels(label, self.args.show_num_images) | 5 | 2023-11-14 02:01:11+00:00 | 12k |
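
The Trainer.train_one_epoch code in the row above calls self.poly_lr_scheduler(optimizer=..., init_lr=..., iter=..., max_iter=..., power=...), but that helper is not included in the excerpt. The following is a minimal sketch of such a polynomial learning-rate decay, inferred only from the keyword arguments at the call site; the name, signature, and decay rule are assumptions, not the repository's actual implementation.

def poly_lr_scheduler(optimizer, init_lr, iter, max_iter, power):
    # Assumed decay rule: lr = init_lr * (1 - iter / max_iter) ** power
    lr = init_lr * (1 - min(iter, max_iter) / max_iter) ** power
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr  # apply the decayed rate to every param group
    return lr
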
raphaelreme/koft | src/experiments/track.py | [
{
"identifier": "FakeDetector",
"path": "src/detector.py",
"snippet": "class FakeDetector(byotrack.Detector): # TODO: include weight\n def __init__(self, mu: torch.Tensor, noise=1.0, fpr=0.1, fnr=0.2, generate_outside_particles=True):\n self.noise = noise\n self.fpr = fpr\n self.fnr = fnr\n self.mu = mu\n self.n_particles = mu.shape[1]\n self.generate_outside_particles = generate_outside_particles\n\n def run(self, video: Iterable[np.ndarray]) -> Collection[byotrack.Detections]:\n detections_sequence = []\n\n for k, frame in enumerate(tqdm.tqdm(video)):\n frame = frame[..., 0] # Drop channel\n shape = torch.tensor(frame.shape)\n\n detected = torch.rand(self.n_particles) >= self.fnr # Miss some particles (randomly)\n positions = self.mu[k, detected] + torch.randn((detected.sum(), 2)) * self.noise\n positions = positions[(positions > 0).all(dim=-1)]\n positions = positions[(positions < shape - 1).all(dim=-1)]\n\n # Create fake detections\n # 1- Quickly compute the background mask\n mask = torch.tensor(cv2.GaussianBlur(frame, (33, 33), 15) > 0.2)\n mask_proportion = mask.sum().item() / mask.numel()\n\n # 2- Scale fpr by the mask proportion\n n_fake = int(len(positions) * (self.fpr + torch.randn(1).item() * self.fpr / 10) / mask_proportion)\n false_alarm = torch.rand(n_fake, 2) * (shape - 1)\n\n if not self.generate_outside_particles: # Filter fake detections outside the mask\n false_alarm = false_alarm[mask[false_alarm.long()[:, 0], false_alarm.long()[:, 1]]]\n\n positions = torch.cat((positions, false_alarm))\n\n # bbox = torch.cat((positions - 1, torch.zeros_like(positions) + 3), dim=-1)\n detections_sequence.append(\n byotrack.Detections(\n {\n \"position\": positions,\n # \"bbox\": bbox.round().to(torch.int32),\n \"shape\": shape,\n },\n frame_id=k,\n )\n )\n\n return detections_sequence"
},
{
"identifier": "DetectionMetric",
"path": "src/metrics/detections.py",
"snippet": "class DetectionMetric:\n \"\"\"\"\"\"\n\n def __init__(self, dist_thresh: float, greedy=True) -> None:\n self.dist_thresh = dist_thresh\n self.greedy = greedy\n self.lap_solver = pylapy.LapSolver()\n\n def compute_at(\n self,\n detections: byotrack.Detections,\n true_position: torch.Tensor,\n true_weight: Optional[torch.Tensor] = None,\n prob_thresh=0.0,\n weight_thresh=0.0,\n ) -> Dict[str, float]:\n \"\"\"Compute the precision, recall and f1 at a given probability and weight thresholds\"\"\"\n if true_weight is not None:\n true_position = true_position[true_weight > weight_thresh]\n\n predicted_position = detections.position[detections.confidence > prob_thresh]\n\n dist = torch.cdist(predicted_position, true_position)\n\n if self.greedy:\n dist[dist > self.dist_thresh] = torch.inf\n tp = self.lap_solver.solve(dist.numpy()).shape[0]\n else:\n tp = self.lap_solver.solve(dist.numpy(), self.dist_thresh).shape[0]\n\n n_pred = len(predicted_position)\n n_true = len(true_position)\n precision = tp / n_pred if n_pred else 1.0\n recall = tp / n_true if n_true else 1.0\n f1 = 2 * tp / (n_true + n_pred) if n_pred + n_true else 1.0\n\n return {\n \"precision\": precision,\n \"recall\": recall,\n \"f1\": f1,\n \"n_pred\": n_pred,\n \"n_true\": n_true,\n \"tp\": tp,\n }\n\n def average_precision_weight(\n self,\n detections: byotrack.Detections,\n true_position: torch.Tensor,\n true_weight: Optional[torch.Tensor] = None,\n prob_thresh=0.0,\n ) -> float:\n recalls = []\n precisions = []\n\n for weight_thresh in torch.linspace(0, 2.0, 201):\n metrics = self.compute_at(detections, true_position, true_weight, prob_thresh, weight_thresh.item())\n recalls.append(metrics[\"recall\"])\n precisions.append(metrics[\"precision\"])\n\n return compute_ap(recalls, precisions)\n\n def average_precision_prob(\n self,\n detections: byotrack.Detections,\n true_position: torch.Tensor,\n true_weight: Optional[torch.Tensor] = None,\n weight_thresh=0.0,\n ) -> float:\n recalls = []\n precisions = []\n\n for prob_thresh in torch.linspace(1.0, 0.0, 101):\n metrics = self.compute_at(detections, true_position, true_weight, prob_thresh.item(), weight_thresh)\n recalls.append(metrics[\"recall\"])\n precisions.append(metrics[\"precision\"])\n\n return compute_ap(recalls, precisions)"
},
{
"identifier": "compute_tracking_metrics",
"path": "src/metrics/tracking.py",
"snippet": "def compute_tracking_metrics(\n tracks: Collection[byotrack.Track], ground_truth: Dict[str, torch.Tensor]\n) -> Dict[str, float]:\n \"\"\"Compute [email protected] (consider that gt matches with pred if dist < 1.5 pixels)\n\n Also returns localization errors when matching at 4.5 pixels.\n\n We choose not to aggregate the HOTA performances at different thresholds, but rather choose one,\n and use LocA to measure localization errors. (Converted in pixels)\n\n Keys:\n HOTA: HOTA at 1.5 pixels\n DetA: Jacquard of detections\n DetPr: Precision of detections\n DetRe: Recall of detections\n AssA: Jacquard of associations\n AssPr: Precision of associations\n AssRe: Recall of associations\n Loca: Localization errors (but at 4.5 pixels)\n \"\"\"\n gt_data = simulator_to_eval(ground_truth[\"mu\"], ground_truth[\"weight\"])\n track_data = tracks_to_eval(tracks)\n data = {**gt_data, **track_data}\n add_similarity(data)\n\n metric = trackeval.metrics.hota.HOTA()\n metrics = metric.eval_sequence(data)\n\n # -6 => 0.7 similarity => 1 - 1.5 / 5\n return {\n \"HOTA\": float(metrics[\"HOTA\"][-6]),\n \"DetA\": float(metrics[\"DetA\"][-6]),\n \"DetPr\": float(metrics[\"DetPr\"][-6]),\n \"DetRe\": float(metrics[\"DetRe\"][-6]),\n \"AssA\": float(metrics[\"AssA\"][-6]),\n \"AssPr\": float(metrics[\"AssPr\"][-6]),\n \"AssRe\": float(metrics[\"AssRe\"][-6]),\n \"Loca\": 5 - 5 * float(metrics[\"LocA\"][1]), # Mean of pixel errors for TP associations\n }"
},
{
"identifier": "constant_kalman_filter",
"path": "src/skt.py",
"snippet": "def constant_kalman_filter(measurement_std: torch.Tensor, process_std: torch.Tensor, dim=2, order=1) -> KalmanFilter:\n \"\"\"Create a constant Velocity/Acceleration/Jerk Kalman Filter\n\n Create a kalman filter with a state containing the positions on each dimension (x, y, z, ...)\n with their derivatives up to `order`. The order-th derivatives are supposed constant.\n\n Let x be the positions for each dim and x^i the i-th derivatives of these positions\n Prediction follows:\n x^i_{t+1} = x^i_t + x^{i+1}_t, for i < order\n x^order_{t+1} = x^order_t\n\n Args:\n measurement_std (torch.Tensor): Std of the measurements\n 99.7% of measurements should fall within 3 std of the true position\n Shape: Broadcastable to dim, dtype: float64\n process_std (torch.Tensor): Process noise, a typical value is maximum diff between two consecutive\n order-th derivative. (Eg: for constant velocity -> Maximum acceleration between two frames)\n Shape: Broadcastable to dim, dtype: float64\n dim (int): Dimension of the motion (1d, 2d, 3d, ...)\n Default: 2\n order (int): Order of the filer (The order-th derivatives are constants)\n Default: 1 (Constant velocity)\n\n \"\"\"\n measurement_std = torch.broadcast_to(measurement_std, (dim,))\n process_std = torch.broadcast_to(process_std, (dim,))\n\n state_dim = (order + 1) * dim\n\n # Measurement model\n # We only measure the positions\n # Noise is independent and can have a different value in each direction\n measurement_matrix = torch.eye(dim, state_dim)\n measurement_noise = torch.eye(dim) * measurement_std**2\n\n # Process\n # Constant model\n # Noise in velocity estimation (which induce a noise in position estimation)\n process_matrix = torch.eye(state_dim) + torch.tensor(np.eye(state_dim, k=dim)).to(torch.float32)\n process_noise = torch.tensor(\n filterpy.common.Q_discrete_white_noise(order + 1, block_size=dim, order_by_dim=False)\n ).to(torch.float32) * torch.cat([process_std**2] * (order + 1))\n\n return KalmanFilter(process_matrix, measurement_matrix, process_noise, measurement_noise)"
},
{
"identifier": "Dist",
"path": "src/skt.py",
"snippet": "class Dist(enum.Enum):\n MAHALANOBIS = \"mahalanobis\"\n EUCLIDIAN = \"euclidian\"\n LIKELIHOOD = \"likelihood\""
},
{
"identifier": "Method",
"path": "src/skt.py",
"snippet": "class Method(enum.Enum):\n \"\"\"Matching methods\n\n Opt: GDM with Jonker-volgenant algorithm (Linear assignement solver)\n Can be smooth thresholding or hard\n Greedy: Takes the best matches iteratively\n \"\"\"\n\n OPT_SMOOTH = \"opt_smooth\"\n OPT_HARD = \"opt_hard\"\n GREEDY = \"greedy\""
},
{
"identifier": "MatchingConfig",
"path": "src/skt.py",
"snippet": "class MatchingConfig:\n thresh: float\n dist: Dist = Dist.MAHALANOBIS\n method: Method = Method.OPT_SMOOTH"
},
{
"identifier": "SimpleKalmanTracker",
"path": "src/skt.py",
"snippet": "class SimpleKalmanTracker(byotrack.Linker):\n \"\"\"Simple Kalman tracker (SKT)\"\"\"\n\n def __init__(self, kalman_filter: KalmanFilter, match_cfg: MatchingConfig) -> None:\n super().__init__()\n self.kalman_filter = kalman_filter\n self.tracks: List[PartialTrack] = []\n self.active_tracks: List[PartialTrack] = []\n self.state = GaussianState( # Current state of active tracks\n torch.zeros((0, self.kalman_filter.state_dim, 1)),\n torch.zeros((0, self.kalman_filter.state_dim, self.kalman_filter.state_dim)),\n )\n\n self.match_cfg = match_cfg\n\n def run(\n self, video: Iterable[np.ndarray], detections_sequence: Collection[byotrack.Detections]\n ) -> Collection[byotrack.Track]:\n # Reset tracks and states\n self.tracks = []\n self.active_tracks = []\n self.state = GaussianState(\n torch.zeros((0, self.kalman_filter.state_dim, 1)),\n torch.zeros((0, self.kalman_filter.state_dim, self.kalman_filter.state_dim)),\n ) # The first iteration will predict and associate with 0 tracks, leading to no association\n # Thus creating tracks for all detections in the first frame\n\n for detections in tqdm.tqdm(detections_sequence):\n self.update(detections)\n\n tracks = []\n for track in self.tracks + self.active_tracks:\n if track.track_state in (track.TrackState.DELETED, track.TrackState.INITIATED):\n continue # Ignore unconfirmed tracks\n tracks.append(\n byotrack.Track(\n track.start,\n track.points,\n track.track_id,\n )\n )\n return tracks\n\n def match(self, projection: GaussianState, measures: torch.Tensor) -> torch.Tensor:\n \"\"\"Match projection with measures using positions\n\n If velocity measure (KOFT) is available, we do not use it here (Even if it could be better)\n\n Args:\n projection (GaussianState): Projection for all tracks. Only supports 2D (dim_z = 2 or 4\n if velocities are included). Mean: (n, dim_z, 1), Cov: (n, dim_z, dim_z)\n measures (torch.Tensor): Measures to match with tracks. Only supports 2D. Measures can\n include velocities but it won't be used for matching. (Though could be an easy upgrade)\n Shape: (m, 2, 1) or (m, 4 ,1), dtype: float32\n\n Returns:\n torch.Tensor: Links between tracks and measures\n Shape: (L, 2), dtype: int32\n \"\"\"\n dist: torch.Tensor\n thresh: float\n\n if self.match_cfg.dist in (Dist.MAHALANOBIS, Dist.LIKELIHOOD):\n if projection.precision is None:\n # Register in case someone needs it afterwards (like kf.update)\n projection.precision = projection.covariance.inverse().contiguous()\n\n precision = projection.precision[:, None, :2, :2] # Handle 4d projection with speed. 
(n, 1, 2, 2)\n # We noticed that it is more efficient to use inv(cov)[:2, :2] rather than inv(cov[:2, :2])...\n # Need more investigatation but: This solution is equivalent to consider than the speed prediction\n # is perfect and using covariance between speed and position to quantify the errors on positions\n # precision != torch.linalg.inv(projection.covariance[:, None, :2, :2])\n\n diff = projection.mean[:, None, :2] - measures[None, :, :2] # Shape: (n, m, 2, 1)\n dist = diff.mT @ precision @ diff # Shape: (n, m, 1, 1)\n if self.match_cfg.dist == Dist.MAHALANOBIS:\n dist = dist[..., 0, 0]\n thresh = self.match_cfg.thresh**2 # No need to take the sqrt, let's compare to the sq thresh\n else: # likelihood\n log_det = torch.log(torch.det(projection.covariance))[:, None] # Shape (N, 1)\n # Dist = - log likelihood\n dist = 0.5 * (diff.shape[2] * torch.log(2 * torch.tensor(torch.pi)) + log_det + dist[..., 0, 0])\n thresh = -torch.log(torch.tensor(self.match_cfg.thresh)).item()\n else: # Euclidian\n dist = torch.cdist(projection.mean[:, :2, 0], measures[:, :2, 0])\n thresh = self.match_cfg.thresh\n\n if self.match_cfg.method == Method.GREEDY:\n links = greedy_assignment_solver(dist.numpy(), thresh)\n else:\n dist[dist > thresh] = torch.inf\n links = pylapy.LapSolver().solve(\n dist.numpy(),\n float(\"inf\") if self.match_cfg.method == Method.OPT_HARD else thresh,\n )\n\n return torch.tensor(links.astype(np.int32))\n\n def update(self, detections: byotrack.Detections):\n prior = self.kalman_filter.predict(self.state)\n projection = self.kalman_filter.project(prior)\n positions = detections.position[..., None].clone() # Shape m, d, 1\n\n # Association\n links = self.match(projection, positions)\n\n # Update linked kalman filter\n posterior = self.kalman_filter.update(\n GaussianState(prior.mean[links[:, 0]], prior.covariance[links[:, 0]]),\n positions[links[:, 1]],\n GaussianState(\n projection.mean[links[:, 0]],\n projection.covariance[links[:, 0]],\n projection.precision[links[:, 0]] if projection.precision is not None else None,\n ),\n )\n\n # Take prior by default if non-linked\n prior.mean[links[:, 0]] = posterior.mean\n prior.covariance[links[:, 0]] = posterior.covariance\n posterior = prior\n\n self._handle_tracks(posterior, positions, links, detections.frame_id)\n\n def _handle_tracks(\n self, posterior: GaussianState, measures: torch.Tensor, links: torch.Tensor, frame_id: int\n ) -> None:\n \"\"\"Handle tracks to save track data, start new tracks and delete lost ones\n\n Args:\n posterior (GaussianState): Posterior for all active tracks.\n Mean: (n, dim_x, 1), Cov: (n, dim_x, dim_x)\n measures (torch.Tensor): Measures (Only supports 2D). 
Measures can include velocities (KOFT)\n Shape: (m, 2, 1) or (m, 4 ,1), dtype: float32\n links (torch.Tensor): Links between tracks and measures\n Shape: (L, 2), dtype: int32\n frame_id (int): Current frame id\n\n \"\"\"\n\n # Save both state and measurement in partial tracks.\n i_to_j = torch.full((len(self.active_tracks),), -1, dtype=torch.int32)\n i_to_j[links[:, 0]] = links[:, 1]\n active_mask = torch.full((len(self.active_tracks),), False)\n still_active = []\n for i, track in enumerate(self.active_tracks):\n j = i_to_j[i]\n if j == -1:\n track.update(posterior.mean[i], posterior.covariance[i], None)\n else:\n track.update(posterior.mean[i], posterior.covariance[i], measures[j])\n\n if track.is_active():\n still_active.append(track)\n active_mask[i] = True\n else:\n self.tracks.append(track)\n\n # Restrict posterior states to active tracks\n posterior = GaussianState(posterior.mean[active_mask], posterior.covariance[active_mask])\n\n # Create new track for every unmatch detection\n measures[links[:, 1]] = torch.nan\n unmatched_measures = measures[~torch.isnan(measures).squeeze().any(dim=-1)]\n\n if not unmatched_measures.numel():\n self.state = posterior\n self.active_tracks = still_active\n return\n\n # Initial state at measures,. Unmeasured state ([velocity, ]acceleration, jerk) are initialize at 0\n # Variance for unmeasured state is the process_noise\n # Variance for measured state is the measurement_noise\n unmatched_state = GaussianState(\n torch.zeros((unmatched_measures.shape[0], self.kalman_filter.state_dim, 1)),\n torch.cat([self.kalman_filter.process_noise[None]] * unmatched_measures.shape[0]),\n )\n unmatched_state.mean[:, : unmatched_measures.shape[1]] = unmatched_measures\n unmatched_state.covariance[\n :, : unmatched_measures.shape[1], : unmatched_measures.shape[1]\n ] = self.kalman_filter.measurement_noise\n\n # Create a new active track for each new state created\n for i in range(unmatched_measures.shape[0]):\n still_active.append(\n PartialTrack(\n len(self.tracks) + len(still_active),\n frame_id,\n unmatched_state.mean[i],\n unmatched_state.covariance[i],\n unmatched_measures[i],\n )\n )\n\n # State is the posterior for all active tracks (concatenation of new tracks with old kept ones)\n self.active_tracks = still_active\n self.state = GaussianState(\n torch.cat((posterior.mean, unmatched_state.mean)),\n torch.cat((posterior.covariance, unmatched_state.covariance)),\n )"
},
{
"identifier": "PartialTrack",
"path": "src/skt.py",
"snippet": "class PartialTrack:\n \"\"\"Partial track class\n\n Partial tracks are created for each unlinked detections, and then updated with following detections.\n It requires CONFIRMED_AT consecutive detections to confirm the tracks (INITIATED => CONFIRMED). If a miss detection\n occurs, it deletes it (INITIATED => DELETED).\n\n Once confirmed, it is resilient to miss detections, waiting MAX_NON_MEASURE frames before ending the track\n (CONFIRMED => ENDED)\n\n Will also store the kalman data for analysis.\n \"\"\"\n\n MAX_NON_MEASURE = 3\n CONFIRMED_AT = 3\n\n class TrackState(enum.IntEnum):\n INITIATED = 0\n CONFIRMED = 1\n ENDED = 2\n DELETED = 3\n\n def __init__(\n self,\n track_id: int,\n start: int,\n mean: torch.Tensor,\n covariance: torch.Tensor,\n measure: torch.Tensor,\n points=(0, 1),\n ) -> None:\n self._points = points # Points data in state\n self.track_id = track_id\n self.start = start\n self.track_state = PartialTrack.TrackState.INITIATED\n self.last_measurement = 0\n self._mean = [mean.clone()]\n self._covariance = [covariance.clone()]\n self._measure = [measure.clone()]\n\n def __len__(self) -> int:\n return len(self._mean) - self.last_measurement\n\n def is_active(self) -> bool:\n return self.track_state < 2\n\n def update(self, mean: torch.Tensor, covariance: torch.Tensor, measure: Optional[torch.Tensor]) -> None:\n \"\"\"Should be called only if the track is active\"\"\"\n self._mean.append(mean.clone())\n self._covariance.append(covariance.clone())\n\n if measure is None: # Not associated with a measure\n self._measure.append(torch.full_like(self._measure[-1], torch.nan))\n self.last_measurement += 1\n\n if self.track_state == PartialTrack.TrackState.INITIATED:\n self.track_state = PartialTrack.TrackState.DELETED\n\n elif self.last_measurement >= self.MAX_NON_MEASURE: # Could also check the width of the state covariance\n self.track_state = PartialTrack.TrackState.ENDED\n\n return\n\n self._measure.append(measure.clone())\n self.last_measurement = 0\n\n if self.track_state == PartialTrack.TrackState.INITIATED:\n if len(self) >= self.CONFIRMED_AT:\n self.track_state = PartialTrack.TrackState.CONFIRMED\n\n @property\n def points(self) -> torch.Tensor:\n return torch.cat([mean[None, self._points, 0] for mean in self._mean[: len(self)]])"
},
{
"identifier": "constant_koft_filter",
"path": "src/koft.py",
"snippet": "def constant_koft_filter(\n pos_std: torch.Tensor, vel_std: torch.Tensor, process_std: torch.Tensor, dim=2, order=1\n) -> KalmanFilter:\n \"\"\"Create a constant Velocity/Acceleration/Jerk Kalman Filter with pos and velocity measurements\n\n Create a kalman filter with a state containing the positions on each dimension (x, y, z, ...)\n with their derivatives up to `order`. The order-th derivatives are supposed constant.\n\n Let x be the positions for each dim and x^i the i-th derivatives of these positions\n Prediction follows:\n x^i_{t+1} = x^i_t + x^{i+1}_t, for i < order\n x^order_{t+1} = x^order_t\n\n Args:\n measurement_std (torch.Tensor): Std of the measurements\n 99.7% of measurements should fall within 3 std of the true position\n Shape: Broadcastable to dim, dtype: float64\n process_std (torch.Tensor): Process noise, a typical value is maximum diff between two consecutive\n order-th derivative. (Eg: for constant velocity -> Maximum acceleration between two frames)\n Shape: Broadcastable to dim, dtype: float64\n dim (int): Dimension of the motion (1d, 2d, 3d, ...)\n Default: 2\n order (int): Order of the filer (The order-th derivatives are constants)\n Default: 1 (Constant velocity)\n\n \"\"\"\n\n assert order >= 1, \"Velocity is measured and has to be set\"\n\n measurement_std = torch.cat((torch.broadcast_to(pos_std, (dim,)), torch.broadcast_to(vel_std, (dim,))))\n process_std = torch.broadcast_to(process_std, (dim,))\n\n measure_dim = 2 * dim\n state_dim = (order + 1) * dim\n\n # Measurement model\n # We measure position and velocity\n # Noise is independent and can have a different value in each direction\n measurement_matrix = torch.eye(measure_dim, state_dim)\n measurement_noise = torch.eye(measure_dim) * measurement_std**2\n\n # Process\n # Constant model\n # Noise in velocity estimation (which induce a noise in position estimation)\n process_matrix = torch.eye(state_dim) + torch.tensor(np.eye(state_dim, k=dim)).to(torch.float32)\n process_noise = torch.tensor(\n filterpy.common.Q_discrete_white_noise(order + 1, block_size=dim, order_by_dim=False)\n ).to(torch.float32) * torch.cat([process_std**2] * (order + 1))\n\n return KalmanFilter(process_matrix, measurement_matrix, process_noise, measurement_noise)"
},
{
"identifier": "OptFlowExtraction",
"path": "src/koft.py",
"snippet": "class OptFlowExtraction(enum.Enum):\n \"\"\"Extraction of optical flow from different positions\"\"\"\n\n DETECTED = 0\n POSTERIOR = 1\n PRIOR = 2"
},
{
"identifier": "SingleUpdateKOFTracker",
"path": "src/koft.py",
"snippet": "class SingleUpdateKOFTracker(SimpleKalmanTracker):\n \"\"\"Kalman and Optical Flow tracker with a single update\n\n Update velocities only for matched tracks and measyre velocity from detected positions\n \"\"\"\n\n __ALWAYS_UPDATE_VEL = False\n\n def __init__(self, kalman_filter: KalmanFilter, opt_flow: OptFlow, match_cfg: MatchingConfig) -> None:\n super().__init__(kalman_filter, match_cfg)\n self.opt_flow = opt_flow\n self.flow = np.zeros((1, 1, 2))\n\n def run(\n self, video: Iterable[np.ndarray], detections_sequence: Collection[byotrack.Detections]\n ) -> Collection[byotrack.Track]:\n assert isinstance(video, Sequence), \"Only indexable videos are supported\"\n\n # Reset tracks and states\n self.tracks = []\n self.active_tracks = []\n self.state = GaussianState(\n torch.zeros((0, self.kalman_filter.state_dim, 1)),\n torch.zeros((0, self.kalman_filter.state_dim, self.kalman_filter.state_dim)),\n )\n\n # Extract initial frame and prepare for optflow\n frame = video[next(iter(detections_sequence)).frame_id][..., 0]\n src = self.opt_flow.prepare(frame)\n\n for detections in tqdm.tqdm(detections_sequence):\n try:\n # We could compute flow from t-1 to t, or t-1 to t+1\n # But it is much better to compute flow from\n # frame = video[max(detections.frame_id - 1, 0)]\n # src = self.opt_flow.prepare(frame)\n # frame = video[detections.frame_id][..., 0]\n frame = video[detections.frame_id + 1][..., 0]\n except IndexError:\n pass\n\n dest = self.opt_flow.prepare(frame)\n self.flow = self.opt_flow.calc(src, dest) # / 2 if computed from t-1 to t+1\n\n self.update(detections)\n\n src = dest\n\n tracks = []\n for track in self.tracks + self.active_tracks:\n if track.track_state in (track.TrackState.DELETED, track.TrackState.INITIATED):\n continue # Ignore unconfirmed tracks\n tracks.append(\n byotrack.Track(\n track.start,\n track.points,\n track.track_id,\n )\n )\n return tracks\n\n def update(self, detections: byotrack.Detections):\n prior = self.kalman_filter.predict(self.state)\n projection = self.kalman_filter.project(prior)\n positions = detections.position[..., None].clone() # Shape m, d, 1\n\n # Measures = positions + velocities\n velocities = self.opt_flow.flow_at(self.flow, positions[..., 0].numpy().astype(np.float64), self.opt_flow.scale)\n measures = torch.cat([positions, torch.tensor(velocities[..., None]).to(torch.float32)], dim=1)\n\n # Association\n links = self.match(projection, measures)\n\n if self.__ALWAYS_UPDATE_VEL: # Single update for everyone even unmatched tracks (updated with inf pos cov)\n # Add measures for unlinked state\n prior_velocities = self.opt_flow.flow_at(\n self.flow, prior.mean[:, :2, 0].numpy().astype(np.float64), self.opt_flow.scale\n )\n all_measures = torch.cat(\n [prior.mean[:, :2], torch.tensor(prior_velocities[..., None]).to(torch.float32)], dim=1\n )\n all_measures[links[:, 0]] = measures[links[:, 1]]\n\n # For unmatched tracks, uncertainty on measurements (which is the prior here) is set to inf\n # Note that dropping this helps => Future investigation here\n cov = projection.covariance.clone()\n projection.covariance[:, 0, 0] = torch.inf\n projection.covariance[:, 1, 1] = torch.inf\n projection.covariance[links[:, 0]] = cov[links[:, 0]]\n projection.precision = None\n\n # Update linked kalman filter\n posterior = self.kalman_filter.update(\n prior,\n all_measures,\n projection,\n )\n else: # Classic single update\n # Update linked kalman filter\n posterior = self.kalman_filter.update(\n GaussianState(prior.mean[links[:, 0]], 
prior.covariance[links[:, 0]]),\n measures[links[:, 1]],\n GaussianState(\n projection.mean[links[:, 0]],\n projection.covariance[links[:, 0]],\n projection.precision[links[:, 0]] if projection.precision is not None else None,\n ),\n )\n\n # Take prior by default if non-linked\n prior.mean[links[:, 0]] = posterior.mean\n prior.covariance[links[:, 0]] = posterior.covariance\n posterior = prior\n\n self._handle_tracks(posterior, measures, links, detections.frame_id)"
},
{
"identifier": "TwoUpdateKOFTracker",
"path": "src/koft.py",
"snippet": "class TwoUpdateKOFTracker(SingleUpdateKOFTracker):\n \"\"\"Kalman and Optical Flow tracker\"\"\"\n\n def __init__(\n self,\n kalman_filter: KalmanFilter,\n opt_flow: OptFlow,\n match_cfg: MatchingConfig,\n opt_flow_at=OptFlowExtraction.POSTERIOR,\n always_update_vel=True,\n ) -> None:\n super().__init__(kalman_filter, opt_flow, match_cfg)\n self.opt_flow_at = opt_flow_at\n self.always_update_vel = always_update_vel\n\n def update(self, detections: byotrack.Detections):\n projection = self.kalman_filter.project(\n self.state,\n # self.kalman_filter.measurement_matrix[:2], # Let's also project velocity (useful for matching)\n # self.kalman_filter.measurement_noise[:2, :2],\n )\n\n positions = detections.position[..., None].clone() # Shape m, d, 1\n\n # Association\n links = self.match(projection, positions)\n\n # First update (Update with associate detections positions)\n posterior = self.kalman_filter.update(\n GaussianState(self.state.mean[links[:, 0]], self.state.covariance[links[:, 0]]),\n positions[links[:, 1]],\n GaussianState(\n projection.mean[links[:, 0], :2],\n projection.covariance[links[:, 0], :2, :2],\n None, # /!\\ inv(cov[:2,:2]) != inv(cov)[:2, :2]\n ),\n self.kalman_filter.measurement_matrix[:2],\n self.kalman_filter.measurement_noise[:2, :2],\n )\n\n # Compute velocities\n velocities_measured = torch.tensor( # Measured velocities\n self.opt_flow.flow_at(self.flow, positions[..., 0].numpy().astype(np.float64), self.opt_flow.scale)\n )[..., None].to(torch.float32)\n\n if self.opt_flow_at == OptFlowExtraction.DETECTED:\n velocities = velocities_measured[links[:, 1]]\n elif self.opt_flow_at == OptFlowExtraction.POSTERIOR:\n velocities = torch.tensor(\n self.opt_flow.flow_at(\n self.flow, posterior.mean[..., :2, 0].numpy().astype(np.float64), self.opt_flow.scale\n )\n )[..., None].to(torch.float32)\n velocities_measured[links[:, 1]] = velocities\n else: # Prior\n velocities = torch.tensor(\n self.opt_flow.flow_at(\n self.flow, projection.mean[links[:, 0], :2, 0].numpy().astype(np.float64), self.opt_flow.scale\n )\n )[..., None].to(torch.float32)\n velocities_measured[links[:, 1]] = velocities\n\n # Update matched tracks with velocities\n posterior = self.kalman_filter.update(\n posterior,\n velocities,\n None,\n self.kalman_filter.measurement_matrix[2:],\n self.kalman_filter.measurement_noise[2:, 2:],\n )\n\n measures = torch.cat([positions, velocities_measured], dim=1)\n\n if self.always_update_vel:\n velocities = torch.tensor(\n self.opt_flow.flow_at(\n self.flow, projection.mean[:, :2, 0].numpy().astype(np.float64), self.opt_flow.scale\n )\n )[..., None].to(torch.float32)\n self.state = self.kalman_filter.update( # Update unmatched tracks with velocities\n self.state,\n velocities,\n None,\n self.kalman_filter.measurement_matrix[2:],\n self.kalman_filter.measurement_noise[2:, 2:],\n )\n\n # Take prior by default if non-linked, else posterior\n self.state.mean[links[:, 0]] = posterior.mean\n self.state.covariance[links[:, 0]] = posterior.covariance\n\n self._handle_tracks(self.state, measures, links, detections.frame_id)\n\n self.state = self.kalman_filter.predict(self.state)"
},
{
"identifier": "farneback",
"path": "src/optical_flow.py",
"snippet": "class OptFlow:\n def __init__(self, method: Callable[[np.ndarray, np.ndarray], np.ndarray], threshs=(0.0, 1.0), scale=2, blur=0.0):\n def prepare(self, frame: np.ndarray) -> np.ndarray:\n def calc(self, source: np.ndarray, destination: np.ndarray) -> np.ndarray:\n def flow_at(flow: np.ndarray, points: np.ndarray, scale: int) -> np.ndarray:\n def transform(self, flow: np.ndarray, points: np.ndarray) -> np.ndarray:"
},
{
"identifier": "enforce_all_seeds",
"path": "src/utils.py",
"snippet": "def enforce_all_seeds(seed: int, strict=True):\n \"\"\"Enforce all the seeds\n\n If strict you may have to define the following env variable:\n CUBLAS_WORKSPACE_CONFIG=:4096:8 (Increase a bit the memory foot print ~25Mo)\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n if strict:\n torch.backends.cudnn.benchmark = False # By default should already be to False\n torch.use_deterministic_algorithms(True)"
}
] | import dataclasses
import enum
import pathlib
import dacite
import torch
import tqdm # type: ignore
import yaml # type: ignore
import byotrack
from typing import Collection, List
from byotrack.implementation.detector.wavelet import WaveletDetector
from byotrack.implementation.linker.icy_emht import EMHTParameters, IcyEMHTLinker, Motion
from byotrack.implementation.linker.trackmate.trackmate import TrackMateLinker, TrackMateParameters
from byotrack.implementation.refiner.interpolater import ForwardBackwardInterpolater
from ..detector import FakeDetector
from ..metrics.detections import DetectionMetric
from ..metrics.tracking import compute_tracking_metrics
from ..skt import constant_kalman_filter, Dist, Method, MatchingConfig, SimpleKalmanTracker, PartialTrack
from ..koft import constant_koft_filter, OptFlowExtraction, SingleUpdateKOFTracker, TwoUpdateKOFTracker
from ..optical_flow import farneback
from ..utils import enforce_all_seeds | 10,339 | matching_method: Method
always_update_velocities: bool = True
dim: int = 2
order: int = 1
class TrackingMethod(enum.Enum):
SKT = "skt"
KOFT = "koft"
KOFTmm = "koft--"
KOFTpp = "koft++"
TRACKMATE = "trackmate"
TRACKMATE_KF = "trackmate-kf"
EMHT = "emht"
@dataclasses.dataclass
class ExperimentConfig:
seed: int
simulation_path: pathlib.Path
tracking_method: TrackingMethod
detection: DetectionConfig
kalman: KalmanConfig
icy_path: pathlib.Path
fiji_path: pathlib.Path
def create_linker(self, thresh: float) -> byotrack.Linker:
"""Create a linker"""
if self.tracking_method is TrackingMethod.EMHT:
return IcyEMHTLinker(
self.icy_path,
EMHTParameters(
gate_factor=thresh,
motion=Motion.MULTI,
tree_depth=2,
),
)
if self.tracking_method in (TrackingMethod.TRACKMATE, TrackingMethod.TRACKMATE_KF):
# As kalman tracking we let a gap of 2 consecutive miss detections
# In that case, we allow 1.5 thresh
return TrackMateLinker(
self.fiji_path,
TrackMateParameters(
max_frame_gap=PartialTrack.MAX_NON_MEASURE,
linking_max_distance=thresh,
gap_closing_max_distance=thresh * 1.5,
kalman_search_radius=thresh if self.tracking_method is TrackingMethod.TRACKMATE_KF else None,
),
)
if self.tracking_method is TrackingMethod.SKT:
kalman_filter = constant_kalman_filter(
torch.tensor(self.kalman.detection_noise),
torch.tensor(self.kalman.process_noise),
self.kalman.dim,
self.kalman.order,
)
return SimpleKalmanTracker(
kalman_filter, MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method)
)
# self.tracking_method is TrackingMethod.KOFT:
kalman_filter = constant_koft_filter(
torch.tensor(self.kalman.detection_noise),
torch.tensor(self.kalman.of_noise),
torch.tensor(self.kalman.process_noise),
self.kalman.dim,
self.kalman.order,
)
if self.tracking_method is TrackingMethod.KOFTmm:
return SingleUpdateKOFTracker(
kalman_filter, farneback, MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method)
)
# <=> two updates, without updating vel for all tracks and using OptFlowExtraction at Detected pos
# return TwoUpdateKOFTracker(
# kalman_filter,
# farneback,
# MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method),
# OptFlowExtraction.DETECTED,
# False,
# )
PartialTrack.MAX_NON_MEASURE = 5 if self.tracking_method is TrackingMethod.KOFTpp else 3
return TwoUpdateKOFTracker(
kalman_filter,
farneback,
MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method),
OptFlowExtraction.POSTERIOR,
self.kalman.always_update_velocities,
)
def create_thresholds(self) -> List[float]:
if self.tracking_method is TrackingMethod.EMHT:
# XXX: EMHT struggles to converge in some scenarios with high fpr and fnr.
# On those where it converges, 3.0 is the best, and it converges for 3.0 in all of them.
# So let's manually select [3.0] in high fpr/fnr. In other cases, let's keep the default grid search.
# return [3.0]
return [3.0, 4.0, 5.0, 6.0] # MAHA
if (
self.tracking_method in (TrackingMethod.TRACKMATE, TrackingMethod.TRACKMATE_KF)
or self.kalman.dist is Dist.EUCLIDIAN
):
return [3.0, 5.0, 7.0, 10.0, 15.0]
if self.kalman.dist is Dist.MAHALANOBIS:
return [0.5, 1.0, 2.0, 3.0, 4.0]
# self.dist is Dist.LIKELIHOOD:
return [1e-6, 1e-5, 1e-4, 1e-3, 1e-2]
def main(name: str, cfg_data: dict) -> None:
print("Running:", name)
print(yaml.dump(cfg_data))
cfg = dacite.from_dict(ExperimentConfig, cfg_data, dacite.Config(cast=[pathlib.Path, tuple, enum.Enum]))
|
class DetectionMethod(enum.Enum):
WAVELET = "wavelet"
FAKE = "fake"
@dataclasses.dataclass
class WaveletConfig:
k: float = 3.0
scale: int = 1
min_area: float = 10.0
@dataclasses.dataclass
class FakeConfig:
fpr: float = 0.1 # Bad detection rate
fnr: float = 0.2 # Miss detection rate
measurement_noise: float = 1.0
@dataclasses.dataclass
class DetectionConfig:
detector: DetectionMethod
wavelet: WaveletConfig
fake: FakeConfig
# interactive = False # Could tweak the detector parameters interactively ?
def create_detector(self, mu: torch.Tensor) -> byotrack.Detector:
if self.detector == DetectionMethod.WAVELET:
return WaveletDetector(self.wavelet.scale, self.wavelet.k, self.wavelet.min_area)
return FakeDetector(mu, self.fake.measurement_noise, self.fake.fpr, self.fake.fnr)
@dataclasses.dataclass
class KalmanConfig:
detection_noise: float
of_noise: float
process_noise: float # Mis-estimation of the process
dist: Dist
matching_method: Method
always_update_velocities: bool = True
dim: int = 2
order: int = 1
class TrackingMethod(enum.Enum):
SKT = "skt"
KOFT = "koft"
KOFTmm = "koft--"
KOFTpp = "koft++"
TRACKMATE = "trackmate"
TRACKMATE_KF = "trackmate-kf"
EMHT = "emht"
@dataclasses.dataclass
class ExperimentConfig:
seed: int
simulation_path: pathlib.Path
tracking_method: TrackingMethod
detection: DetectionConfig
kalman: KalmanConfig
icy_path: pathlib.Path
fiji_path: pathlib.Path
def create_linker(self, thresh: float) -> byotrack.Linker:
"""Create a linker"""
if self.tracking_method is TrackingMethod.EMHT:
return IcyEMHTLinker(
self.icy_path,
EMHTParameters(
gate_factor=thresh,
motion=Motion.MULTI,
tree_depth=2,
),
)
if self.tracking_method in (TrackingMethod.TRACKMATE, TrackingMethod.TRACKMATE_KF):
# As kalman tracking we let a gap of 2 consecutive miss detections
# In that case, we allow 1.5 thresh
return TrackMateLinker(
self.fiji_path,
TrackMateParameters(
max_frame_gap=PartialTrack.MAX_NON_MEASURE,
linking_max_distance=thresh,
gap_closing_max_distance=thresh * 1.5,
kalman_search_radius=thresh if self.tracking_method is TrackingMethod.TRACKMATE_KF else None,
),
)
if self.tracking_method is TrackingMethod.SKT:
kalman_filter = constant_kalman_filter(
torch.tensor(self.kalman.detection_noise),
torch.tensor(self.kalman.process_noise),
self.kalman.dim,
self.kalman.order,
)
return SimpleKalmanTracker(
kalman_filter, MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method)
)
# self.tracking_method is TrackingMethod.KOFT:
kalman_filter = constant_koft_filter(
torch.tensor(self.kalman.detection_noise),
torch.tensor(self.kalman.of_noise),
torch.tensor(self.kalman.process_noise),
self.kalman.dim,
self.kalman.order,
)
if self.tracking_method is TrackingMethod.KOFTmm:
return SingleUpdateKOFTracker(
kalman_filter, farneback, MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method)
)
# <=> two updates, without updating vel for all tracks and using OptFlowExtraction at Detected pos
# return TwoUpdateKOFTracker(
# kalman_filter,
# farneback,
# MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method),
# OptFlowExtraction.DETECTED,
# False,
# )
PartialTrack.MAX_NON_MEASURE = 5 if self.tracking_method is TrackingMethod.KOFTpp else 3
return TwoUpdateKOFTracker(
kalman_filter,
farneback,
MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method),
OptFlowExtraction.POSTERIOR,
self.kalman.always_update_velocities,
)
def create_thresholds(self) -> List[float]:
if self.tracking_method is TrackingMethod.EMHT:
# XXX: EMHT struggles to converge in some scenarios with high fpr and fnr.
# On those where it converges, 3.0 is the best, and it converges for 3.0 in all of them.
# So let's manually select [3.0] in high fpr/fnr. In other cases, let's keep the default grid search.
# return [3.0]
return [3.0, 4.0, 5.0, 6.0] # MAHA
if (
self.tracking_method in (TrackingMethod.TRACKMATE, TrackingMethod.TRACKMATE_KF)
or self.kalman.dist is Dist.EUCLIDIAN
):
return [3.0, 5.0, 7.0, 10.0, 15.0]
if self.kalman.dist is Dist.MAHALANOBIS:
return [0.5, 1.0, 2.0, 3.0, 4.0]
# self.dist is Dist.LIKELIHOOD:
return [1e-6, 1e-5, 1e-4, 1e-3, 1e-2]
def main(name: str, cfg_data: dict) -> None:
print("Running:", name)
print(yaml.dump(cfg_data))
cfg = dacite.from_dict(ExperimentConfig, cfg_data, dacite.Config(cast=[pathlib.Path, tuple, enum.Enum]))
| enforce_all_seeds(cfg.seed) | 14 | 2023-11-10 10:18:39+00:00 | 12k |
david9dragon9/LOMOLite | lomo/lomo_base.py | [
{
"identifier": "LOMO",
"path": "lomo/lomo_orig.py",
"snippet": "class LOMO(Optimizer):\n \"\"\"\n 一个自定义的优化器类LOMO,用于在分布式训练中的梯度更新。\n\n 该类实现两个梯度更新函数 :meth:`fuse_update` 和 :meth:`fuse_update_zero3`,分别用于非ZeRO和ZeRO模式下的梯度更新。\n\n :param model: 待优化的模型\n :param lr: 学习率,默认值为1e-3\n :param clip_grad_norm: 梯度裁剪的范数阈值\n\n .. note::\n\n clip_grad_norm须为正数\n\n :param clip_grad_value: 梯度裁剪的值域阈值\n \"\"\"\n\n def __init__(self, model, lr=1e-3, clip_grad_norm=None, clip_grad_value=None):\n self.model = model\n self.lr = lr\n self.local_rank = int(os.environ[\"LOCAL_RANK\"])\n self.world_size = dist.get_world_size()\n self.clip_grad_norm = clip_grad_norm\n self.clip_grad_value = clip_grad_value\n\n # for grad norm\n if self.clip_grad_norm is not None and self.clip_grad_norm <= 0:\n raise ValueError(\n f\"clip_grad_norm should be positive, got {self.clip_grad_norm}.\"\n )\n self.gather_norm = False\n self.grad_norms = []\n self.clip_coef = None\n\n # check if zero3 is enabled\n p0 = list(self.model.parameters())[0]\n if hasattr(p0, \"ds_tensor\"): # zero3 is enabled\n self.grad_func = self.fuse_update_zero3()\n else:\n self.grad_func = self.fuse_update()\n # check if fp16 is enabled\n if False: # p0.dtype == torch.float16:\n self.loss_scaler = DynamicLossScaler(\n init_scale=2**16,\n ) # TODO: add args\n if self.clip_grad_norm is None:\n raise ValueError(\n \"Loss scaling is recommended to be used with grad norm to get better performance.\"\n )\n else:\n self.loss_scaler = None\n\n # register hook function, which will be called through the backward process\n for n, p in self.model.named_parameters():\n if p.requires_grad:\n p.register_hook(self.grad_func)\n defaults = dict(\n lr=lr, clip_grad_norm=clip_grad_norm, clip_grad_value=clip_grad_value\n )\n super(LOMO, self).__init__(self.model.parameters(), defaults)\n\n def fuse_update(self):\n \"\"\"\n 在非ZeRO模式下更新模型参数的梯度。\n\n :return: func,一个闭包函数,用于更新模型参数的梯度\n \"\"\"\n\n def func(x):\n \"\"\"\n 闭包函数,用于更新模型参数的梯度。\n \"\"\"\n with torch.no_grad():\n for n, p in self.model.named_parameters():\n if p.requires_grad and p.grad is not None:\n if self.loss_scaler:\n if (\n self.loss_scaler.has_overflow_serial\n or self.loss_scaler._has_inf_or_nan(p.grad)\n ):\n # if the overflow is detected, drop the gradient\n p.grad = None\n self.loss_scaler.has_overflow_serial = True\n break\n grad_fp32 = p.grad.to(torch.float32)\n p.grad = None\n if self.loss_scaler:\n grad_fp32.div_(self.loss_scaler.loss_scale)\n if self.gather_norm:\n # we adopt two backward pass for gradient norm compuation and parameter update, respectively.\n self.grad_norms.append(torch.norm(grad_fp32, 2.0))\n else:\n if (\n self.clip_grad_value is not None\n and self.clip_grad_value > 0\n ):\n # Clipping gradients by their value\n grad_fp32.clamp_(\n min=-self.clip_grad_value, max=self.clip_grad_value\n )\n if (\n self.clip_grad_norm is not None\n and self.clip_grad_norm > 0\n and self.clip_coef is not None\n ):\n # Normalize the gradient according to its norm (computed in another pass)\n grad_fp32.mul_(self.clip_coef)\n p_fp32 = p.data.to(torch.float32)\n p_fp32.add_(grad_fp32, alpha=-self.lr)\n p.data.copy_(p_fp32)\n\n return x\n\n return func\n\n def fuse_update_zero3(self):\n \"\"\"\n 在ZeRO模式下更新模型参数的梯度。\n\n :return: func,一个闭包函数,用于更新模型参数的梯度。\n \"\"\"\n\n def func(x):\n with torch.no_grad():\n for n, p in self.model.named_parameters():\n if p.grad is not None:\n torch.distributed.all_reduce(\n p.grad, op=torch.distributed.ReduceOp.AVG, async_op=False\n )\n if self.loss_scaler:\n if (\n self.loss_scaler.has_overflow_serial\n or 
self.loss_scaler._has_inf_or_nan(p.grad)\n ):\n # if the overflow is detected, drop the gradient\n p.grad = None\n self.loss_scaler.has_overflow_serial = True\n break\n\n grad_fp32 = p.grad.to(torch.float32)\n p.grad = None\n param_fp32 = p.ds_tensor.to(torch.float32)\n if self.loss_scaler:\n grad_fp32.div_(self.loss_scaler.loss_scale)\n\n if self.gather_norm:\n # we adopt two backward pass for gradient norm compuation and parameter update, respectively.\n self.grad_norms.append(torch.norm(grad_fp32, 2.0))\n else: # update param\n one_dim_grad_fp32 = grad_fp32.view(-1)\n partition_size = p.ds_tensor.numel()\n start = partition_size * self.local_rank\n end = min(start + partition_size, grad_fp32.numel())\n partitioned_grad_fp32 = one_dim_grad_fp32.narrow(\n 0, start, end - start\n )\n\n if self.clip_grad_value is not None:\n # Clipping gradients by their value\n partitioned_grad_fp32.clamp_(\n min=-self.clip_grad_value, max=self.clip_grad_value\n )\n if (\n self.clip_grad_norm is not None\n and self.clip_grad_norm > 0\n and self.clip_coef is not None\n ):\n # Normalize the gradient according to its norm (computed in another pass)\n partitioned_grad_fp32.mul_(self.clip_coef)\n\n partitioned_p = param_fp32.narrow(0, 0, end - start)\n partitioned_p.add_(partitioned_grad_fp32, alpha=-self.lr)\n p.ds_tensor[: end - start] = partitioned_p\n return x\n\n return func\n\n def fused_backward(self, loss, lr):\n \"\"\"\n 执行一步反向传播并更新模型的梯度。\n\n :param loss: 模型的loss值\n :param lr: 学习率\n \"\"\"\n self.lr = lr\n # Users need call grad_norm themselves and then call backward_step\n if (\n self.clip_grad_norm is not None\n and self.clip_grad_norm > 0\n and self.clip_coef is None\n ):\n raise ValueError(\n \"clip_grad_norm is not None, but clip_coef is None. \"\n \"Please call optimizer.grad_norm() before optimizer.fused_backward().\"\n )\n if self.loss_scaler:\n loss = loss * self.loss_scaler.loss_scale\n loss.backward()\n # update the last parameter since the last parameter in the computaiton graph is not ready when calling hook functions\n # the argument of grad_func is just a placeholder, and it can be anything.\n self.grad_func(0)\n\n def grad_norm(self, loss):\n \"\"\"\n 计算梯度的范数。\n\n :param loss: 模型的loss值\n \"\"\"\n self.gather_norm = True\n self.grad_norms = []\n if self.loss_scaler:\n self.loss_scaler.has_overflow_serial = False\n loss = loss * self.loss_scaler.loss_scale\n loss.backward(retain_graph=True)\n # update the last parameter since the last parameter in the computaiton graph is not ready when calling hook functions\n # the argument of grad_func is just a placeholder, and it can be anything.\n self.grad_func(0)\n\n if self.loss_scaler and self.loss_scaler.has_overflow_serial:\n self.loss_scaler.update_scale(overflow=True)\n with torch.no_grad(): # clear gradients\n for n, p in self.model.named_parameters():\n p.grad = None\n return\n\n with torch.no_grad():\n # The norm is computed over all gradients together, as if they were\n # concatenated into a single vector. Gradients are modified in-place.\n self.grad_norms = torch.stack(self.grad_norms)\n\n total_norm = torch.norm(self.grad_norms, 2.0)\n self.clip_coef = float(self.clip_grad_norm) / (total_norm + 1e-6)\n self.clip_coef = torch.clamp(self.clip_coef, max=1.0)\n self.gather_norm = False"
},
{
"identifier": "AdaLomo",
"path": "lomo/adalomo_orig.py",
"snippet": "class AdaLomo(Optimizer):\n \"\"\"\n 一个自定义的优化器类AdaLomo,用于在分布式训练中的梯度更新。\n\n 该类实现两个梯度更新函数 :meth:`fuse_update` 和 :meth:`fuse_update_zero3`,分别用于非ZeRO和ZeRO模式下的梯度更新。\n\n :param model: 待优化的模型\n :param lr: 学习率,默认值为1e-3\n :param eps: 正则化系数。eps[0]防止梯度平方太小,eps[1]用于在根据参数的RMS放缩学习率时防止步长太大\n :param clip_threshold: 归一化update矩阵时的阈值\n :param decay_rate: 梯度平方移动平均的衰减率\n :param clip_grad_norm: 梯度裁剪的范数阈值\n\n .. note::\n\n clip_grad_norm须为正数\n :param clip_grad_value: 梯度裁剪的值域阈值\n :param weight_decay: 权重衰减系数,默认值为0.0\n :param loss_scale: 损失缩放系数,可以用来提高训练精度,但是太大可能会导致nan\n \"\"\"\n\n def __init__(\n self,\n model,\n lr=1e-3,\n loss_scale=2**10,\n eps=(1e-30, 1e-3),\n clip_threshold=1.0,\n decay_rate=-0.8,\n clip_grad_norm=None,\n clip_grad_value=None,\n weight_decay=0.0,\n ):\n self.model = model\n self.lr = lr\n self.clip_grad_norm = clip_grad_norm\n self.clip_grad_value = clip_grad_value\n self.weight_decay = weight_decay\n self.loss_scale = loss_scale\n if self.weight_decay > 0.0:\n self.do_weight_decay = True\n else:\n self.do_weight_decay = False\n self.eps = eps\n self.step_num = 0\n self.decay_rate = decay_rate\n self.clip_threshold = clip_threshold\n\n # for grad norm\n if self.clip_grad_norm is not None and self.clip_grad_norm <= 0:\n raise ValueError(\n f\"clip_grad_norm should be positive, got {self.clip_grad_norm}.\"\n )\n self.gather_norm = False\n self.grad_norms = []\n self.clip_coef = None\n\n # check if zero3 is enabled\n self.zero3_enabled = True # is_deepspeed_zero3_enabled()\n if self.zero3_enabled: # zero3 is enabled\n self.grad_func = self.fuse_update_zero3()\n else:\n self.grad_func = self.fuse_update()\n\n self.exp_avg_sq = {}\n self.exp_avg_sq_row = {}\n self.exp_avg_sq_col = {}\n\n # register hook function, which will be called through the backward process\n for n, p in self.model.named_parameters():\n if len(p.ds_shape) == 1:\n self.exp_avg_sq[n] = torch.zeros(\n p.ds_shape[0], dtype=torch.float32\n ).cuda()\n else:\n self.exp_avg_sq_row[n] = torch.zeros(\n p.ds_shape[0], dtype=torch.float32\n ).cuda()\n self.exp_avg_sq_col[n] = torch.zeros(\n p.ds_shape[1], dtype=torch.float32\n ).cuda()\n\n if p.requires_grad:\n p.register_hook(self.grad_func)\n defaults = dict(\n lr=lr,\n eps=eps,\n weight_decay=weight_decay,\n clip_grad_norm=clip_grad_norm,\n clip_grad_value=clip_grad_value,\n )\n super(AdaLomo, self).__init__(self.model.parameters(), defaults)\n self.dp_rank = 0\n\n @staticmethod\n def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col):\n # copy from fairseq's adafactor implementation:\n # https://github.com/huggingface/transformers/blob/8395f14de6068012787d83989c3627c3df6a252b/src/transformers/optimization.py#L505\n r_factor = (\n (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True))\n .rsqrt_()\n .unsqueeze(-1)\n )\n c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()\n return torch.mul(r_factor, c_factor)\n\n @staticmethod\n def _rms(tensor):\n return tensor.norm(2) / (tensor.numel() ** 0.5)\n\n def fuse_update(self):\n \"\"\"\n 在非ZeRO模式下更新模型参数的梯度。\n\n :return: func,一个闭包函数,用于更新模型参数的梯度\n \"\"\"\n\n def func(x):\n \"\"\"\n 闭包函数,用于更新模型参数的梯度。\n \"\"\"\n with torch.no_grad():\n for n, p in self.model.named_parameters():\n if p.requires_grad and p.grad is not None:\n grad_fp32 = p.grad.to(torch.float32)\n p.grad = None\n if self.loss_scale:\n grad_fp32.div_(self.loss_scale)\n if self.gather_norm:\n # we adopt two backward pass for gradient norm computation and parameter update, respectively.\n self.grad_norms.append(torch.norm(grad_fp32, 2.0))\n else:\n # grad clip or norm\n if 
(\n self.clip_grad_value is not None\n and self.clip_grad_value > 0\n ):\n # Clipping gradients by their value\n grad_fp32.clamp_(\n min=-self.clip_grad_value, max=self.clip_grad_value\n )\n if (\n self.clip_grad_norm is not None\n and self.clip_grad_norm > 0\n and self.clip_coef is not None\n ):\n # Normalize the gradient according to its norm (computed in another pass)\n grad_fp32.mul_(self.clip_coef)\n\n beta2t = 1.0 - math.pow(self.step_num, self.decay_rate)\n update = (grad_fp32**2) + self.eps[0]\n\n if len(p.data.shape) > 1:\n self.exp_avg_sq_row[n].mul_(beta2t).add_(\n update.mean(dim=-1), alpha=1.0 - beta2t\n )\n self.exp_avg_sq_col[n].mul_(beta2t).add_(\n update.mean(dim=-2), alpha=1.0 - beta2t\n )\n update = self._approx_sq_grad(\n self.exp_avg_sq_row[n], self.exp_avg_sq_col[n]\n )\n update.mul_(grad_fp32)\n else:\n self.exp_avg_sq[n].mul_(beta2t).add_(\n update, alpha=1.0 - beta2t\n )\n update = self.exp_avg_sq[n].rsqrt().mul_(grad_fp32)\n\n update.div_(\n (self._rms(update) / self.clip_threshold).clamp_(\n min=1.0\n )\n )\n\n p_fp32 = p.data.to(torch.float32)\n p_rms = torch.norm(p_fp32, 2.0) / math.sqrt(p.numel())\n lr = self.lr\n param_scale = max(self.eps[1], p_rms)\n lr = lr * param_scale\n\n if self.do_weight_decay:\n p_fp32.mul_(1.0 - lr * self.weight_decay)\n p_fp32.add_(update, alpha=-lr)\n p.data.copy_(p_fp32)\n\n return x\n\n return func\n\n def fuse_update_zero3(self):\n \"\"\"\n 在ZeRO模式下更新模型参数的梯度。\n\n :return: func,一个闭包函数,用于更新模型参数的梯度。\n \"\"\"\n\n def func(x):\n with torch.no_grad():\n for n, p in self.model.named_parameters():\n if p.grad is not None:\n torch.distributed.all_reduce(\n p.grad, op=torch.distributed.ReduceOp.AVG, async_op=False\n )\n\n grad_fp32 = p.grad.to(torch.float32)\n p.grad = None\n if self.loss_scale:\n grad_fp32.div_(self.loss_scale)\n\n if self.gather_norm:\n # we adopt two backward pass for gradient norm computation and parameter update, respectively.\n self.grad_norms.append(torch.norm(grad_fp32, 2.0))\n else: # update param\n partition_size = p.ds_tensor.numel()\n start = partition_size * self.dp_rank\n end = min(start + partition_size, grad_fp32.numel())\n\n if self.clip_grad_value is not None:\n # Clipping gradients by their value\n grad_fp32.clamp_(\n min=-self.clip_grad_value, max=self.clip_grad_value\n )\n if (\n self.clip_grad_norm is not None\n and self.clip_grad_norm > 0\n and self.clip_coef is not None\n ):\n # Normalize the gradient according to its norm (computed in another pass)\n grad_fp32.mul_(self.clip_coef)\n\n beta2t = 1.0 - math.pow(self.step_num, self.decay_rate)\n update = (grad_fp32**2) + self.eps[0] # 改成addcmul_\n\n if len(p.ds_shape) > 1:\n self.exp_avg_sq_row[n].mul_(beta2t).add_(\n update.mean(dim=-1), alpha=1.0 - beta2t\n )\n self.exp_avg_sq_col[n].mul_(beta2t).add_(\n update.mean(dim=-2), alpha=1.0 - beta2t\n )\n update = self._approx_sq_grad(\n self.exp_avg_sq_row[n], self.exp_avg_sq_col[n]\n )\n update.mul_(grad_fp32)\n else:\n self.exp_avg_sq[n].mul_(beta2t).add_(\n update, alpha=1.0 - beta2t\n )\n update = self.exp_avg_sq[n].rsqrt().mul_(grad_fp32)\n\n update.div_(\n (self._rms(update) / self.clip_threshold).clamp_(\n min=1.0\n )\n )\n\n one_dim_update = update.view(-1)\n partitioned_update = one_dim_update.narrow(\n 0, start, end - start\n )\n param_fp32 = p.ds_tensor.to(torch.float32)\n partitioned_p = param_fp32.narrow(0, 0, end - start)\n\n p_rms = torch.norm(partitioned_p, 2.0) ** 2\n dist.all_reduce(p_rms, op=torch.distributed.ReduceOp.SUM)\n p_rms = (p_rms / p.ds_numel).sqrt()\n\n lr = self.lr\n 
param_scale = max(self.eps[1], p_rms)\n lr = lr * param_scale\n\n if self.do_weight_decay:\n partitioned_p.mul_(1.0 - lr * self.weight_decay)\n partitioned_p.add_(partitioned_update, alpha=-lr)\n p.ds_tensor[: end - start] = partitioned_p\n\n return x\n\n return func\n\n def fused_backward(self, loss, lr):\n \"\"\"\n 执行一步反向传播并更新模型的梯度。\n\n :param loss: 模型的loss值\n :param lr: 学习率\n \"\"\"\n self.lr = lr\n if self.loss_scale:\n loss = loss * self.loss_scale\n self.step_num += 1\n loss.backward()\n # update the last parameter since the last parameter in the computaiton graph is not ready when calling hook functions\n # the argument of grad_func is just a placeholder, and it can be anything.\n self.grad_func(0)\n\n def grad_norm(self, loss):\n \"\"\"\n 计算梯度的范数。\n\n :param loss: 模型的loss值\n \"\"\"\n self.gather_norm = True\n self.grad_norms = []\n if self.loss_scale:\n loss = loss * self.loss_scale\n loss.backward(retain_graph=True)\n # update the last parameter since the last parameter in the computaiton graph is not ready when calling hook functions\n # the argument of grad_func is just a placeholder, and it can be anything.\n self.grad_func(0)\n\n with torch.no_grad():\n # The norm is computed over all gradients together, as if they were\n # concatenated into a single vector. Gradients are modified in-place.\n self.grad_norms = torch.stack(self.grad_norms)\n\n total_norm = torch.norm(self.grad_norms, 2.0)\n self.clip_coef = float(self.clip_grad_norm) / (total_norm + 1e-6)\n self.clip_coef = torch.clamp(self.clip_coef, max=1.0)\n self.gather_norm = False"
},
{
"identifier": "LearningRateScheduler",
"path": "lomo/lomo_utils.py",
"snippet": "class LearningRateScheduler:\n r\"\"\"\n Learning rate scheduler with warmup.\n\n :param warmup: if ``warmup`` is an integer, ``warmup`` stands for warmup steps, if ``warmup`` is a float,\n such as 0.1, then it stands for warmup_ratio.\n :param schedule: the learning rate will be adjusted according to ``schedule`` strategy,\n which can be: linear or constant.\n \"\"\"\n\n def __init__(\n self, warmup: float, schedule: str, learning_rate: float, n_steps: int = 0\n ):\n\n self.warmup = max(warmup, 0.0)\n self.schedule = schedule\n self.initial_lr = learning_rate\n\n if self.warmup > 1:\n self.warmup = self.warmup / n_steps\n self.t_steps = max(2, n_steps)\n\n if self.schedule == \"constant\":\n self.get_lr = self._get_constant_lr\n elif self.schedule == \"linear\":\n self.get_lr = self._get_linear_lr\n else:\n raise NotImplementedError(\"Only support 'linear', 'constant'.\")\n\n def _get_constant_lr(self, progress):\n if progress < self.warmup:\n return progress / self.warmup\n return 1\n\n def _get_linear_lr(self, progress):\n if progress < self.warmup:\n return progress / self.warmup\n return max((progress - 1.0) / (self.warmup - 1.0), 0.0)\n\n def step(self, global_step):\n progress = global_step / self.t_steps\n return self.initial_lr * self.get_lr(progress)"
},
{
"identifier": "DynamicLossScaler",
"path": "lomo/lomo_utils.py",
"snippet": "class DynamicLossScaler:\n def __init__(\n self,\n init_scale=2**32,\n scale_factor=2.0,\n scale_window=1000,\n min_scale=1,\n delayed_shift=1,\n consecutive_hysteresis=False,\n raise_error_at_min_scale=True,\n dtype=torch.half,\n ):\n self.cur_scale = init_scale\n self.cur_iter = 0\n self.last_overflow_iter = -1\n self.scale_factor = scale_factor\n self.scale_window = scale_window\n self.min_scale = min_scale\n self.delayed_shift = delayed_shift\n self.cur_hysteresis = delayed_shift\n self.consecutive_hysteresis = consecutive_hysteresis\n self.raise_error_at_min_scale = raise_error_at_min_scale\n self.dtype = dtype\n self.has_overflow_serial = False\n\n @property\n def loss_scale(self):\n return self.cur_scale\n\n # `x` is a torch.Tensor\n def _has_inf_or_nan(self, x):\n try:\n # if x is half, the .float() incurs an additional deep copy, but it's necessary if\n # Pytorch's .sum() creates a one-element tensor of the same type as x\n # (which is true for some recent version of pytorch).\n cpu_sum = float(x.float().sum())\n # More efficient version that can be used if .sum() returns a Python scalar\n # cpu_sum = float(x.sum())\n except RuntimeError as instance:\n # We want to check if inst is actually an overflow exception.\n # RuntimeError could come from a different error.\n # If so, we still want the exception to propagate.\n if \"value cannot be converted\" not in instance.args[0]:\n raise\n return True\n else:\n if cpu_sum in [float(\"inf\"), -float(\"inf\")] or cpu_sum != cpu_sum:\n return True\n return False\n\n # `overflow` is boolean indicating whether the gradient overflowed\n def update_scale(self, overflow):\n if overflow:\n # self.cur_scale /= self.scale_factor\n if self.delayed_shift == 1 or self.cur_hysteresis == 1:\n if (self.cur_scale == self.min_scale) and self.raise_error_at_min_scale:\n raise Exception(\n \"Current loss scale already at minimum - cannot decrease scale anymore. Exiting run.\"\n )\n else:\n next_scale = max(self.cur_scale / self.scale_factor, self.min_scale)\n if torch.distributed.get_rank() == 0:\n overflow_msg = f\"[deepspeed] OVERFLOW! Rank {torch.distributed.get_rank()} Skipping step.\"\n if self.dtype == torch.half:\n overflow_msg += f\" Attempted loss scale: {int(self.cur_scale)}, reducing to {int(next_scale)}\"\n print(overflow_msg)\n self.cur_scale = next_scale\n else:\n if torch.distributed.get_rank() == 0:\n overflow_msg = f\"[deepspeed] OVERFLOW! Rank {torch.distributed.get_rank()} Skipping step.\"\n if self.dtype == torch.half:\n overflow_msg += f\" Attempted loss scale: {int(self.cur_scale)}, but hysteresis is {self.cur_hysteresis}. Reducing hysteresis to {self.cur_hysteresis - 1}\"\n print(overflow_msg)\n self.cur_hysteresis -= 1\n self.last_overflow_iter = self.cur_iter\n else:\n if self.consecutive_hysteresis:\n if torch.distributed.get_rank() == 0:\n hysteresis_msg = f\"Consecutive hysteresis is enabled. Restoring hysteresis to {self.delayed_shift}\"\n print(hysteresis_msg)\n self.cur_hysteresis = self.delayed_shift\n if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:\n if not self.consecutive_hysteresis:\n self.cur_hysteresis = self.delayed_shift\n self.cur_scale *= self.scale_factor\n self.cur_iter += 1"
}
] | import torch
import sys
import os
import tqdm
import deepspeed
import deepspeed
import os
from transformers.deepspeed import HfDeepSpeedConfig
from transformers import AutoConfig
from collections import OrderedDict
from lomo.lomo_orig import LOMO
from lomo.adalomo_orig import AdaLomo
from lomo.lomo_utils import LearningRateScheduler, DynamicLossScaler
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator | 7,257 | # Source: https://github.com/OpenLMLab/LOMO
# Source: https://github.com/OpenLMLab/collie/tree/dev/collie
try:
except:
pass
def setup_lomo(model_name_or_path):
torch.set_default_dtype(torch.float16)
ds_config = __file__.replace("lomo_base.py", "ds_config.json")
dschf = HfDeepSpeedConfig(ds_config)
config = AutoConfig.from_pretrained(model_name_or_path)
config.gradient_checkpointing = True
return config
def create_lomo_lr_scheduler(
learning_rate=0.03,
n_steps=1000,
num_train_epochs=10,
warmup=0.1,
lr_scheduler_type="linear",
):
| # Source: https://github.com/OpenLMLab/LOMO
# Source: https://github.com/OpenLMLab/collie/tree/dev/collie
try:
except:
pass
def setup_lomo(model_name_or_path):
torch.set_default_dtype(torch.float16)
ds_config = __file__.replace("lomo_base.py", "ds_config.json")
dschf = HfDeepSpeedConfig(ds_config)
config = AutoConfig.from_pretrained(model_name_or_path)
config.gradient_checkpointing = True
return config
def create_lomo_lr_scheduler(
learning_rate=0.03,
n_steps=1000,
num_train_epochs=10,
warmup=0.1,
lr_scheduler_type="linear",
): | return LearningRateScheduler( | 2 | 2023-11-11 03:29:00+00:00 | 12k |
quantuminterface/qiclib | src/qiclib/code/qi_sequencer.py | [
{
"identifier": "QiCellProperty",
"path": "src/qiclib/code/qi_var_definitions.py",
"snippet": "class QiCellProperty(QiExpression):\n \"\"\"When describing experiments, properties of cells might not yet be defined. Instead a QiCellProperty object will be generated.\n This object can be used as length definition in cQiWait commands and QiPulse\"\"\"\n\n def __init__(self, cell, name):\n super().__init__()\n from .qi_jobs import QiCell\n\n self.name: str = name\n self.cell: QiCell = cell\n self.operations = lambda val: val\n self.opcode = \"x\"\n\n @property\n def opcode_p(self):\n \"\"\"Old opcode in parantheses for building new opcode\"\"\"\n return self.opcode if self.opcode == \"x\" else f\"({self.opcode})\"\n\n def resolve_equal(self, o: object) -> bool:\n if isinstance(o, QiCellProperty):\n return self.name == o.name and self.opcode == o.opcode\n elif o is None:\n return False\n try:\n return o == self()\n except KeyError:\n return False # At time of comparison, unresolved property is not equal to o\n\n def __call__(self):\n value = self.cell._properties.get(self.name)\n\n if isinstance(value, QiCellProperty) or value is None:\n raise KeyError(\"Property could not be resolved\")\n return self.operations(value)\n\n @property\n def value(self):\n if self.type == QiType.TIME:\n return util.conv_time_to_cycles(self())\n elif self.type == QiType.FREQUENCY:\n return util.conv_freq_to_nco_phase_inc(self())\n elif self.type == QiType.NORMAL:\n return self()\n elif self.type == QiType.STATE:\n return self()\n else:\n raise RuntimeError(\n \"Mising type information to resolve value to convert to a machine value.\"\n )\n\n @property\n def float_value(self):\n assert self.type in (QiType.TIME, QiType.FREQUENCY)\n return self()\n\n @abstractmethod\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_cell_property(self)\n\n @property\n def contained_variables(self):\n return QiVariableSet()\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n return isinstance(other, QiCellProperty) and self.resolve_equal(other)\n\n def move_add_op_to_property(self, x: _QiConstValue):\n if x._given_value == 0:\n return self\n old_op = self.operations # Necessary because of recursion otherwise\n self.operations = lambda val: old_op(val) + x.value\n self.opcode = f\"{self.opcode_p} + {x}\"\n return self\n\n def move_radd_op_to_property(self, x: _QiConstValue):\n if x._given_value == 0:\n return self\n old_op = self.operations\n self.operations = lambda val: x.value + old_op(val)\n self.opcode = f\"{self.opcode_p} + {x}\"\n return self\n\n def move_sub_op_to_property(self, x: _QiConstValue):\n if x._given_value == 0:\n return self\n old_op = self.operations\n self.operations = lambda val: old_op(val) - x.value\n self.opcode = f\"{self.opcode_p} - {x}\"\n return self\n\n def move_rsub_op_to_property(self, x: _QiConstValue):\n old_op = self.operations\n self.operations = lambda val: x.value - old_op(val)\n self.opcode = f\"{x} - {self.opcode_p}\"\n return self\n\n def move_mul_op_to_property(self, x: _QiConstValue):\n if x._given_value == 1:\n return self\n old_op = self.operations\n self.operations = lambda val: old_op(val) * x.value\n self.opcode = f\"{x} * {self.opcode_p}\"\n return self\n\n def move_rmul_op_to_property(self, x: _QiConstValue):\n if x._given_value == 1:\n return self\n old_op = self.operations\n self.operations = lambda val: x.value * old_op(val)\n self.opcode = f\"{x} * {self.opcode_p}\"\n return self\n\n # These operations are not implemented for general QiExpressions\n # and are, therefore, left as they are.\n\n def __truediv__(self, x):\n if (isinstance(x, 
_QiConstValue) and x._given_value == 1) or x == 1:\n return self\n old_op = self.operations\n self.operations = lambda val: old_op(val) / x\n self.opcode = f\"{self.opcode_p} / {x}\"\n return self\n\n def __rtruediv__(self, x):\n old_op = self.operations\n self.operations = lambda val: x / old_op(val)\n self.opcode = f\"{x} / {self.opcode_p}\"\n return self"
},
{
"identifier": "QiVariableSet",
"path": "src/qiclib/code/qi_var_definitions.py",
"snippet": "class QiVariableSet:\n \"\"\"Class provides Set functionality for QiVariables.\n QiVariables overwrite comparison operations to build operation trees, to still allow comparisons ids are used.\n \"\"\"\n\n def __init__(self) -> None:\n self._var_list: List[\"_QiVariableBase\"] = []\n self._var_id_list: List[int] = []\n\n def __contains__(self, x):\n return x.id in self._var_id_list\n\n def add(self, x: \"_QiVariableBase\"):\n if x.id not in self._var_id_list:\n self._var_id_list.append(x.id)\n self._var_list.append(x)\n\n def update(self, var_set):\n for var in var_set:\n self.add(var)\n\n def __iter__(self):\n self.n = 0\n return self\n\n def __next__(self):\n if self.n < len(self._var_list):\n var = self._var_list[self.n]\n self.n += 1\n return var\n else:\n raise StopIteration\n\n def __len__(self):\n return len(self._var_list)"
},
{
"identifier": "_QiCalcBase",
"path": "src/qiclib/code/qi_var_definitions.py",
"snippet": "class _QiCalcBase(QiExpression):\n \"\"\"Represents binary and unary operations.\"\"\"\n\n def __init__(self, val1, op, val2) -> None:\n super().__init__()\n\n self.val1 = val1\n self.op: QiOp = op\n self.val2 = val2\n\n from .qi_types import add_qi_calc_constraints\n\n add_qi_calc_constraints(op, val1, val2, self)\n\n @property\n def contained_variables(self):\n \"\"\"Function traverses the operation tree to determine which QiVariables are used for the calculations.\n Found QiVariables are added to _contained_variables\"\"\"\n if len(self._contained_variables) == 0:\n self._variables_to_container()\n\n return self._contained_variables\n\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_calc(self)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n return (\n isinstance(other, _QiCalcBase)\n and self.op == other.op\n and self.val1._equal_syntax(other.val1)\n and self.val2._equal_syntax(other.val2)\n )\n\n def __str__(self):\n return (\n \"(\"\n + self.val1.__str__()\n + \" \"\n + self.op.value\n + \" \"\n + self.val2.__str__()\n + \")\"\n )"
},
{
"identifier": "_QiVariableBase",
"path": "src/qiclib/code/qi_var_definitions.py",
"snippet": "class _QiVariableBase(QiExpression):\n \"\"\"Base class for QiVariables.\n Variables can be relevant to only a subset of QiCells, this subset is saved in _relevant_cells.\n Variables are simple expressions and, therefore, are typed.\n Variables can be compared by self.id.\"\"\"\n\n id_iter = itertools.count()\n str_id_iter = itertools.count()\n\n def __init__(\n self,\n type: QiType,\n value: Optional[Union[int, float]] = None,\n name=None,\n ):\n from .qi_jobs import QiCell\n\n assert isinstance(type, QiType)\n assert value is None or isinstance(value, (int, float))\n\n super().__init__()\n\n if type != QiType.UNKNOWN:\n self._type_info.set_type(type, _TypeDefiningUse.VARIABLE_DEFINITION)\n\n self.value = value\n\n self._value = value\n self._relevant_cells: Set[QiCell] = set()\n self.id = next(_QiVariableBase.id_iter)\n self.str_id = next(_QiVariableBase.str_id_iter)\n\n self._contained_variables.add(self)\n\n self.name = name\n\n @property\n def contained_variables(self):\n return self._contained_variables\n\n @staticmethod\n def reset_str_id():\n _QiVariableBase.str_id_iter = itertools.count()\n\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_variable(self)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n return isinstance(other, _QiVariableBase) and self.id == other.id\n\n def __hash__(self) -> int:\n return self.id\n\n def __str__(self) -> str:\n return f\"QiVariable({self.name or ''})\""
},
{
"identifier": "QiExpression",
"path": "src/qiclib/code/qi_var_definitions.py",
"snippet": "class QiExpression:\n \"\"\"Superclass of every possible qicode expression.\"\"\"\n\n def __init__(self):\n self._contained_variables = QiVariableSet()\n self._type_info = _TypeInformation(self)\n\n @property\n def type(self):\n return self._type_info.type\n\n @staticmethod\n def _from(x):\n \"\"\"Creates an instance of QiExpression of the provided argument if possible.\"\"\"\n if isinstance(x, (float, int)):\n return _QiConstValue(x)\n elif isinstance(x, QiExpression):\n return x\n else:\n raise RuntimeError(f\"Can not create QiExpression from type {type(x)}.\")\n\n @abstractmethod\n def accept(self, visitor: QiExpressionVisitor):\n raise NotImplementedError(\n f\"{self.__class__} has not implemented `accept`. This is a bug.\"\n )\n\n @property\n def contained_variables(self):\n \"\"\"Returns the variables used in this expression.\n QiExpression subclasses which contain variables (_QiCalcBase and _QiVariableBase) need to overwrite this.\n \"\"\"\n raise NotImplementedError(\n f\"{self.__class__} has not implemented `contained_variables`. This is a bug.\"\n )\n\n def _variables_to_container(self):\n if isinstance(self, _QiVariableBase):\n self._contained_variables.add(self)\n elif isinstance(self, _QiCalcBase):\n self._contained_variables.update(self.val1.contained_variables)\n self._contained_variables.update(self.val2.contained_variables)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n raise NotImplementedError(\n f\"{self.__class__} has not implemented `_equal_syntax`. This is a bug.\"\n )\n\n # QiCellProperties are supposed to support some form of constant folding.\n # However, originally, instead of implementing this in an extra pass over\n # QiJob they were added to the QiCellProperty class.\n # In order to keep support for this limited form of constant folding\n # This logic was placed here.\n\n # (I'm not sure why we don't fold when both operands are QiCellProperty.\n # And I think the reason we don't fold tow _QiConstValue is that originally\n # They were just int/float and would \"fold\" implicitely when using any\n # math operator on them)\n\n # If anyone ever feels the need to improve this situation, I would\n # encourage them to implement a constant folding pass using the existing\n # dataflow infrastructure.\n # This pdf seems to give a nice short introduction into the topic:\n # http://openclassroom.stanford.edu/MainFolder/courses/Compilers/docs/slides/15-02-constant-propagation-annotated.pdf\n\n def __add__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_add_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_radd_op_to_property(self)\n else:\n return _QiCalcBase(self, QiOp.PLUS, x)\n\n def __radd__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_radd_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_add_op_to_property(self)\n else:\n return _QiCalcBase(x, QiOp.PLUS, self)\n\n def __sub__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_sub_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_rsub_op_to_property(self)\n else:\n return _QiCalcBase(self, QiOp.MINUS, x)\n\n def __rsub__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, 
QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_rsub_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_sub_op_to_property(self)\n else:\n return _QiCalcBase(x, QiOp.MINUS, self)\n\n def __mul__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_mul_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_rmul_op_to_property(self)\n else:\n return _QiCalcBase(self, QiOp.MULT, x)\n\n def __rmul__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_rmul_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_mul_op_to_property(self)\n else:\n return _QiCalcBase(x, QiOp.MULT, self)\n\n def __lshift__(self, x):\n return _QiCalcBase(self, QiOp.LSH, QiExpression._from(x))\n\n def __rshift__(self, x):\n return _QiCalcBase(self, QiOp.RSH, QiExpression._from(x))\n\n def __and__(self, x):\n return _QiCalcBase(self, QiOp.AND, QiExpression._from(x))\n\n def __rand__(self, x):\n return _QiCalcBase(QiExpression._from(x), QiOp.AND, self)\n\n def __or__(self, x):\n return _QiCalcBase(self, QiOp.OR, QiExpression._from(x))\n\n def __ror__(self, x):\n return _QiCalcBase(QiExpression._from(x), QiOp.OR, self)\n\n def __xor__(self, x):\n return _QiCalcBase(self, QiOp.XOR, QiExpression._from(x))\n\n def __rxor__(self, x):\n return _QiCalcBase(QiExpression._from(x), QiOp.XOR, self)\n\n def __invert__(self):\n return _QiCalcBase(self, QiOp.NOT, None)\n\n def __lt__(self, x):\n return QiCondition(self, QiOpCond.LT, QiExpression._from(x))\n\n def __le__(self, x):\n return QiCondition(self, QiOpCond.LE, QiExpression._from(x))\n\n def __gt__(self, x):\n return QiCondition(self, QiOpCond.GT, QiExpression._from(x))\n\n def __ge__(self, x):\n return QiCondition(self, QiOpCond.GE, QiExpression._from(x))\n\n def __eq__(self, x):\n return QiCondition(self, QiOpCond.EQ, QiExpression._from(x))\n\n def __ne__(self, x):\n return QiCondition(self, QiOpCond.NE, QiExpression._from(x))"
},
{
"identifier": "_QiConstValue",
"path": "src/qiclib/code/qi_var_definitions.py",
"snippet": "class _QiConstValue(QiExpression):\n \"\"\"Represents QiExpression which are a constant (compiletime known) values.\n Integers can be used as either NORMAL, TIME or FREQUENCY values. It is up to the type inference to figure it out.\n If the value can be represented as a float value it has an additional attribute float_value which represents the value before\n it has been converted to the integer representation used by the sequencer.\n \"\"\"\n\n def __init__(self, value: Union[int, float]):\n super().__init__()\n\n self._given_value = value # Value given to the constructor. Is interpreted differently depending on the type.\n\n # Constant STATE values can only be 0 or 1, therefore we forbid QiType.STATE if we have a different value.\n if isinstance(self._given_value, float) or self._given_value not in [1, 0]:\n self._type_info.add_illegal_type(\n QiType.STATE, _IllegalTypeReason.INVALID_STATE_CONSTANT\n )\n\n if isinstance(self._given_value, float):\n self._type_info.add_illegal_type(\n QiType.NORMAL, _IllegalTypeReason.INVALID_NORMAL_CONSTANT\n )\n\n @property\n def float_value(self):\n assert self.type in (QiType.TIME or self.type, QiType.FREQUENCY)\n return self._given_value\n\n @property\n def value(self):\n \"\"\"\n Integer representation of the constant value.\n Since the sequencer doesn't have a floating point unit, any calculations has to be using integers.\n In practice, this means we only perform fixpoint arithmetic and need to convert any float like value\n to such an fixpoint value.\n The correct conversion depends on the type.\n \"\"\"\n if self.type in (QiType.NORMAL, QiType.STATE, QiType.UNKNOWN):\n return self._given_value\n elif self.type == QiType.TIME:\n return int(util.conv_time_to_cycles(self._given_value, \"ceil\"))\n else:\n assert self.type == QiType.FREQUENCY\n return util.conv_freq_to_nco_phase_inc(self._given_value)\n\n @property\n def contained_variables(self):\n return QiVariableSet()\n\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_constant(self)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n assert QiType.UNKNOWN not in (self.type, other.type)\n return isinstance(other, _QiConstValue) and self.value == other.value\n\n def __str__(self):\n if self.type in (QiType.TIME, QiType.FREQUENCY):\n value = self.float_value\n elif self.type in (QiType.NORMAL, QiType.STATE, QiType.UNKNOWN):\n value = self.value\n else:\n raise RuntimeError(\n \"This program point should be unreacheable. Please file a bug report.\"\n )\n return f\"{value:g}\""
},
{
"identifier": "QiCondition",
"path": "src/qiclib/code/qi_var_definitions.py",
"snippet": "class QiCondition:\n \"\"\"Saves conditional comparisons.\n Can only be root node\"\"\"\n\n def __init__(\n self,\n val1: QiExpression,\n op: QiOpCond = QiOpCond.GT,\n val2: QiExpression = _QiConstValue(0),\n ) -> None:\n self._contained_variables = QiVariableSet()\n\n self.val1 = val1\n self.op = op\n self.val2 = val2\n\n from .qi_types import add_qi_condition_constraints\n\n add_qi_condition_constraints(op, val1, val2)\n\n @property\n def contained_variables(self):\n if len(self._contained_variables) == 0:\n self._contained_variables.update(self.val1.contained_variables)\n self._contained_variables.update(self.val2.contained_variables)\n\n return self._contained_variables\n\n def accept(self, visitor):\n visitor.visit_condition(self)\n\n def __str__(self) -> str:\n return f\"{self.val1} {self.op.value} {self.val2}\""
},
{
"identifier": "QiOpCond",
"path": "src/qiclib/code/qi_var_definitions.py",
"snippet": "class QiOpCond(Enum):\n LT = \"<\"\n LE = \"<=\"\n GT = \">\"\n GE = \">=\"\n EQ = \"==\"\n NE = \"!=\"\n\n @staticmethod\n def invert(condition):\n inverted = {\n QiOpCond.EQ: QiOpCond.NE,\n QiOpCond.NE: QiOpCond.EQ,\n QiOpCond.LT: QiOpCond.GE,\n QiOpCond.LE: QiOpCond.GT,\n QiOpCond.GT: QiOpCond.LE,\n QiOpCond.GE: QiOpCond.LT,\n }\n inv = inverted.get(condition)\n if inv is None:\n raise RuntimeError(\"Condition not found: \" + str(condition))\n return inv"
},
{
"identifier": "QiOp",
"path": "src/qiclib/code/qi_var_definitions.py",
"snippet": "class QiOp(Enum):\n PLUS = \"+\"\n MINUS = \"-\"\n MULT = \"*\"\n LSH = \"<<\"\n RSH = \">>\"\n AND = \"&\"\n OR = \"|\"\n XOR = \"^\"\n NOT = \"~\""
},
{
"identifier": "SeqLoad",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqLoad(SeqITypeInst):\n def __init__(\n self,\n dst: int,\n base: int,\n offset: int = 0,\n ):\n \"\"\"Load Sequencer instruction.\n\n :param dst: The register address which will contain the loaded value.\n :param base: The register address which contains the source address.\n :param offset: Constant offset added to the source address. Defaults to 0.\n :param width: Number of bits to be loaded. Defaults to 32.\n :param signed: Is the loaded value signed. Depending on this flag the loaded value is sign extended.\n \"\"\"\n\n assert SequencerInstruction.is_value_in_lower_immediate(\n offset\n ), \"Invalid offset ({offset}) to load instruction.\"\n\n # The hardware currently only supports 32 bit memory accesses.\n super().__init__(\n SeqOpCode.LOAD,\n SeqMemFunct3.get_from_width(32, False),\n dst,\n base,\n offset,\n )\n\n @property\n def base_reg(self):\n return self.register"
},
{
"identifier": "SeqStore",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqStore(SeqSTypeInst):\n \"\"\"Store Sequencer instruction.\n\n :param src: The register address which contains the value to be stored.\n :param base: The register address which contains the destination address.\n :param offset: Constant offset added to the destination address. Defaults to 0.\n :param width: Number of bits to be stored. Defaults to 32.\n \"\"\"\n\n def __init__(\n self,\n src: int,\n base: int,\n offset: int = 0,\n ):\n assert SequencerInstruction.is_value_in_lower_immediate(\n offset\n ), \"Invalid offset ({offset}) to store instruction.\"\n\n # The hardware currently only supports 32 bit memory accesses.\n super().__init__(\n SeqOpCode.STORE, SeqMemFunct3.get_from_width(32, False), base, src, offset\n )\n\n @property\n def base_reg(self):\n return self.reg1\n\n @property\n def src_reg(self):\n return self.reg2"
},
{
"identifier": "SeqAwaitQubitState",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqAwaitQubitState(SeqITypeInst):\n def __init__(\n self,\n cell: int = 0,\n dst: int = 0,\n ) -> None:\n super().__init__(\n SeqOpCode.SYNCH, SeqExtSynchFunct3.QUBIT_STATE, dst, 0, cell, 0\n )"
},
{
"identifier": "SequencerInstruction",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SequencerInstruction:\n OPCODE_WIDTH = 7\n FUNCT3_WIDTH = 3\n FUNCT7_WIDTH = 7\n REGISTER_WIDTH = 5\n LOWER_IMMEDIATE_WIDTH = 12\n UPPER_IMMEDIATE_WIDTH = 20\n\n LOWER_IMM_MAX = (\n 2 ** (LOWER_IMMEDIATE_WIDTH - 1)\n ) - 1 # Lower immediate 12 Bits - 1Bit Signed\n LOWER_IMM_MIN = -(2 ** (LOWER_IMMEDIATE_WIDTH - 1))\n\n UPPER_IMM_MAX = (\n 2 ** (UPPER_IMMEDIATE_WIDTH - 1)\n ) - 1 # Upper immediate 20 Bits - 1Bit Signed\n UPPER_IMM_MIN = -(2 ** (UPPER_IMMEDIATE_WIDTH - 1))\n UPPER_IMM_MAX_UNSIGNED = 2**UPPER_IMMEDIATE_WIDTH\n\n imm_type = Union[int] # might include float in the future\n\n def __init__(self, OpCode: SeqOpCode) -> None:\n self.op = OpCode\n\n @staticmethod\n def is_value_in_lower_immediate(val: imm_type) -> bool:\n return (\n SequencerInstruction.LOWER_IMM_MIN\n <= val\n <= SequencerInstruction.LOWER_IMM_MAX\n )\n\n @staticmethod\n def is_value_in_unsigned_upper_immediate(val: imm_type) -> bool:\n return SequencerInstruction.UPPER_IMM_MAX_UNSIGNED >= abs(val)\n\n @abstractmethod\n def get_riscv_instruction(self) -> int:\n pass"
},
{
"identifier": "SeqRegImmediateInst",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqRegImmediateInst(SeqITypeInst):\n def __init__(\n self,\n operator: QiOp,\n dst_reg: int = 0,\n register: int = 0,\n immediate: SequencerInstruction.imm_type = 0,\n ) -> None:\n funct3 = super().QiOpToFunct3(operator)\n funct7 = super().QiOpToFunct7(operator)\n super().__init__(\n SeqOpCode.REG_IMM, funct3, dst_reg, register, immediate, funct7\n )"
},
{
"identifier": "SeqRegRegInst",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqRegRegInst(SeqRTypeInst):\n def __init__(\n self, operator: QiOp, dst_reg: int = 0, reg_1: int = 0, reg_2: int = 0\n ) -> None:\n funct3 = super().QiOpToFunct3(operator)\n funct7 = super().QiOpToFunct7(operator)\n super().__init__(\n SeqOpCode.REGISTER_REGISTER, funct3, funct7, dst_reg, reg_1, reg_2\n )"
},
{
"identifier": "SeqLoadUpperImm",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqLoadUpperImm(SeqUTypeInst):\n def __init__(\n self, dst_reg: int = 0, immediate: SequencerInstruction.imm_type = 0\n ) -> None:\n super().__init__(SeqOpCode.LOAD_UPPER_IMM, dst_reg, immediate)"
},
{
"identifier": "SeqJump",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqJump(SequencerInstruction):\n \"\"\"Does not represent actual J-Type instruction, RISC-V only supports address sizes as multiples of 2\"\"\"\n\n def __init__(self, rel_jump: int = 0) -> None:\n super().__init__(SeqOpCode.JUMP)\n self.jump_val = rel_jump\n\n def get_riscv_instruction(self) -> int:\n instruction = 0\n instruction |= self.op.value\n instruction |= (\n (self.jump_val & 0x7F800) >> 11\n ) << SequencerInstruction.OPCODE_WIDTH + SequencerInstruction.REGISTER_WIDTH\n instruction |= (\n ((self.jump_val & 0x400) >> 10)\n << SequencerInstruction.OPCODE_WIDTH\n + SequencerInstruction.REGISTER_WIDTH\n + 8\n )\n instruction |= (\n (self.jump_val & 0x3FF)\n << SequencerInstruction.OPCODE_WIDTH\n + SequencerInstruction.REGISTER_WIDTH\n + 9\n )\n instruction |= (\n ((self.jump_val & 0x80000) >> 19)\n << SequencerInstruction.OPCODE_WIDTH\n + SequencerInstruction.REGISTER_WIDTH\n + 19\n )\n\n return instruction\n\n def __str__(self) -> str:\n return f\"Op: {self.op.name}, immediate: {hex(self.jump_val)}\\n\""
},
{
"identifier": "SeqBranch",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqBranch(SeqBTypeInst):\n def __init__(self, operator, reg1: int, reg2: int, rel_jump: int = 0) -> None:\n op_reg1_reg2 = super().get_register_operation_tuple(operator, reg1, reg2)\n super().__init__(\n SeqOpCode.BRANCH,\n op_reg1_reg2[0],\n op_reg1_reg2[1],\n op_reg1_reg2[2],\n rel_jump,\n )\n\n def set_jump_value(self, jump_val: int):\n self.immediate = jump_val"
},
{
"identifier": "SeqWaitImm",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqWaitImm(SeqUTypeInst):\n def __init__(self, duration: int = 0) -> None:\n super().__init__(\n OpCode=SeqOpCode.WAIT_IMM, immediate=((duration & 0xFFFFF) << 12)\n )\n\n @property\n def immediate(self):\n return self._immediate >> 12\n\n def __str__(self):\n return f\"Op: {self.op.name}, dst: {str(self.dst_reg)}, immediate: {hex(self.immediate & 0x000FFFFF)}\\n\""
},
{
"identifier": "SeqWaitRegister",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqWaitRegister(SeqUTypeInst):\n def __init__(self, reg: int) -> None:\n super().__init__(OpCode=SeqOpCode.WAIT_REG, dst_reg=reg)"
},
{
"identifier": "SeqTrigger",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqTrigger(SeqUTypeInst):\n def __init__(\n self,\n module0: int = 0,\n module1: int = 0,\n module2: int = 0,\n module3: int = 0,\n module4: int = 0,\n sync=False,\n reset=False,\n ) -> None:\n self._trig_indices = [module0, module1, module2, module3, module4]\n\n immediate = 0\n immediate |= (reset & 0x1) << 12\n immediate |= (sync & 0x1) << 14\n immediate |= (module0 & 0xF) << 16\n immediate |= (module1 & 0xF) << 20\n immediate |= (module2 & 0xF) << 22\n immediate |= (module3 & 0xF) << 26\n immediate |= (module4 & 0xF) << 30\n super().__init__(OpCode=SeqOpCode.TRIGGER, immediate=immediate)\n\n def __str__(self) -> str:\n return (\n f\"Op: {self.op.name}, mod0: {hex(self._trig_indices[0])}, mod1: {hex(self._trig_indices[1])}\"\n f\", mod2: {hex(self._trig_indices[2])}, mod3: {hex(self._trig_indices[3])}, mod4: {hex(self._trig_indices[4])}\\n\"\n )"
},
{
"identifier": "SeqEnd",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqEnd(SeqSTypeInst):\n def __init__(self) -> None:\n super().__init__(SeqOpCode.SYNCH, SeqExtSynchFunct3.START, 0, 0, 0)"
},
{
"identifier": "SeqTriggerWaitRegister",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqTriggerWaitRegister(SeqUTypeInst):\n def __init__(self, reg: int) -> None:\n super().__init__(OpCode=SeqOpCode.TRIG_WAIT_REG, dst_reg=reg)"
},
{
"identifier": "_get_for_range_iterations",
"path": "src/qiclib/code/qi_util.py",
"snippet": "def _get_for_range_iterations(start, end, step):\n \"\"\"Returns number of iterations of ForRange or None if start or end are QiVariables.\n Stupid but no need to check validity of input, in case of unrolled loop\"\"\"\n from .qi_var_definitions import _QiVariableBase, _QiConstValue, QiCellProperty\n\n if (\n isinstance(start, _QiVariableBase)\n or start is None\n or isinstance(end, _QiVariableBase)\n or end is None\n ):\n return None\n\n if isinstance(start, (_QiConstValue, QiCellProperty)):\n start = start.value\n if isinstance(end, (_QiConstValue, QiCellProperty)):\n end = end.value\n if isinstance(step, (_QiConstValue, QiCellProperty)):\n step = step.value\n\n iterations = 0\n for _ in range(start, end, step):\n iterations += 1\n return iterations"
}
] | from enum import Enum
from typing import List, Union, Any, Dict, Optional, Tuple
from qiclib.code.qi_jobs import (
ForRange,
If,
Parallel,
cQiRecording,
cQiSync,
)
from .qi_var_definitions import (
QiCellProperty,
QiVariableSet,
_QiCalcBase,
_QiVariableBase,
QiExpression,
_QiConstValue,
QiCondition,
QiOpCond,
QiOp,
)
from .qi_seq_instructions import (
SeqLoad,
SeqStore,
SeqAwaitQubitState,
SequencerInstruction,
SeqRegImmediateInst,
SeqRegRegInst,
SeqLoadUpperImm,
SeqJump,
SeqBranch,
SeqWaitImm,
SeqWaitRegister,
SeqTrigger,
SeqEnd,
SeqTriggerWaitRegister,
)
from .qi_util import _get_for_range_iterations
from .qi_var_definitions import _QiVariableBase
from .qi_var_definitions import _QiCalcBase
from .qi_var_definitions import _QiVariableBase
from .qi_jobs import _cQiPlay_base
import warnings
import qiclib.packages.utility as util | 10,761 | return util.conv_cycles_to_time(self.RECORDING_MODULE_DELAY_CYCLES)
@property
def readout_active(self):
return self._trigger_mods.is_readout_active
@property
def manipulation_active(self):
return self._trigger_mods.is_manipulation_active
def add_variable(self, var):
"""Adds variable to sequencer, reserving a register for it"""
reg = self.request_register()
self._var_reg_dict[var.id] = reg
# Named variables can be initialized externally
if var.name is not None:
reg.valid = False
reg.value = 0
def release_variable(self, var):
self.release_register(self.get_var_register(var))
def get_var_register(self, var) -> _Register:
"""Returns _Register of QiVariable var"""
reg = self._var_reg_dict.get(var.id)
if reg is None:
raise RuntimeError(
f"Variable not defined for Sequencer, var.id:{var.id}, {self._var_reg_dict}"
)
return reg
def get_var_value(self, var) -> Union[int, float, None]:
return self.get_var_register(var).get_value()
def request_register(self) -> _Register:
"""Returns register from stack, raises exception, if no registers are on stack anymore"""
try:
return self._register_stack.pop()
except IndexError as e:
print(
"Not enough registers available, sequencer "
+ str(self)
+ " error: "
+ str(e)
)
raise
def get_cycles_from_length(self, length) -> Union[_Register, int]:
"""If length is QiVariable, return _Register, else return numbers of cycles ceiled"""
if isinstance(length, _QiVariableBase):
return self.get_var_register(length)
elif isinstance(length, int):
length = float(length)
return util.conv_time_to_cycles(length, "ceil")
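# Illustrative sketch (not part of the original file): assuming a hypothetical 4 ns
# sequencer clock, get_cycles_from_length(10e-9) would be ceiled to 3 cycles, whereas
# passing a QiVariable returns the _Register backing that variable instead of a count.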
def release_register(self, reg: _Register):
"""Returns register to stack; Raises exception when register is already in stack, or addressing is faulty.
Releasing register 0 does nothing"""
if reg in self._register_stack:
raise IndexError("Release Register: Already released register")
if (reg.adr > Sequencer.AVAILABLE_REGISTERS) or (reg.adr < 0):
raise IndexError("Release Register: Address out of Range")
if reg == self.reg0:
return
reg.valid = True # if register was invalidated and is released again, return it to initial valid state
self._register_stack.append(reg)
def add_instruction_to_list(
self,
instruction: SequencerInstruction,
length_in_cycles: int = 1,
length_valid=True,
):
"""Adds instruction to list. If pulses are still running, adds choke instruction before adding the current command to the list"""
if self._trigger_mods.is_pulse_active:
self.trigger_choke_pulse()
if length_in_cycles == 0:
length_in_cycles = 1 # length is always at least 1 per instruction
self.instruction_list.append(instruction)
self._prog_cycles.add(
length_in_cycles, length_valid
) # Will be deprecated when external sync is possible.
def get_prog_size(self) -> int:
return len(self.instruction_list)
def add_mov_command(self, dst_reg: _Register, src_reg: _Register):
"""Copies value of src_reg to dst_reg."""
self.add_calculation(src_reg, QiOp.PLUS, 0, dst_reg)
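# Note (added comment, not in the original file): this presumably emits an addition of
# src_reg + 0 into dst_reg via add_calculation, which is how the sequencer realises a
# register copy without a dedicated MOV instruction.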
def get_upper_immediate_value(self, value: SequencerInstruction.imm_type):
"""If bit 11 of lower value is 1, ADDI command sign extends the value. To account for that, sign extend lower 12 bits
and subtract from upper 20 bits."""
sign_extended_lower = (
value | 0xFFFFF000 if value & 0x00000800 != 0 else value & 0x00000FFF
)
return (value - sign_extended_lower) & 0xFFFFF000
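# Worked example (hypothetical values, not from the original file): for value = 0x00000FFF,
# bit 11 is set, so the lower 12 bits sign-extend to 0xFFFFFFFF (-1). The function then
# returns (0xFFF - 0xFFFFFFFF) & 0xFFFFF000 = 0x00001000; loading that upper part and
# adding the lower 12 bits (0xFFF, sign-extended to -1 by ADDI) reconstructs 0x00000FFF.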
def immediate_to_register(
self, val: SequencerInstruction.imm_type, dst_reg: Optional[_Register] = None
) -> _Register:
"""Loads immediate to dst_reg.
If dst_reg is not defined, a new register is requested to hold val.
If value == 0 and no register is specified, reg0 is returned, which always contains 0.
dst_reg.value is updated to reflect changes."""
if val == 0 and dst_reg is None:
return self.reg0
elif dst_reg is None:
dst_reg = self.request_register()
if isinstance(val, float):
raise NotImplementedError("float not implemented yet")
if SequencerInstruction.is_value_in_lower_immediate(val):
self.add_instruction_to_list(
| # Copyright © 2017-2023 Quantum Interface ([email protected])
# Richard Gebauer, IPE, Karlsruhe Institute of Technology
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
The lower level logic of the code generation.
This module tracks the sequencer state at the current point (e.g. register values, variable to register mapping, etc.),
provides helper functions to generate code for expressions and more.
"""
class _Register:
"""Class of Sequencer representing registers.
Keeps track of the value held in a register. Values are used to determine the program length; the program length is invalidated by the use of If/Else.
TODO: load commands invalidate the value"""
def __init__(self, address) -> None:
self.adr = address
self.value = None
self.valid = True
def addition(self, val1, val2):
self.value = val1 + val2
def subtraction(self, val1, val2):
self.value = val1 - val2
def multiplication(self, val1, val2):
self.value = val1 * val2
def and_values(self, val1, val2):
self.value = val1 & val2
def or_values(self, val1, val2):
self.value = val1 | val2
def xor_values(self, val1, val2):
self.value = val1 ^ val2
def lshift(self, val1, val2):
self.value = val1 << val2
def rshift(self, val1, val2):
self.value = val1 >> val2
def inversion(self, val1, val2):
self.value = ~val1
# Dictionary used to receive function from input QiOp
eval_operation = {
QiOp.PLUS: addition,
QiOp.MINUS: subtraction,
QiOp.MULT: multiplication,
QiOp.AND: and_values,
QiOp.OR: or_values,
QiOp.XOR: xor_values,
QiOp.LSH: lshift,
QiOp.RSH: rshift,
QiOp.NOT: inversion,
}
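# Illustrative usage (not from the original file): the dispatch table stores the plain
# functions defined above, so they are called with the register as first argument, e.g.
#   _Register.eval_operation[QiOp.PLUS](reg, 2, 3)   # sets reg.value to 5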
def get_value(self):
if self.valid:
return self.value
return None
def update_register_value(self, val1, op, val2):
"""Register Values are updated to allow implicit synchronisations through wait when variable Wait/Pulse is used.
When a calculation is done using a invalid variable value, the ensuing value is also invalidated.
"""
if self.adr == 0:
self.value = 0 # reg0 always contains 0
return
if isinstance(val1, _Register):
if val1.value is None:
raise RuntimeError(
f"Variable at Register {val1.adr} has not been properly initialised"
)
if not val1.valid:
self.valid = False
val1 = val1.value
if isinstance(val2, _Register):
if val2.value is None:
raise RuntimeError(
f"Variable at Register {val2.adr} has not been properly initialised"
)
if not val2.valid:
self.valid = False
val2 = val2.value
self.eval_operation[op](self, val1, val2)
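# Example: if val1 is a _Register holding 2, op is QiOp.PLUS and val2 is 5, this
# register's value becomes 7; if val1 had been marked invalid, the value is still
# computed but this register's valid flag is cleared as well.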
class ForRangeEntry:
def __init__(self, reg_addr, start_val, end_val, step_val) -> None:
self.reg_addr = reg_addr
self.start = start_val
self.end = end_val
self.step = step_val
self.end_addr = 0
self.iterations = 0
self.aggregate_iterations = 0
self.contained_entries: List[ForRangeEntry] = []
def _calc_aggregate(self):
"""Calculates the number of loops contained inside, considering nested entries, for later use at progress bar."""
self.iterations = _get_for_range_iterations(self.start, self.end, self.step)
if len(self.contained_entries) == 0 or self.iterations is None:
if self.iterations is None:
self.aggregate_iterations = 0
warnings.warn(
"A loop with variable start/end could not be counted towards total loop count. Progress bar might be inaccurate."
)
else:
self.aggregate_iterations = self.iterations
else:
nested = 0
for entry in self.contained_entries:
if entry.aggregate_iterations == 0:
warnings.warn(
"A loop with variable start/end could not be counted towards total loop count. Progress bar might be inaccurate."
)
continue
nested += entry.aggregate_iterations
self.aggregate_iterations = self.iterations * (nested if nested != 0 else 1)
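# Example: a loop running 3 times that directly contains two inner loops counting
# 5 iterations each ends up with aggregate_iterations = 3 * (5 + 5) = 30.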
def get_iteration(self, value: int) -> int:
"""Returns the current iteration depending on the parameter value"""
if isinstance(self.start, _QiVariableBase):
return 0
_step = self.step if isinstance(self.step, int) else self.step.value
iterations = 0
for _ in range(self.start, value, _step):
iterations += 1
return iterations
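# Example: with start 0 and step 2, a current register value of 6 yields 3, since
# the iterations for the values 0, 2 and 4 have already been completed.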
@staticmethod
def get_total_loops(entry_list):
if len(entry_list) == 0:
return 1
iterations = 0
for entry in entry_list:
iterations += entry.aggregate_iterations
return iterations if iterations > 0 else 1
@staticmethod
def calculate_current_loop(entry_list, register_list, prog_counter):
loop = 0
for entry in entry_list:
if entry.end_addr < prog_counter:
loop += entry.aggregate_iterations
else:
iteration = entry.get_iteration(register_list[entry.reg_addr])
if len(entry.contained_entries) == 0:
loop += iteration
else:
loop += iteration * ForRangeEntry.get_total_loops(
entry.contained_entries
) + ForRangeEntry.calculate_current_loop(
entry.contained_entries, register_list, prog_counter
)
return loop
return loop
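# calculate_current_loop: loops that already finished (end_addr before the program
# counter) contribute their full aggregate count; the first still-running loop
# contributes its partial progress and the method returns immediately after it.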
class Sequencer:
AVAILABLE_REGISTERS = 31
MULTIPLICATION_LENGTH = 6
JUMP_EXECUTION_CYCLES = 2
LOAD_STORE_LENGTH = 8
# Additional delay to prevent ignored trigger for consecutive readouts
RECORDING_MODULE_DELAY_CYCLES = 1
CHOKE_PULSE_INDEX = 14
def __init__(self, cell_index=None):
self.alu = _ALU(self)
self.reset()
self.cell_index = cell_index
def reset(self):
self._register_stack: List[_Register] = []
self.instruction_list: List[SequencerInstruction] = []
self._prog_cycles = _ProgramCycles()
self._var_reg_dict: Dict[Any, _Register] = {}
self._trigger_mods = _TriggerModules()
self._for_range_list = []
self._for_range_stack: List[ForRangeEntry] = []
# register 0 always contains 0, so is not in stack
self.reg0 = _Register(0)
for x in range(Sequencer.AVAILABLE_REGISTERS, 0, -1):
self._register_stack.append(_Register(x))
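# Registers 31 down to 1 are pushed in that order, so the first request_register()
# call pops register 1; register 0 is kept separate as the constant-zero register.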
def print_assembler(self):
pc = 0
for instruction in self.instruction_list:
print(str(pc) + "# ", end="")
print(instruction)
pc += 1
@property
def prog_cycles(self):
"""Program length is used for implicit synchs with Wait-Commands. If a program contains variable If/Else or loads to wait registers
prog_length can not be determined. Invalid prog_cycles are some value less than 0.
"""
if self._prog_cycles.valid:
return self._prog_cycles.cycles
return _ProgramCycles.INVALID
@prog_cycles.setter
def prog_cycles(self, x):
"""Set externally when ForRange is used."""
self._prog_cycles.cycles = x
@property
def recording_delay(self):
return util.conv_cycles_to_time(self.RECORDING_MODULE_DELAY_CYCLES)
@property
def readout_active(self):
return self._trigger_mods.is_readout_active
@property
def manipulation_active(self):
return self._trigger_mods.is_manipulation_active
def add_variable(self, var):
"""Adds variable to sequencer, reserving a register for it"""
reg = self.request_register()
self._var_reg_dict[var.id] = reg
# Named variables can be initialized externally
if var.name is not None:
reg.valid = False
reg.value = 0
def release_variable(self, var):
self.release_register(self.get_var_register(var))
def get_var_register(self, var) -> _Register:
"""Returns _Register of QiVariable var"""
reg = self._var_reg_dict.get(var.id)
if reg is None:
raise RuntimeError(
f"Variable not defined for Sequencer, var.id:{var.id}, {self._var_reg_dict}"
)
return reg
def get_var_value(self, var) -> Union[int, float, None]:
return self.get_var_register(var).get_value()
def request_register(self) -> _Register:
"""Returns register from stack, raises exception, if no registers are on stack anymore"""
try:
return self._register_stack.pop()
except IndexError as e:
print(
"Not enough registers available, sequencer "
+ str(self)
+ " error: "
+ str(e)
)
raise
def get_cycles_from_length(self, length) -> Union[_Register, int]:
"""If length is QiVariable, return _Register, else return numbers of cycles ceiled"""
if isinstance(length, _QiVariableBase):
return self.get_var_register(length)
elif isinstance(length, int):
length = float(length)
return util.conv_time_to_cycles(length, "ceil")
def release_register(self, reg: _Register):
"""Returns register to stack; Raises exception when register is already in stack, or addressing is faulty.
Releasing register 0 does nothing"""
if reg in self._register_stack:
raise IndexError("Release Register: Already released register")
if (reg.adr > Sequencer.AVAILABLE_REGISTERS) or (reg.adr < 0):
raise IndexError("Release Register: Address out of Range")
if reg == self.reg0:
return
reg.valid = True # if register was invalidated and is released again, return it to initial valid state
self._register_stack.append(reg)
def add_instruction_to_list(
self,
instruction: SequencerInstruction,
length_in_cycles: int = 1,
length_valid=True,
):
"""Adds instruction to list. If pulses are still running, adds choke instruction before adding the current command to the list"""
if self._trigger_mods.is_pulse_active:
self.trigger_choke_pulse()
if length_in_cycles == 0:
length_in_cycles = 1 # length is always at least 1 per instruction
self.instruction_list.append(instruction)
self._prog_cycles.add(
length_in_cycles, length_valid
) # Will be deprecated when external sync is possible.
def get_prog_size(self) -> int:
return len(self.instruction_list)
def add_mov_command(self, dst_reg: _Register, src_reg: _Register):
"""Copies value of src_reg to dst_reg."""
self.add_calculation(src_reg, QiOp.PLUS, 0, dst_reg)
def get_upper_immediate_value(self, value: SequencerInstruction.imm_type):
"""If bit 11 of lower value is 1, ADDI command sign extends the value. To account for that, sign extend lower 12 bits
and subtract from upper 20 bits."""
sign_extended_lower = (
value | 0xFFFFF000 if value & 0x00000800 != 0 else value & 0x00000FFF
)
return (value - sign_extended_lower) & 0xFFFFF000
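# Worked example: for value 0x0000_0FFF the lower 12 bits (0xFFF) have bit 11 set and
# sign-extend to -1, so this returns 0x0000_1000; loading that upper part and then
# adding the sign-extended lower part (-1) reconstructs the original 0xFFF.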
def immediate_to_register(
self, val: SequencerInstruction.imm_type, dst_reg: Optional[_Register] = None
) -> _Register:
"""Loads immediate to dst_reg.
If dst_reg is not defined a new register is used to save val to.
If value == 0 and no register is specified, reg0 is returned, which always contains 0.
dst_reg.value is updated to reflect changes."""
if val == 0 and dst_reg is None:
return self.reg0
elif dst_reg is None:
dst_reg = self.request_register()
if isinstance(val, float):
raise NotImplementedError("float not implemented yet")
if SequencerInstruction.is_value_in_lower_immediate(val):
self.add_instruction_to_list( | SeqRegImmediateInst(QiOp.PLUS, dst_reg.adr, 0, val) | 13 | 2023-11-10 10:26:10+00:00 | 12k |
jpcadena/fastapi-boilerplate | app/api/api_v1/router/user.py | [
{
"identifier": "get_redis_dep",
"path": "app/api/deps.py",
"snippet": "async def get_redis_dep(\n redis_dependency: Annotated[RedisDependency, Depends()]\n) -> AsyncGenerator[Redis, None]: # type: ignore\n \"\"\"\n Lazy generation of Redis dependency\n :param redis_dependency: The dependency injection on Redis\n :type redis_dependency: RedisDependency\n :return: The Redis connection instance as a generator\n :rtype: AsyncGenerator[Redis, None]\n \"\"\"\n async with redis_dependency as redis:\n yield redis"
},
{
"identifier": "get_current_user",
"path": "app/api/oauth2_validation.py",
"snippet": "async def get_current_user(\n token: Annotated[str, Depends(oauth2_scheme)],\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n user_service: Annotated[UserService, Depends(get_user_service)],\n redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore\n) -> UserAuth:\n \"\"\"\n Fetches the current authenticated user based on the provided JWT\n access token\n :param token: The Access token from OAuth2PasswordBearer\n :type token: str\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :param user_service: Dependency method for User service object\n :type user_service: UserService\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :return: Authenticated user information\n :rtype: UserAuth\n \"\"\"\n token_service: TokenService = TokenService(redis, auth_settings)\n is_blacklisted: bool = await token_service.is_token_blacklisted(token)\n if is_blacklisted:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Token is blacklisted\",\n )\n return await authenticate_user(token, auth_settings, user_service, redis)"
},
{
"identifier": "get_auth_settings",
"path": "app/config/config.py",
"snippet": "def get_init_settings() -> InitSettings:\ndef get_settings() -> Settings:\ndef get_sql_settings() -> SQLDatabaseSettings:\ndef get_auth_settings() -> AuthSettings:"
},
{
"identifier": "AuthSettings",
"path": "app/config/db/auth_settings.py",
"snippet": "class AuthSettings(BaseSettings):\n \"\"\"\n Settings class for authentication using JWT and Redis\n \"\"\"\n\n model_config = SettingsConfigDict(\n env_file=\".env\",\n env_file_encoding=\"utf-8\",\n case_sensitive=True,\n extra=\"allow\",\n )\n\n MAX_REQUESTS: PositiveInt = 30\n RATE_LIMIT_DURATION: PositiveInt = 60\n BLACKLIST_EXPIRATION_SECONDS: PositiveInt = 3600\n API_V1_STR: str = \"/api/v1\"\n ALGORITHM: str = \"HS256\"\n AUTH_URL: str = \"api/v1/auth/\"\n TOKEN_URL: str = \"api/v1/auth/login\"\n OAUTH2_SCHEME: str = \"JWT\"\n OAUTH2_TOKEN_DESCRIPTION: str = (\n \"JWT token used to authenticate most of\" \" the API endpoints.\"\n )\n OAUTH2_REFRESH_TOKEN_DESCRIPTION: str = (\n \"JWT token used to authenticate\" \" most ofhe API endpoints.\"\n )\n TOKEN_USER_INFO_REGEX: str = (\n r\"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-\"\n r\"[0-9a-f]{4}-[0-9a-f]{12}:\\d{1,3}\\.\"\n r\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$\"\n )\n SUB_REGEX: str = (\n r\"^username:[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-\"\n r\"[89ab][0-9a-f]{3}-[0-9a-f]{12}$\"\n )\n HEADERS: dict[str, str] = {\"WWW-Authenticate\": \"Bearer\"}\n DETAIL: str = \"Could not validate credentials\"\n NO_CLIENT_FOUND: str = \"No client found on the request\"\n SECRET_KEY: str\n SERVER_URL: AnyHttpUrl\n SERVER_DESCRIPTION: str\n CACHE_SECONDS: PositiveInt = 3600\n ACCESS_TOKEN_EXPIRE_MINUTES: float\n REFRESH_TOKEN_EXPIRE_MINUTES: PositiveInt\n EMAIL_RESET_TOKEN_EXPIRE_HOURS: PositiveInt\n AUDIENCE: Optional[AnyHttpUrl] = None\n STRICT_TRANSPORT_SECURITY_MAX_AGE: PositiveInt\n\n @field_validator(\"AUDIENCE\", mode=\"before\")\n def assemble_audience(\n cls, v: Optional[str], info: ValidationInfo\n ) -> AnyHttpUrl:\n \"\"\"\n Combine server host and API_V1_STR to create the audience\n string.\n :param v: The value of audience attribute\n :type v: Optional[str]\n :param info: The field validation info\n :type info: ValidationInfo\n :return: The AUDIENCE attribute\n :rtype: AnyHttpUrl\n \"\"\"\n # pylint: disable=unused-argument,no-self-argument,invalid-name\n if info.config is None:\n raise ValueError(\"info.config cannot be None\")\n return AnyHttpUrl(\n f'{str(info.data.get(\"SERVER_URL\"))[:-1]}:8000/'\n f'{info.data.get(\"TOKEN_URL\")}'\n )\n\n REDIS_SCHEME: str\n REDIS_HOST: str\n REDIS_USERNAME: str\n REDIS_PASSWORD: str\n REDIS_PORT: PositiveInt\n REDIS_DATABASE_URI: Optional[RedisDsn] = None\n\n @field_validator(\"REDIS_DATABASE_URI\", mode=\"before\")\n def assemble_redis_connection(\n cls, v: Optional[str], info: ValidationInfo\n ) -> RedisDsn:\n \"\"\"\n Assemble the cache database connection as URI string\n :param v: Variables to consider\n :type v: str\n :param info: The field validation info\n :type info: ValidationInfo\n :return: Redis URI\n :rtype: RedisDsn\n \"\"\"\n # pylint: disable=no-self-argument,invalid-name\n if info.config is None:\n raise ValueError(\"info.config cannot be None\")\n return RedisDsn(\n str(\n Url.build(\n scheme=info.data.get(\"REDIS_SCHEME\", \"\"),\n username=info.data.get(\"REDIS_USERNAME\"),\n password=info.data.get(\"REDIS_PASSWORD\"),\n host=info.data.get(\"REDIS_HOST\", \"\"),\n port=info.data.get(\"REDIS_PORT\"),\n )\n )\n )"
},
{
"identifier": "InitSettings",
"path": "app/config/init_settings.py",
"snippet": "class InitSettings(BaseSettings):\n \"\"\"\n Init Settings class based on Pydantic Base Settings\n \"\"\"\n\n model_config = SettingsConfigDict(\n case_sensitive=True,\n extra=\"allow\",\n )\n\n ITERATIONS: PositiveInt = 100000\n KEY_BYTES_LENGTH: PositiveInt = 32\n SALT_BYTES: PositiveInt = 16\n IV_BYTES: PositiveInt = 12\n PUBLIC_EXPONENT: PositiveInt = 65537\n RSA_KEY_BITS: PositiveInt = 2048\n SALUTE: str = \"Salute!\"\n ROOT_MSG: str = \"Hello, World!\"\n SERVER_NAME: str = \"FastAPI Boilerplate\"\n PROJECT_NAME: str = \"fastapi-boilerplate\"\n VERSION: str = \"1.0\"\n ENCODING: str = \"UTF-8\"\n DEFAULT_REGION: str = \"Guayas\"\n DEFAULT_COUNTRY: str = \"Ecuador\"\n OPENAPI_FILE_PATH: str = \"/openapi.json\"\n DATE_FORMAT: str = \"%Y-%m-%d\"\n DATETIME_FORMAT: str = \"%Y-%m-%d %H:%M:%S\"\n FILE_DATE_FORMAT: str = \"%d-%b-%Y-%H-%M-%S\"\n IMAGES_APP: str = \"images\"\n IMAGES_PATH: str = \"/assets/images\"\n IMAGES_DIRECTORY: str = \"assets/images\"\n EMAIL_TEMPLATES_DIR: str = \"templates\"\n PASSWORD_RECOVERY_SUBJECT: str = \"Password recovery for user\"\n NEW_ACCOUNT_SUBJECT: str = \"New account for user\"\n WELCOME_SUBJECT: str = \"Welcome to \"\n PASSWORD_CHANGED_CONFIRMATION_SUBJECT: str = (\n \"Successfully password \" \"changed for \"\n )\n DELETE_ACCOUNT_SUBJECT: str = \"Account deleted for \"\n LOG_FORMAT: str = (\n \"[%(name)s][%(asctime)s][%(levelname)s][%(module)s]\"\n \"[%(funcName)s][%(lineno)d]: %(message)s\"\n )\n PASSWORD_REGEX: str = (\n \"^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?\" \"[#?!@$%^&*-]).{8,14}$\"\n )\n\n SUMMARY: str = \"\"\"This backend project is FastAPI template.\n This project serves as the backend, which aims to provide a robust and\n reliable system to its users.\n This backend application plays a crucial role in providing the\n functionality for user authentication, real-time monitoring,\n data processing, and advanced alerting system. It is designed to ensure\n the scalability and maintainability of the mobile app,\n making it a vital part of the overall solution.\n \"\"\"\n DESCRIPTION: str = f\"\"\"**FastAPI**, **SQLAlchemy** and **Redis** helps you\n do awesome stuff. 
🚀\n \\n\\n<img src=\"data:image/png;base64,{img_b64}\"/>\"\"\"\n LICENSE_INFO: dict[str, str] = {\n \"name\": \"MIT\",\n \"identifier\": \"MIT\",\n }\n TAGS_METADATA: list[dict[str, str]] = [\n {\n \"name\": \"user\",\n \"description\": f\"\"\"Operations with users, such as register, get,\n update and delete.\\n\\n<img src=\"data:image/png;base64,\n {users_b64}\" width=\"150\" height=\"100\"/>\"\"\",\n },\n {\n \"name\": \"auth\",\n \"description\": f\"\"\"The authentication logic is here as well as\n password recovery and reset.\n \\n\\n<img src=\"data:image/png;base64,{auth_b64}\" width=\"75\"\n height=\"75\"/>\"\"\",\n },\n ]\n USER_CREATE_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** user create object that works \"\n \"correctly.\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(\"+593987654321\"),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n \"region\": \"Guayas\",\n \"country\": \"Ecuador\",\n \"postal_code\": \"090505\",\n },\n },\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert phone number `strings` to \"\n \"actual `numbers` automatically\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(593987654321),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n \"region\": \"Guayas\",\n \"country\": \"Ecuador\",\n \"postal_code\": \"090505\",\n },\n },\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"username\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"miclave123\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(95, 12, 31),\n \"phone_number\": PhoneNumber(\"5939876a4321\"),\n \"address\": {\n \"street_address\": \"True\",\n \"locality\": \"123\",\n \"region\": \"Andes\",\n \"country\": \"New York\",\n \"postal_code\": \"999999\",\n },\n },\n },\n }\n USER_UPDATE_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** user update object that works \"\n \"correctly.\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(593987654321),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n },\n },\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert phone numbers `strings` to \"\n \"actual `numbers` automatically\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n 
\"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(\"593987654321\"),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n },\n },\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"username\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"miclave123\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(95, 12, 31),\n \"phone_number\": PhoneNumber(\"59398x54321\"),\n \"address\": {\n \"street_address\": \"True\",\n \"locality\": \"123\",\n },\n },\n },\n }\n EMAIL_BODY_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** email object that works correctly.\",\n \"value\": \"[email protected]\",\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": 123,\n },\n }\n TOKEN_PAYLOAD_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** token payload object that works \"\n \"correctly.\",\n \"value\": {\n \"token\": \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n },\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": {\n \"token\": \"123\",\n \"password\": \"abc123\",\n },\n },\n }\n AUTHORIZATION_HEADER_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** authorization token object that works \"\n \"correctly.\",\n \"value\": jwt.encode(\n claims=jsonable_encoder(\n {\n \"sub\": f\"username:{str(uuid4())}\",\n \"nationalities\": [\"ECU\"],\n \"email\": \"[email protected]\",\n \"nickname\": \"example\",\n \"preferred_username\": \"example\",\n \"given_name\": \"Some\",\n \"family_name\": \"Example\",\n \"middle_name\": \"One\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"updated_at\": datetime.now(),\n \"phone_number\": PhoneNumber(\"+593987654321\"),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n \"region\": \"Guayas\",\n \"country\": \"Ecuador\",\n \"postal_code\": \"090505\",\n },\n \"exp\": int(time.time()) + 1800,\n \"nbf\": int(time.time()) - 1,\n \"iat\": int(time.time()),\n }\n ),\n key=\"f52e826e62cdd364c86f129cb18db2fe2be93859c5104cac9585f\"\n \"305378dce65\",\n algorithm=\"HS256\",\n ),\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": \"123\",\n },\n }\n LIMIT_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** limit query parameter that works \"\n \"correctly.\",\n \"value\": 1,\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert limit `strings` to actual\"\n \" `numbers` automatically\",\n \"value\": \"5\",\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": -1,\n },\n }\n SKIP_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** skip query parameter that works \"\n \"correctly.\",\n \"value\": 0,\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert skip `strings` to actual\"\n \" 
`numbers` automatically\",\n \"value\": \"20\",\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": -1,\n },\n }"
},
{
"identifier": "Settings",
"path": "app/config/settings.py",
"snippet": "class Settings(BaseSettings):\n \"\"\"\n Settings class based on Pydantic Base Settings\n \"\"\"\n\n model_config = SettingsConfigDict(\n env_file=\".env\",\n env_file_encoding=\"utf-8\",\n case_sensitive=True,\n extra=\"allow\",\n )\n\n SMTP_PORT: PositiveInt\n SMTP_HOST: str\n SMTP_USER: str\n SMTP_PASSWORD: str\n MAIL_SUBJECT: str\n MAIL_TIMEOUT: float\n EMAILS_FROM_EMAIL: Optional[EmailStr] = None\n EMAILS_FROM_NAME: Optional[str] = None\n SUPERUSER_EMAIL: EmailStr\n SUPERUSER_FIRST_NAME: str\n SUPERUSER_PASSWORD: str\n SUPERUSER_LAST_NAME: str\n SUPERUSER_STREET_ADDRESS: str\n SUPERUSER_LOCALITY: str\n SUPERUSER_POSTAL_CODE: str\n BACKEND_CORS_ORIGINS: list[AnyHttpUrl] = []\n\n PUBLIC_KEY_PATH: FilePath\n PRIVATE_KEY_PATH: FilePath\n\n @field_validator(\n \"PUBLIC_KEY_PATH\", \"PRIVATE_KEY_PATH\", mode=\"before\", check_fields=True\n )\n def validate_key_paths(cls, key_path: FilePath) -> FilePath:\n \"\"\"\n Validate the provided key path.\n :param key_path: Provided key path\n :type key_path: FilePath\n :return: The validated key path\n :rtype: FilePath\n \"\"\"\n if not str(key_path).endswith(\".pem\"):\n raise ValueError(f\"{key_path} must have a .pem extension\")\n base_name: str = os.path.basename(key_path)\n if not base_name.endswith(\"key.pem\"):\n raise ValueError(\n f\"{key_path} must have a file name ending with 'key'\"\n )\n return key_path\n\n @field_validator(\"BACKEND_CORS_ORIGINS\", mode=\"before\")\n def assemble_cors_origins(\n cls, v: Union[str, list[str]]\n ) -> Union[list[str], str]:\n \"\"\"\n Assemble a list of allowed CORS origins.\n :param v: Provided CORS origins, either a string or a list of\n strings\n :type v: Union[str, list[str]]\n :return: List of Backend CORS origins to be accepted\n :rtype: Union[list[str], str]\n \"\"\"\n # pylint: disable=unused-argument,no-self-argument,invalid-name\n if isinstance(v, str) and not v.startswith(\"[\"):\n return [i.strip() for i in v.split(\",\")]\n if isinstance(v, list):\n return v\n raise ValueError(v)\n\n CONTACT_NAME: Optional[str] = None\n CONTACT_URL: Optional[AnyHttpUrl] = None\n CONTACT_EMAIL: Optional[EmailStr] = None\n CONTACT: Optional[dict[str, Any]] = None\n\n @field_validator(\"CONTACT\", mode=\"before\")\n def assemble_contact(\n cls, v: Optional[str], info: ValidationInfo\n ) -> dict[str, str]:\n \"\"\"\n Assemble contact information\n :param v: Variables to consider\n :type v: str\n :param info: The field validation info\n :type info: ValidationInfo\n :return: The contact attribute\n :rtype: dict[str, str]\n \"\"\"\n # pylint: disable=unused-argument,no-self-argument,invalid-name\n if info.config is None:\n raise ValueError(\"info.config cannot be None\")\n contact: dict[str, Any] = {}\n if info.data.get(\"CONTACT_NAME\"):\n contact[\"name\"] = info.data.get(\"CONTACT_NAME\")\n if info.data.get(\"CONTACT_URL\"):\n contact[\"url\"] = info.data.get(\"CONTACT_URL\")\n if info.data.get(\"CONTACT_EMAIL\"):\n contact[\"email\"] = info.data.get(\"CONTACT_EMAIL\")\n return contact"
},
{
"identifier": "NotFoundException",
"path": "app/exceptions/exceptions.py",
"snippet": "class NotFoundException(Exception):\n \"\"\"\n Not Found Exception class\n \"\"\"\n\n def __init__(self, message: str, note: Optional[str] = None):\n super().__init__(message)\n if note:\n self.add_note(note)"
},
{
"identifier": "ServiceException",
"path": "app/exceptions/exceptions.py",
"snippet": "class ServiceException(Exception):\n \"\"\"\n Service Layer Exception class\n \"\"\"\n\n def __init__(self, message: str, note: Optional[str] = None):\n super().__init__(message)\n if note:\n self.add_note(note)"
},
{
"identifier": "UserCreate",
"path": "app/schemas/external/user.py",
"snippet": "class UserCreate(UserBase, UserOptional):\n \"\"\"\n Schema for creating a User record.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_create_example,\n )\n\n password: str = Field(\n ...,\n title=\"Password\",\n description=\"Password of the User\",\n min_length=8,\n max_length=14,\n )\n\n @field_validator(\"password\", mode=\"before\")\n def validate_password(cls, v: Optional[str]) -> str:\n \"\"\"\n Validates the password attribute\n :param v: The password to be validated\n :type v: Optional[str]\n :return: The validated password\n :rtype: str\n \"\"\"\n # pylint: disable=no-self-argument\n return validate_password(v)"
},
{
"identifier": "UserCreateResponse",
"path": "app/schemas/external/user.py",
"snippet": "class UserCreateResponse(UserID, UserBase):\n \"\"\"\n Schema for the response when creating a User.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_create_response_example,\n )"
},
{
"identifier": "UserResponse",
"path": "app/schemas/external/user.py",
"snippet": "class UserResponse(UserID, UserBase, UserOptional, UserInDB):\n \"\"\"\n Schema for the response when retrieving a User.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_response_example,\n )"
},
{
"identifier": "UsersResponse",
"path": "app/schemas/external/user.py",
"snippet": "class UsersResponse(BaseModel):\n \"\"\"\n Class representation for a list of users response\n \"\"\"\n\n users: list[UserResponse]"
},
{
"identifier": "UserUpdate",
"path": "app/schemas/external/user.py",
"snippet": "class UserUpdate(BaseModel):\n \"\"\"\n Schema for updating a User record.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_update_example,\n )\n\n username: Optional[str] = Field(\n default=None,\n title=\"Username\",\n description=\"Username to identify the user\",\n min_length=4,\n max_length=15,\n )\n email: Optional[EmailStr] = Field(\n default=None,\n title=\"Email\",\n description=\"Preferred e-mail address of the User\",\n )\n first_name: Optional[str] = Field(\n default=None,\n title=\"First name\",\n description=\"First name(s) of the User\",\n min_length=1,\n max_length=50,\n )\n middle_name: Optional[str] = Field(\n default=None,\n title=\"Middle Name\",\n description=\"Middle name(s) of the User\",\n max_length=50,\n )\n last_name: Optional[str] = Field(\n default=None,\n title=\"Last name\",\n description=\"Last name(s) of the User\",\n min_length=1,\n max_length=100,\n )\n password: Optional[str] = Field(\n default=None,\n title=\"New Password\",\n description=\"New Password of the User\",\n min_length=8,\n max_length=14,\n )\n gender: Optional[Gender] = Field(\n default=None, title=\"Gender\", description=\"Gender of the User\"\n )\n birthdate: Optional[date] = Field(\n default=None, title=\"Birthdate\", description=\"Birthday of the User\"\n )\n phone_number: Optional[PhoneNumber] = Field(\n default=None,\n title=\"Phone number\",\n description=\"Preferred telephone number of the User\",\n )\n address: Optional[Address] = Field(\n default=None, title=\"Address\", description=\"Address of the User\"\n )\n\n @field_validator(\"password\", mode=\"before\")\n def validate_password(cls, v: Optional[str]) -> str:\n \"\"\"\n Validates the password attribute\n :param v: The password value to validate\n :type v: Optional[str]\n :return: The validated password\n :rtype: str\n \"\"\"\n # pylint: disable=no-self-argument\n return validate_password(v)"
},
{
"identifier": "UserUpdateResponse",
"path": "app/schemas/external/user.py",
"snippet": "class UserUpdateResponse(\n UserAuth, UserName, UserPassword, UserOptional, UserInDB\n):\n \"\"\"\n Schema for the response when updating a User.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_update_response_example,\n )"
},
{
"identifier": "UserAuth",
"path": "app/schemas/infrastructure/user.py",
"snippet": "class UserAuth(UserID, UserBaseAuth):\n \"\"\"\n User Auth that inherits from UserID.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_auth_example,\n )"
},
{
"identifier": "CachedUserService",
"path": "app/services/infrastructure/cached_user.py",
"snippet": "class CachedUserService:\n \"\"\"\n Service class for cached user-related business logic.\n \"\"\"\n\n def __init__(\n self,\n redis: Redis, # type: ignore\n ):\n self._redis: Redis = redis # type: ignore\n self._cache_seconds: PositiveInt = auth_setting.CACHE_SECONDS\n\n async def get_model_from_cache(self, key: UUID4) -> Optional[User]:\n \"\"\"\n Get the user model instance for the given key from the cache database\n :param key: The unique identifier for the model user instance\n :type key: UUID4\n :return: The user model instance\n :rtype: User\n \"\"\"\n value: Optional[str] = await self._redis.get(str(key))\n if not value:\n return None\n user_data: dict[str, Any] = json.loads(value)\n if address_data := user_data.pop(\"address\", None):\n address_instance: Address = Address(**address_data)\n address_create: AddressDB = AddressDB(\n **address_instance.model_dump()\n )\n user_instance: User = User(address=address_create, **user_data)\n return user_instance\n return None\n\n async def get_schema_from_cache(self, key: UUID4) -> Optional[UserResponse]:\n \"\"\"\n Get the user auth schema instance for the given key from the cache\n database\n :param key: The unique identifier for the user instance\n :type key: UUID4\n :return: The user schema instance\n :rtype: UserResponse\n \"\"\"\n value: Optional[str] = await self._redis.get(str(key))\n if value:\n user_data: dict[str, Any] = json.loads(value)\n if len(user_data.keys()) > 3:\n return UserResponse(**user_data)\n return None\n\n async def set_to_cache(\n self,\n key: UUID4,\n value: dict[str, Any],\n ) -> None:\n \"\"\"\n Set the user schema instance to the cache database using the given key\n :param key: The unique identifier for the user instance\n :type key: UUID4\n :param value: The user schema instance to be used\n :type value: dict[str, Any]\n :return: None\n :rtype: NoneType\n \"\"\"\n await self._redis.setex(\n str(key), self._cache_seconds, json.dumps(custom_serializer(value))\n )"
},
{
"identifier": "UserService",
"path": "app/services/infrastructure/user.py",
"snippet": "class UserService:\n \"\"\"\n Service class for user-related business logic.\n \"\"\"\n\n def __init__(\n self,\n user_repo: UserRepository,\n redis: Redis, # type: ignore\n ):\n self._user_repo: UserRepository = user_repo\n self._redis: Redis = redis # type: ignore\n self._cache_seconds: PositiveInt = auth_setting.CACHE_SECONDS\n\n async def get_user_by_id(self, user_id: UUID4) -> Optional[UserResponse]:\n \"\"\"\n Retrieve user information by its unique identifier\n :param user_id: The unique identifier of the user\n :type user_id: UUID4\n :return: User information\n :rtype: Optional[UserResponse]\n \"\"\"\n user: Optional[User]\n try:\n user = await self._user_repo.read_by_id(IdSpecification(user_id))\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user:\n detail: str = f\"User with id {user_id} not found in the system.\"\n logger.error(detail)\n raise NotFoundException(detail)\n user_response: Optional[\n UserResponse\n ] = await model_to_response( # type: ignore\n user, UserResponse\n )\n return user_response\n\n async def get_login_user(self, username: str) -> User:\n \"\"\"\n Retrieve user information for login purposes by its username\n :param username: The username to retrieve User from\n :type username: str\n :return: User information\n :rtype: User\n \"\"\"\n try:\n user: Optional[User] = await self._user_repo.read_by_username(\n UsernameSpecification(username)\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user:\n raise ServiceException(f\"User not found with username: {username}\")\n return user\n\n async def get_user_by_username(\n self, username: str\n ) -> Optional[UserResponse]:\n \"\"\"\n Retrieve user information by its username\n :param username: The username to retrieve User from\n :type username: str\n :return: User information\n :rtype: UserResponse\n \"\"\"\n try:\n user: User = await self.get_login_user(username)\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n return await model_to_response(user, UserResponse) # type: ignore\n\n async def get_user_by_email(\n self, email: EmailStr\n ) -> Optional[UserResponse]:\n \"\"\"\n Retrieve user information by its unique email.\n :param email: The email to retrieve User from\n :type email: EmailStr\n :return: User found in database\n :rtype: Optional[UserResponse]\n \"\"\"\n try:\n user: Optional[User] = await self._user_repo.read_by_email(\n EmailSpecification(email)\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user:\n raise ServiceException(f\"User not found with email: {email}\")\n return await model_to_response(user, UserResponse) # type: ignore\n\n async def get_user_id_by_email(self, email: EmailStr) -> UUID4:\n \"\"\"\n Read the user ID from the database with unique email.\n :param email: Email to retrieve User from\n :type email: EmailStr\n :return: User ID found in database\n :rtype: UUID4\n \"\"\"\n try:\n user_id: Optional[UUID4] = await self._user_repo.read_id_by_email(\n EmailSpecification(email)\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user_id:\n raise ServiceException(f\"User ID not found with email: {email}\")\n return user_id\n\n async def register_user(\n self, user: Union[UserCreate, 
UserSuperCreate]\n ) -> Optional[UserCreateResponse]:\n \"\"\"\n Register a new user in the database\n :param user: Request object representing the user\n :type user: Union[UserCreate, UserSuperCreate]\n :return: Response object representing the created user in the\n database\n :rtype: UserCreateResponse\n \"\"\"\n try:\n created_user = await self._user_repo.create_user(user)\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n return await model_to_response(\n created_user, UserCreateResponse # type: ignore\n )\n\n async def get_users(\n self, offset: Optional[NonNegativeInt], limit: Optional[PositiveInt]\n ) -> list[UserResponse]:\n \"\"\"\n Retrieve users' information from the table\n :param offset: Offset from where to start returning users\n :type offset: NonNegativeInt\n :param limit: Limit the number of results from query\n :type limit: PositiveInt\n :return: User information\n :rtype: list[UserResponse]\n \"\"\"\n try:\n users: list[User] = await self._user_repo.read_users(offset, limit)\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n found_users: list[UserResponse] = [\n await model_to_response(user, UserResponse) # type: ignore\n for user in users\n ]\n return found_users\n\n async def update_user(\n self, user_id: UUID4, user: UserUpdate\n ) -> Optional[UserUpdateResponse]:\n \"\"\"\n Update user information from table\n :param user_id: Unique identifier of the user\n :type user_id: UUID4\n :param user: Requested user information to update\n :type user: UserUpdate\n :return: User information\n :rtype: Optional[UserUpdateResponse]\n \"\"\"\n try:\n updated_user: Optional[User] = await self._user_repo.update_user(\n IdSpecification(user_id), user\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not updated_user:\n raise ServiceException(\n f\"User with user_id: {user_id} could not be updated\"\n )\n return await model_to_response(\n updated_user, UserUpdateResponse # type: ignore\n )\n\n async def delete_user(self, user_id: UUID4) -> dict[str, Any]:\n \"\"\"\n Deletes a user by its id\n :param user_id: Unique identifier of the user\n :type user_id: UUID4\n :return: Data to confirmation info about the delete process\n :rtype: dict[str, Any]\n \"\"\"\n deleted: bool\n deleted_at: Optional[datetime]\n try:\n deleted = await self._user_repo.delete_user(\n IdSpecification(user_id)\n )\n deleted_at = datetime.now()\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n deleted = False\n deleted_at = None\n finally:\n return {\"ok\": deleted, \"deleted_at\": deleted_at}"
},
{
"identifier": "get_user_service",
"path": "app/services/infrastructure/user.py",
"snippet": "async def get_user_service(\n user_repo: Annotated[UserRepository, Depends(get_user_repository)],\n redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore\n) -> UserService:\n \"\"\"\n Get an instance of the user service with the given repository.\n :param user_repo: User repository object for database connection\n :type user_repo: UserRepository\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :return: UserService instance with repository associated\n :rtype: UserService\n \"\"\"\n return UserService(user_repo, redis)"
},
{
"identifier": "send_new_account_email",
"path": "app/tasks/email_tasks/email_tasks.py",
"snippet": "@with_logging\nasync def send_new_account_email(\n email_to: EmailStr,\n username: str,\n settings: Annotated[Settings, Depends(get_settings)],\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n init_settings: Annotated[InitSettings, Depends(get_init_settings)],\n) -> None:\n \"\"\"\n Send a new account email\n :param email_to: The email address of the recipient with new\n account\n :type email_to: EmailStr\n :param username: Username of the recipient\n :type username: str\n :param settings: Dependency method for cached setting object\n :type settings: Settings\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :param init_settings: Dependency method for cached init setting object\n :type init_settings: InitSettings\n :return: None\n :rtype: NoneType\n \"\"\"\n subject: str = (\n f\"{init_settings.PROJECT_NAME} - \"\n f\"{init_settings.NEW_ACCOUNT_SUBJECT} {username}\"\n )\n template_str: str = await build_email_template(\n \"new_account.html\", init_settings\n )\n await send_email(\n email_to=email_to,\n subject_template=subject,\n html_template=template_str,\n environment={\n \"project_name\": init_settings.PROJECT_NAME,\n \"username\": username,\n \"email\": email_to,\n \"link\": f\"{auth_settings.SERVER_URL}\",\n },\n settings=settings,\n )"
},
{
"identifier": "send_welcome_email",
"path": "app/tasks/email_tasks/email_tasks.py",
"snippet": "@with_logging\nasync def send_welcome_email(\n email_to: EmailStr,\n username: str,\n init_settings: Annotated[InitSettings, Depends(get_init_settings)],\n settings: Annotated[Settings, Depends(get_settings)],\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n) -> None:\n \"\"\"\n Send a welcome email\n :param email_to: The email address of the recipient to welcome\n :type email_to: EmailStr\n :param username: Username of the recipient\n :type username: str\n :param init_settings: Dependency method for cached init setting object\n :type init_settings: InitSettings\n :param settings: Dependency method for cached setting object\n :type settings: Settings\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: None\n :rtype: NoneType\n \"\"\"\n subject: str = (\n f\"{init_settings.WELCOME_SUBJECT}{init_settings.PROJECT_NAME},\"\n f\" {username}\"\n )\n template_str: str = await build_email_template(\n \"welcome.html\", init_settings\n )\n await send_email(\n email_to=email_to,\n subject_template=subject,\n html_template=template_str,\n environment={\n \"project_name\": init_settings.PROJECT_NAME,\n \"username\": username,\n \"email\": email_to,\n \"link\": f\"{auth_settings.SERVER_URL}\",\n },\n settings=settings,\n )"
}
] | import logging
from typing import Annotated, Any, Optional
from uuid import uuid4
from fastapi import (
APIRouter,
BackgroundTasks,
Body,
Depends,
HTTPException,
Response,
status,
)
from fastapi.params import Path, Query
from pydantic import UUID4, NonNegativeInt, PositiveInt
from redis.asyncio import Redis
from sqlalchemy.exc import SQLAlchemyError
from app.api.deps import get_redis_dep
from app.api.oauth2_validation import get_current_user
from app.config.config import (
get_auth_settings,
get_init_settings,
get_settings,
init_setting,
)
from app.config.db.auth_settings import AuthSettings
from app.config.init_settings import InitSettings
from app.config.settings import Settings
from app.exceptions.exceptions import NotFoundException, ServiceException
from app.schemas.external.user import (
UserCreate,
UserCreateResponse,
UserResponse,
UsersResponse,
UserUpdate,
UserUpdateResponse,
)
from app.schemas.infrastructure.user import UserAuth
from app.services.infrastructure.cached_user import CachedUserService
from app.services.infrastructure.user import UserService, get_user_service
from app.tasks.email_tasks.email_tasks import (
send_new_account_email,
send_welcome_email,
) | 10,364 | """
User API Router
This module provides CRUD (Create, Retrieve, Update, Delete) operations
for users.
"""
logger: logging.Logger = logging.getLogger(__name__)
router: APIRouter = APIRouter(prefix="/user", tags=["user"])
@router.get("", response_model=UsersResponse)
async def get_users(
current_user: Annotated[UserAuth, Depends(get_current_user)],
| """
User API Router
This module provides CRUD (Create, Retrieve, Update, Delete) operations
for users.
"""
logger: logging.Logger = logging.getLogger(__name__)
router: APIRouter = APIRouter(prefix="/user", tags=["user"])
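# Requests to GET "" on this router (i.e. the /user prefix) are handled by the
# get_users endpoint below, and its response body is validated against UsersResponse.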
@router.get("", response_model=UsersResponse)
async def get_users(
current_user: Annotated[UserAuth, Depends(get_current_user)], | user_service: Annotated[UserService, Depends(get_user_service)], | 16 | 2023-11-17 00:32:32+00:00 | 12k |
dataaug/open-interpreter-free | interpreter/core/core.py | [
{
"identifier": "cli",
"path": "interpreter/cli/cli.py",
"snippet": "def cli(interpreter):\n parser = argparse.ArgumentParser(description=\"Open Interpreter\")\n\n # Add arguments\n for arg in arguments:\n if arg[\"type\"] == bool:\n parser.add_argument(\n f'-{arg[\"nickname\"]}',\n f'--{arg[\"name\"]}',\n dest=arg[\"name\"],\n help=arg[\"help_text\"],\n action=\"store_true\",\n default=None,\n )\n else:\n choices = arg[\"choices\"] if \"choices\" in arg else None\n default = arg[\"default\"] if \"default\" in arg else None\n\n parser.add_argument(\n f'-{arg[\"nickname\"]}',\n f'--{arg[\"name\"]}',\n dest=arg[\"name\"],\n help=arg[\"help_text\"],\n type=arg[\"type\"],\n choices=choices,\n default=default,\n )\n\n # Add special arguments\n parser.add_argument(\n \"--config\",\n dest=\"config\",\n action=\"store_true\",\n help=\"open config.yaml file in text editor\",\n )\n parser.add_argument(\n \"--conversations\",\n dest=\"conversations\",\n action=\"store_true\",\n help=\"list conversations to resume\",\n )\n parser.add_argument(\n \"-f\",\n \"--fast\",\n dest=\"fast\",\n action=\"store_true\",\n help=\"run `interpreter --model gpt-3.5-turbo`\",\n )\n parser.add_argument(\n \"--version\",\n dest=\"version\",\n action=\"store_true\",\n help=\"get Open Interpreter's version number\",\n )\n\n args = parser.parse_args()\n\n # This should be pushed into an open_config.py util\n # If --config is used, open the config.yaml file in the Open Interpreter folder of the user's config dir\n if args.config:\n if args.config_file:\n config_file = get_config_path(args.config_file)\n else:\n config_file = get_config_path()\n\n print(f\"Opening `{config_file}`...\")\n\n # Use the default system editor to open the file\n if platform.system() == \"Windows\":\n os.startfile(\n config_file\n ) # This will open the file with the default application, e.g., Notepad\n else:\n try:\n # Try using xdg-open on non-Windows platforms\n subprocess.call([\"xdg-open\", config_file])\n except FileNotFoundError:\n # Fallback to using 'open' on macOS if 'xdg-open' is not available\n subprocess.call([\"open\", config_file])\n return\n\n if args.local:\n # Default local (LM studio) attributes\n interpreter.system_message = \"You are an AI.\"\n interpreter.model = (\n \"openai/\" + interpreter.model\n ) # This tells LiteLLM it's an OpenAI compatible server\n interpreter.api_base = \"http://localhost:1234/v1\"\n interpreter.max_tokens = 1000\n interpreter.context_window = 3000\n interpreter.api_key = \"0\"\n\n display_markdown_message(\n \"\"\"\n> Open Interpreter's local mode is powered by **`LM Studio`**.\n\n\nYou will need to run **LM Studio** in the background.\n\n1. Download **LM Studio** from [https://lmstudio.ai/](https://lmstudio.ai/) then start it.\n2. Select a language model then click **Download**.\n3. Click the **<->** button on the left (below the chat button).\n4. 
Select your model at the top, then click **Start Server**.\n\n\nOnce the server is running, you can begin your conversation below.\n\n> **Warning:** This feature is highly experimental.\n> Don't expect `gpt-3.5` / `gpt-4` level quality, speed, or reliability yet!\n\n\"\"\"\n )\n\n # Set attributes on interpreter\n for attr_name, attr_value in vars(args).items():\n # Ignore things that aren't possible attributes on interpreter\n if attr_value is not None and hasattr(interpreter, attr_name):\n # If the user has provided a config file, load it and extend interpreter's configuration\n if attr_name == \"config_file\":\n user_config = get_config_path(attr_value)\n interpreter.config_file = user_config\n interpreter.extend_config(config_path=user_config)\n else:\n setattr(interpreter, attr_name, attr_value)\n\n # if safe_mode and auto_run are enabled, safe_mode disables auto_run\n if interpreter.auto_run and (\n interpreter.safe_mode == \"ask\" or interpreter.safe_mode == \"auto\"\n ):\n setattr(interpreter, \"auto_run\", False)\n\n # If --conversations is used, run conversation_navigator\n if args.conversations:\n conversation_navigator(interpreter)\n return\n\n if args.version:\n version = pkg_resources.get_distribution(\"open-interpreter\").version\n print(f\"Open Interpreter {version}\")\n return\n\n if args.fast:\n interpreter.model = \"gpt-3.5-turbo\"\n\n if args.vision:\n interpreter.vision = True\n interpreter.model = \"gpt-4-vision-preview\"\n interpreter.system_message += \"\\nThe user will show you an image of the code you write. You can view images directly. Be sure to actually write a markdown code block for almost every user request! Almost EVERY message should include a markdown code block. Do not end your message prematurely!\\n\\nFor HTML: This will be run STATELESSLY. You may NEVER write '<!-- previous code here... --!>' or `<!-- header will go here -->` or anything like that. It is CRITICAL TO NEVER WRITE PLACEHOLDERS. Placeholders will BREAK it. You must write the FULL HTML CODE EVERY TIME. Therefore you cannot write HTML piecemeal—write all the HTML, CSS, and possibly Javascript **in one step, in one code block**. The user will help you review it visually.\\nIf the user submits a filepath, you will also see the image. The filepath and user image will both be in the user's message.\"\n interpreter.function_calling_llm = False\n interpreter.context_window = 110000\n interpreter.max_tokens = 4096\n\n display_markdown_message(\"> `Vision` enabled **(experimental)**\\n\")\n\n interpreter.chat()"
},
{
"identifier": "setup_llm",
"path": "interpreter/llm/setup_llm.py",
"snippet": "def setup_llm(interpreter):\n \"\"\"\n Takes an Interpreter (which includes a ton of LLM settings),\n returns a Coding LLM (a generator that streams deltas with `message` and `code`).\n \"\"\"\n # gpt4fre\n gpt4free = True\n if gpt4free:\n text_llm = setup_gpt4free_llm(interpreter)\n coding_llm = convert_to_coding_gpt4free_llm(text_llm, debug_mode=interpreter.debug_mode)\n return coding_llm\n\n # Detect whether or not it's a function calling LLM\n if interpreter.function_calling_llm == None:\n if not interpreter.local and (\n interpreter.model in litellm.open_ai_chat_completion_models\n or interpreter.model.startswith(\"azure/\")\n ):\n interpreter.function_calling_llm = True\n else:\n interpreter.function_calling_llm = False\n\n if interpreter.function_calling_llm:\n # Function-calling LLM\n coding_llm = setup_openai_coding_llm(interpreter)\n else:\n # If disable_procedures has not been set manually:\n if interpreter.disable_procedures == None:\n # Disable procedures, which confuses most of these models (except GPT-4V)\n\n if interpreter.model != \"gpt-4-vision-preview\":\n interpreter.disable_procedures = True\n\n # Non-function-calling LLM\n text_llm = setup_text_llm(interpreter)\n coding_llm = convert_to_coding_llm(text_llm, debug_mode=interpreter.debug_mode)\n\n return coding_llm"
},
{
"identifier": "terminal_interface",
"path": "interpreter/terminal_interface/terminal_interface.py",
"snippet": "def terminal_interface(interpreter, message):\n # Auto run and local don't display messages.\n # Probably worth abstracting this to something like \"verbose_cli\" at some point.\n if not interpreter.auto_run and not interpreter.local:\n interpreter_intro_message = [\n \"**Open Interpreter** will require approval before running code.\"\n ]\n\n if interpreter.safe_mode == \"ask\" or interpreter.safe_mode == \"auto\":\n if not check_for_package(\"semgrep\"):\n interpreter_intro_message.append(\n f\"**Safe Mode**: {interpreter.safe_mode}\\n\\n>Note: **Safe Mode** requires `semgrep` (`pip install semgrep`)\"\n )\n else:\n interpreter_intro_message.append(\"Use `interpreter -y` to bypass this.\")\n\n interpreter_intro_message.append(\"Press `CTRL-C` to exit.\")\n\n display_markdown_message(\"\\n\\n\".join(interpreter_intro_message) + \"\\n\")\n\n active_block = None\n\n if message:\n interactive = False\n else:\n interactive = True\n\n while True:\n try:\n if interactive:\n message = input(\"> \").strip()\n\n try:\n # This lets users hit the up arrow key for past messages\n readline.add_history(message)\n except:\n # If the user doesn't have readline (may be the case on windows), that's fine\n pass\n\n except KeyboardInterrupt:\n # Exit gracefully\n break\n\n if message.startswith(\"%\") and interactive:\n handle_magic_command(interpreter, message)\n continue\n\n # Many users do this\n if message.strip() == \"interpreter --local\":\n print(\"Please press CTRL-C then run `interpreter --local`.\")\n continue\n\n if True: ################## interpreter.vision:\n # Is the input a path to an image? Like they just dragged it into the terminal?\n image_path = find_image_path(message)\n\n ## If we found an image, add it to the message\n if image_path:\n if interpreter.debug_mode:\n print(\"Found image:\", image_path)\n # Turn it into base64\n with open(image_path, \"rb\") as image_file:\n encoded_string = base64.b64encode(image_file.read()).decode(\"utf-8\")\n file_extension = image_path.split(\".\")[-1]\n message = {\n \"role\": \"user\",\n \"message\": message,\n \"image\": f\"data:image/{file_extension};base64,{encoded_string}\",\n }\n\n # Track if we've ran a code block.\n # We'll use this to determine if we should render a new code block,\n # In the event we get code -> output -> code again\n ran_code_block = False\n render_cursor = True\n\n try:\n for chunk in interpreter.chat(message, display=False, stream=True):\n if interpreter.debug_mode:\n print(\"Chunk in `terminal_interface`:\", chunk)\n\n # Message\n if \"message\" in chunk:\n if active_block is None:\n active_block = MessageBlock()\n if active_block.type != \"message\":\n active_block.end()\n active_block = MessageBlock()\n active_block.message += chunk[\"message\"]\n render_cursor = True\n\n # Code\n if \"code\" in chunk or \"language\" in chunk:\n if active_block is None:\n active_block = CodeBlock()\n if active_block.type != \"code\" or ran_code_block:\n # If the last block wasn't a code block,\n # or it was, but we already ran it:\n active_block.end()\n active_block = CodeBlock()\n ran_code_block = False\n render_cursor = True\n\n if \"language\" in chunk:\n active_block.language = chunk[\"language\"]\n if \"code\" in chunk:\n active_block.code += chunk[\"code\"]\n if \"active_line\" in chunk:\n active_block.active_line = chunk[\"active_line\"]\n\n # Execution notice\n if \"executing\" in chunk:\n if not interpreter.auto_run:\n # OI is about to execute code. 
The user wants to approve this\n\n # End the active block so you can run input() below it\n active_block.end()\n\n should_scan_code = False\n\n if not interpreter.safe_mode == \"off\":\n if interpreter.safe_mode == \"auto\":\n should_scan_code = True\n elif interpreter.safe_mode == \"ask\":\n response = input(\n \" Would you like to scan this code? (y/n)\\n\\n \"\n )\n print(\"\") # <- Aesthetic choice\n\n if response.strip().lower() == \"y\":\n should_scan_code = True\n\n if should_scan_code:\n # Get code language and actual code from the chunk\n # We need to give these to semgrep when we start our scan\n language = chunk[\"executing\"][\"language\"]\n code = chunk[\"executing\"][\"code\"]\n\n scan_code(code, language, interpreter)\n\n response = input(\n \" Would you like to run this code? (y/n)\\n\\n \"\n )\n print(\"\") # <- Aesthetic choice\n\n if response.strip().lower() == \"y\":\n # Create a new, identical block where the code will actually be run\n # Conveniently, the chunk includes everything we need to do this:\n active_block = CodeBlock()\n active_block.margin_top = False # <- Aesthetic choice\n active_block.language = chunk[\"executing\"][\"language\"]\n active_block.code = chunk[\"executing\"][\"code\"]\n else:\n # User declined to run code.\n interpreter.messages.append(\n {\n \"role\": \"user\",\n \"message\": \"I have declined to run this code.\",\n }\n )\n break\n\n if \"image\" in chunk or \"html\" in chunk or \"javascript\" in chunk:\n # Good to keep the LLM informed <3\n message_for_llm = display_output(chunk)\n if message_for_llm:\n if \"output\" in interpreter.messages[-1]:\n interpreter.messages[-1][\"output\"] += \"\\n\" + message_for_llm\n else:\n interpreter.messages[-1][\"output\"] = message_for_llm\n\n # I know this is insane, but the easiest way to now display this\n # is to set the chunk to an output chunk, which will trigger the next conditional!\n\n chunk = {\"output\": message_for_llm}\n\n # Output\n if \"output\" in chunk:\n ran_code_block = True\n render_cursor = False\n active_block.output += \"\\n\" + chunk[\"output\"]\n active_block.output = (\n active_block.output.strip()\n ) # <- Aesthetic choice\n\n # Truncate output\n active_block.output = truncate_output(\n active_block.output, interpreter.max_output\n )\n\n if active_block:\n active_block.refresh(cursor=render_cursor)\n\n yield chunk\n\n # (Sometimes -- like if they CTRL-C quickly -- active_block is still None here)\n if active_block:\n active_block.end()\n active_block = None\n\n if not interactive:\n # Don't loop\n break\n\n except KeyboardInterrupt:\n # Exit gracefully\n if active_block:\n active_block.end()\n active_block = None\n\n if interactive:\n # (this cancels LLM, returns to the interactive \"> \" input)\n continue\n else:\n break\n except:\n system_info(interpreter)\n raise"
},
{
"identifier": "validate_llm_settings",
"path": "interpreter/terminal_interface/validate_llm_settings.py",
"snippet": "def validate_llm_settings(interpreter):\n \"\"\"\n Interactivley prompt the user for required LLM settings\n \"\"\"\n\n # This runs in a while loop so `continue` lets us start from the top\n # after changing settings (like switching to/from local)\n while True:\n if interpreter.local:\n # We have already displayed a message.\n # (This strange behavior makes me think validate_llm_settings needs to be rethought / refactored)\n break\n\n else:\n # Ensure API keys are set as environment variables\n\n # OpenAI\n if interpreter.model in litellm.open_ai_chat_completion_models:\n if not os.environ.get(\"OPENAI_API_KEY\") and not interpreter.api_key:\n display_welcome_message_once()\n\n display_markdown_message(\n \"\"\"---\n > OpenAI API key not found\n\n To use `GPT-4` (highly recommended) please provide an OpenAI API key.\n\n To use another language model, consult the documentation at [docs.openinterpreter.com](https://docs.openinterpreter.com/language-model-setup/).\n \n ---\n \"\"\"\n )\n\n response = getpass.getpass(\"OpenAI API key: \")\n print(f\"OpenAI API key: {response[:4]}...{response[-4:]}\")\n\n display_markdown_message(\n \"\"\"\n\n **Tip:** To save this key for later, run `export OPENAI_API_KEY=your_api_key` on Mac/Linux or `setx OPENAI_API_KEY your_api_key` on Windows.\n \n ---\"\"\"\n )\n\n interpreter.api_key = response\n time.sleep(2)\n break\n\n # This is a model we don't have checks for yet.\n break\n\n # If we're here, we passed all the checks.\n\n # Auto-run is for fast, light useage -- no messages.\n # If local, we've already displayed a message.\n if not interpreter.auto_run and not interpreter.local:\n display_markdown_message(f\"> Model set to `{interpreter.model}`\")\n return"
},
{
"identifier": "check_for_update",
"path": "interpreter/utils/check_for_update.py",
"snippet": "def check_for_update():\n # Fetch the latest version from the PyPI API\n response = requests.get(f\"https://pypi.org/pypi/open-interpreter/json\")\n latest_version = response.json()[\"info\"][\"version\"]\n\n # Get the current version using pkg_resources\n current_version = pkg_resources.get_distribution(\"open-interpreter\").version\n\n return version.parse(latest_version) > version.parse(current_version)"
},
{
"identifier": "display_markdown_message",
"path": "interpreter/utils/display_markdown_message.py",
"snippet": "def display_markdown_message(message):\n \"\"\"\n Display markdown message. Works with multiline strings with lots of indentation.\n Will automatically make single line > tags beautiful.\n \"\"\"\n\n for line in message.split(\"\\n\"):\n line = line.strip()\n if line == \"\":\n print(\"\")\n elif line == \"---\":\n rich_print(Rule(style=\"white\"))\n else:\n rich_print(Markdown(line))\n\n if \"\\n\" not in message and message.startswith(\">\"):\n # Aesthetic choice. For these tags, they need a space below them\n print(\"\")"
},
{
"identifier": "get_config",
"path": "interpreter/utils/get_config.py",
"snippet": "def get_config_path(path=user_config_path):\ndef get_config(path=user_config_path):"
},
{
"identifier": "get_storage_path",
"path": "interpreter/utils/local_storage_path.py",
"snippet": "def get_storage_path(subdirectory=None):\n if subdirectory is None:\n return config_dir\n else:\n return os.path.join(config_dir, subdirectory)"
},
{
"identifier": "generate_system_message",
"path": "interpreter/core/generate_system_message.py",
"snippet": "def generate_system_message(interpreter):\n \"\"\"\n Dynamically generate a system message.\n\n Takes an interpreter instance,\n returns a string.\n\n This is easy to replace!\n Just swap out `interpreter.generate_system_message` with another function.\n \"\"\"\n\n #### Start with the static system message\n\n system_message = interpreter.system_message\n\n #### Add dynamic components, like the user's OS, username, relevant procedures, etc\n\n system_message += \"\\n\" + get_user_info_string()\n\n if not interpreter.local and not interpreter.disable_procedures:\n try:\n system_message += \"\\n\" + get_relevant_procedures_string(\n interpreter.messages\n )\n except:\n if interpreter.debug_mode:\n print(traceback.format_exc())\n # It's okay if they can't. This just fixes some common mistakes it makes.\n\n return system_message"
},
{
"identifier": "respond",
"path": "interpreter/core/respond.py",
"snippet": "def respond(interpreter):\n \"\"\"\n Yields tokens, but also adds them to interpreter.messages. TBH probably would be good to seperate those two responsibilities someday soon\n Responds until it decides not to run any more code or say anything else.\n \"\"\"\n\n last_unsupported_code = \"\"\n\n while True:\n system_message = interpreter.generate_system_message()\n\n # Create message object\n system_message = {\"role\": \"system\", \"message\": system_message}\n\n # Create the version of messages that we'll send to the LLM\n messages_for_llm = interpreter.messages.copy()\n messages_for_llm = [system_message] + messages_for_llm\n\n # It's best to explicitly tell these LLMs when they don't get an output\n for message in messages_for_llm:\n if \"output\" in message and message[\"output\"] == \"\":\n message[\"output\"] = \"No output\"\n\n ### RUN THE LLM ###\n\n # Add a new message from the assistant to interpreter's \"messages\" attribute\n # (This doesn't go to the LLM. We fill this up w/ the LLM's response)\n interpreter.messages.append({\"role\": \"assistant\"})\n\n # Start putting chunks into the new message\n # + yielding chunks to the user\n try:\n # Track the type of chunk that the coding LLM is emitting\n chunk_type = None\n\n for chunk in interpreter._llm(messages_for_llm):\n # Add chunk to the last message\n interpreter.messages[-1] = merge_deltas(interpreter.messages[-1], chunk)\n\n # This is a coding llm\n # It will yield dict with either a message, language, or code (or language AND code)\n\n # We also want to track which it's sending to we can send useful flags.\n # (otherwise pretty much everyone needs to implement this)\n for new_chunk_type in [\"message\", \"language\", \"code\"]:\n if new_chunk_type in chunk and chunk_type != new_chunk_type:\n if chunk_type:\n yield {f\"end_of_{chunk_type}\": True}\n # Language is actually from a code block\n if new_chunk_type == \"language\":\n new_chunk_type = \"code\"\n chunk_type = new_chunk_type\n yield {f\"start_of_{chunk_type}\": True}\n\n yield chunk\n\n # We don't trigger the end_of_message or end_of_code flag if we actually end on either (we just exit the loop above)\n yield {f\"end_of_{chunk_type}\": True}\n\n except litellm.exceptions.BudgetExceededError:\n display_markdown_message(\n f\"\"\"> Max budget exceeded\n\n **Session spend:** ${litellm._current_cost}\n **Max budget:** ${interpreter.max_budget}\n\n Press CTRL-C then run `interpreter --max_budget [higher USD amount]` to proceed.\n \"\"\"\n )\n break\n # Provide extra information on how to change API keys, if we encounter that error\n # (Many people writing GitHub issues were struggling with this)\n except Exception as e:\n if (\n interpreter.local == False\n and \"auth\" in str(e).lower()\n or \"api key\" in str(e).lower()\n ):\n output = traceback.format_exc()\n raise Exception(\n f\"{output}\\n\\nThere might be an issue with your API key(s).\\n\\nTo reset your API key (we'll use OPENAI_API_KEY for this example, but you may need to reset your ANTHROPIC_API_KEY, HUGGINGFACE_API_KEY, etc):\\n Mac/Linux: 'export OPENAI_API_KEY=your-key-here',\\n Windows: 'setx OPENAI_API_KEY your-key-here' then restart terminal.\\n\\n\"\n )\n elif interpreter.local:\n raise Exception(\n str(e)\n + \"\"\"\n\nPlease make sure LM Studio's local server is running by following the steps above.\n\nIf LM Studio's local server is running, please try a language model with a different architecture.\n\n \"\"\"\n )\n else:\n raise\n\n ### RUN CODE (if it's there) ###\n\n if \"code\" in 
interpreter.messages[-1]:\n if interpreter.debug_mode:\n print(\"Running code:\", interpreter.messages[-1])\n\n try:\n # What code do you want to run?\n code = interpreter.messages[-1][\"code\"]\n\n # Fix a common error where the LLM thinks it's in a Jupyter notebook\n if interpreter.messages[-1][\"language\"] == \"python\" and code.startswith(\n \"!\"\n ):\n code = code[1:]\n interpreter.messages[-1][\"code\"] = code\n interpreter.messages[-1][\"language\"] = \"shell\"\n\n # Get a code interpreter to run it\n language = interpreter.messages[-1][\"language\"].lower().strip()\n if language in language_map:\n if language not in interpreter._code_interpreters:\n # Create code interpreter\n config = {\"language\": language, \"vision\": interpreter.vision}\n interpreter._code_interpreters[\n language\n ] = create_code_interpreter(config)\n code_interpreter = interpreter._code_interpreters[language]\n else:\n # This still prints the code but don't allow code to run. Let's Open-Interpreter know through output message\n\n output = (\n f\"Open Interpreter does not currently support `{language}`.\"\n )\n\n yield {\"output\": output}\n interpreter.messages[-1][\"output\"] = output\n\n # Let the response continue so it can deal with the unsupported code in another way. Also prevent looping on the same piece of code.\n if code != last_unsupported_code:\n last_unsupported_code = code\n continue\n else:\n break\n\n # Yield a message, such that the user can stop code execution if they want to\n try:\n yield {\"executing\": {\"code\": code, \"language\": language}}\n except GeneratorExit:\n # The user might exit here.\n # We need to tell python what we (the generator) should do if they exit\n break\n\n # Yield each line, also append it to last messages' output\n interpreter.messages[-1][\"output\"] = \"\"\n for line in code_interpreter.run(code):\n yield line\n if \"output\" in line:\n output = interpreter.messages[-1][\"output\"]\n output += \"\\n\" + line[\"output\"]\n\n # Truncate output\n output = truncate_output(output, interpreter.max_output)\n\n interpreter.messages[-1][\"output\"] = output.strip()\n # Vision\n if interpreter.vision:\n base64_image = None\n if \"image\" in line:\n base64_image = line[\"image\"]\n if \"html\" in line:\n base64_image = html_to_base64(line[\"html\"])\n\n if base64_image:\n yield {\"output\": \"Sending image output to GPT-4V...\"}\n interpreter.messages[-1][\n \"image\"\n ] = f\"data:image/jpeg;base64,{base64_image}\"\n\n except:\n output = traceback.format_exc()\n yield {\"output\": output.strip()}\n interpreter.messages[-1][\"output\"] = output.strip()\n\n yield {\"active_line\": None}\n yield {\"end_of_execution\": True}\n\n else:\n # Doesn't want to run code. We're done\n break\n\n return"
}
] | import json
import os
from datetime import datetime
from ..cli.cli import cli
from ..llm.setup_llm import setup_llm
from ..terminal_interface.terminal_interface import terminal_interface
from ..terminal_interface.validate_llm_settings import validate_llm_settings
from ..utils.check_for_update import check_for_update
from ..utils.display_markdown_message import display_markdown_message
from ..utils.get_config import get_config, user_config_path
from ..utils.local_storage_path import get_storage_path
from .generate_system_message import generate_system_message
from .respond import respond | 7,228 | """
This file defines the Interpreter class.
It's the main file. `import interpreter` will import an instance of this class.
"""
class Interpreter:
def cli(self):
cli(self)
def __init__(self):
# State
self.messages = []
self._code_interpreters = {}
self.config_file = user_config_path
# Settings
self.local = False
self.auto_run = False
self.debug_mode = False
self.max_output = 2000
self.safe_mode = "off"
self.disable_procedures = False
# Conversation history
self.conversation_history = True
self.conversation_filename = None
self.conversation_history_path = get_storage_path("conversations")
# LLM settings
self.model = ""
self.temperature = None
self.system_message = ""
self.context_window = None
self.max_tokens = None
self.api_base = None
self.api_key = None
self.max_budget = None
self._llm = None
self.function_calling_llm = None
self.vision = False # LLM supports vision
# Load config defaults
self.extend_config(self.config_file)
# Check for update
try:
if not self.local:
# This should actually be pushed into the utility
if check_for_update():
display_markdown_message(
"> **A new version of Open Interpreter is available.**\n>Please run: `pip install --upgrade open-interpreter`\n\n---"
)
except:
# Doesn't matter
pass
def extend_config(self, config_path):
if self.debug_mode:
print(f"Extending configuration from `{config_path}`")
config = get_config(config_path)
self.__dict__.update(config)
def chat(self, message=None, display=True, stream=False):
if stream:
return self._streaming_chat(message=message, display=display)
# If stream=False, *pull* from the stream.
for _ in self._streaming_chat(message=message, display=display):
pass
return self.messages
def _streaming_chat(self, message=None, display=True):
# If we have a display,
# we can validate our LLM settings w/ the user first
gpt4free = True
if display and not gpt4free:
validate_llm_settings(self)
# Setup the LLM
if not self._llm:
| """
This file defines the Interpreter class.
It's the main file. `import interpreter` will import an instance of this class.
"""
class Interpreter:
def cli(self):
cli(self)
def __init__(self):
# State
self.messages = []
self._code_interpreters = {}
self.config_file = user_config_path
# Settings
self.local = False
self.auto_run = False
self.debug_mode = False
self.max_output = 2000
self.safe_mode = "off"
self.disable_procedures = False
# Conversation history
self.conversation_history = True
self.conversation_filename = None
self.conversation_history_path = get_storage_path("conversations")
# LLM settings
self.model = ""
self.temperature = None
self.system_message = ""
self.context_window = None
self.max_tokens = None
self.api_base = None
self.api_key = None
self.max_budget = None
self._llm = None
self.function_calling_llm = None
self.vision = False # LLM supports vision
# Load config defaults
self.extend_config(self.config_file)
# Check for update
try:
if not self.local:
# This should actually be pushed into the utility
if check_for_update():
display_markdown_message(
"> **A new version of Open Interpreter is available.**\n>Please run: `pip install --upgrade open-interpreter`\n\n---"
)
except:
# Doesn't matter
pass
def extend_config(self, config_path):
if self.debug_mode:
print(f"Extending configuration from `{config_path}`")
config = get_config(config_path)
self.__dict__.update(config)
def chat(self, message=None, display=True, stream=False):
if stream:
return self._streaming_chat(message=message, display=display)
# If stream=False, *pull* from the stream.
for _ in self._streaming_chat(message=message, display=display):
pass
return self.messages
def _streaming_chat(self, message=None, display=True):
# If we have a display,
# we can validate our LLM settings w/ the user first
gpt4free = True
if display and not gpt4free:
validate_llm_settings(self)
# Setup the LLM
if not self._llm: | self._llm = setup_llm(self) | 1 | 2023-11-16 03:10:42+00:00 | 12k |
3dp-accelerometer/octoprint-accelerometer | octoprint_accelerometer/plugin.py | [
{
"identifier": "DataPostProcessRunner",
"path": "octoprint_accelerometer/data_post_process.py",
"snippet": "class DataPostProcessRunner:\n \"\"\"\n Runner for traversing stream files and post-processing (FFT) if necessary.\n \"\"\"\n def __init__(self,\n logger: Logger,\n on_event_callback: Optional[Callable[[DataProcessingEventType], None]],\n input_dir: str,\n input_file_prefix: str,\n algorithm_d1: str,\n output_dir: str,\n output_file_prefix: str,\n output_overwrite: bool,\n do_dry_run: bool,\n do_abort_flag: threading.Event = threading.Event()):\n self.logger: Logger = logger\n self.on_event_callback: Optional[Callable[[DataProcessingEventType], None]] = on_event_callback\n self._input_dir: str = input_dir\n self._input_file_prefix: str = input_file_prefix\n self._algorithm_d1: str = algorithm_d1\n self._output_dir: str = output_dir\n self._output_file_prefix: str = output_file_prefix\n self._output_overwrite: bool = output_overwrite\n self._do_dry_run: bool = do_dry_run\n self._do_abort_flag: threading.Event = do_abort_flag\n self._background_task: Optional[DataPostProcessBackgroundTask] = None\n self._background_task_start_timestamp: Optional[float] = None\n self._background_task_stop_timestamp: Optional[float] = None\n self._files_total: Optional[int] = None\n self._files_processed: Optional[int] = None\n self._files_skipped: Optional[int] = None\n\n @property\n def algorithm_d1(self) -> str:\n return self._algorithm_d1\n\n @algorithm_d1.setter\n def algorithm_d1(self, algorithm_d1: str):\n self._algorithm_d1 = algorithm_d1\n\n @property\n def input_dir(self) -> str:\n return self._input_dir\n\n @input_dir.setter\n def input_dir(self, input_dir: str):\n self._input_dir = input_dir\n\n @property\n def input_file_prefix(self) -> str:\n return self._input_file_prefix\n\n @input_file_prefix.setter\n def input_file_prefix(self, input_file_prefix: str):\n self._input_file_prefix = input_file_prefix\n\n @property\n def output_dir(self) -> str:\n return self._output_dir\n\n @output_dir.setter\n def output_dir(self, output_dir: str):\n self._output_dir = output_dir\n\n @property\n def output_file_prefix(self) -> str:\n return self._output_file_prefix\n\n @output_file_prefix.setter\n def output_file_prefix(self, output_file_prefix: str):\n self._output_file_prefix = output_file_prefix\n\n @property\n def output_overwrite(self) -> bool:\n return self._output_overwrite\n\n @output_overwrite.setter\n def output_overwrite(self, output_overwrite: bool):\n self._output_overwrite = output_overwrite\n\n @property\n def do_dry_run(self) -> bool:\n return self._do_dry_run\n\n @do_dry_run.setter\n def do_dry_run(self, do_dry_run: bool):\n self._do_dry_run = do_dry_run\n\n def is_running(self) -> bool:\n return True if self._background_task is not None and self._background_task.is_alive() else False\n\n def _send_on_event_callback(self, event: DataProcessingEventType):\n if self.on_event_callback:\n self.on_event_callback(event)\n\n def _send_on_thread_event_callback(self,\n event: DataProcessingEventType,\n total: Optional[int] = None,\n processed: Optional[int] = None,\n skipped: Optional[int] = None):\n\n self._files_total = total\n self._files_processed = processed\n self._files_skipped = skipped\n\n if event == DataProcessingEventType.PROCESSING_FINISHED:\n self._thread_stop_timestamp = time.time()\n\n if self.on_event_callback:\n self.on_event_callback(event)\n\n # TODO: force an early thread termination not by just terminating run().\n # Reason: Thread.is_alive() takes up to 30 seconds after run() terminated\n # to report not-alive. 
This works but sounds like a bug though.\n if event in [DataProcessingEventType.PROCESSING_FINISHED,\n DataProcessingEventType.UNHANDLED_EXCEPTION,\n DataProcessingEventType.ABORTED]:\n self.logger.info(\"data post processing thread terminated\")\n raise SystemExit()\n\n def stop(self) -> None:\n self._do_abort_flag.set()\n self._send_on_event_callback(DataProcessingEventType.ABORTING)\n if self._background_task:\n try:\n self._background_task.join()\n except RuntimeError as _e:\n self.logger.info(\"no running thread that can be stopped\")\n self._background_task = None\n self._background_task_stop_timestamp = time.time()\n self._send_on_event_callback(DataProcessingEventType.ABORTED)\n\n def get_last_run_duration_s(self) -> Optional[float]:\n \"\"\"\n Returns the last known duration.\n\n Note: Whenever this method is called, make sure to assert that the thread is not running.\n\n This is-running check is skipped here on purpose.\n Normally the child thread is the caller itself.\n The call propagated indirectly through the plugin's callback that most likely called this method again.\n In that case the thread is always running.\n\n :return: the last known duration; None if unknown of thread is still running\n \"\"\"\n return None if not self._thread_stop_timestamp or not self._background_task_start_timestamp else self._thread_stop_timestamp - self._background_task_start_timestamp\n\n def get_last_processed_count(self) -> Tuple[Optional[int], Optional[int], Optional[int]]:\n return self._files_total, self._files_processed, self._files_skipped\n\n def run(self) -> None:\n self._do_abort_flag.clear()\n self._background_task_stop_timestamp = None\n self._files_total = None\n self._files_processed = None\n self._files_skipped = None\n\n try:\n self.logger.info(\"start data processing ...\")\n self._background_task = DataPostProcessBackgroundTask(\n logger=self.logger,\n task=DataPostProcessTask(\n logger=self.logger,\n runner=DataDecomposeRunner(\n command=\"algo\",\n input_dir=self.input_dir,\n input_file_prefix=self.input_file_prefix,\n algorithm_d1=self.algorithm_d1,\n output_dir=self.output_dir,\n output_file_prefix=self.output_file_prefix,\n output_overwrite=False),\n on_event_callback=self._send_on_thread_event_callback))\n\n self._send_on_event_callback(DataProcessingEventType.PROCESSING)\n self._background_task_start_timestamp = time.time()\n self._background_task.start()\n\n except Exception as e:\n self.logger.error(\"railed to start data processing thread\")\n self.logger.error(str(e))\n self._send_on_event_callback(DataProcessingEventType.UNHANDLED_EXCEPTION)"
},
{
"identifier": "DataProcessingEventType",
"path": "octoprint_accelerometer/event_types.py",
"snippet": "class DataProcessingEventType(IntEnum):\n \"\"\"\n Types that can be emitted by callback from the data processing task.\n \"\"\"\n\n STARTING = 1\n \"data processing: sane execution event\"\n PROCESSING = 2\n \"data processing: sane execution event\"\n PROCESSING_FINISHED = 3\n \"data processing: sane execution event\"\n\n UNHANDLED_EXCEPTION = 12\n \"data processing: exceptional event\"\n\n ABORTING = 21\n \"event upon user request\"\n ABORTED = 22\n \"event upon user request\""
},
{
"identifier": "RecordingEventType",
"path": "octoprint_accelerometer/event_types.py",
"snippet": "class RecordingEventType(IntEnum):\n \"\"\"\n Types that can be emitted by callback from the recording task.\n \"\"\"\n\n STARTING = 1\n \"processing: sane execution event\"\n PROCESSING = 2\n \"processing: sane execution event\"\n PROCESSING_FINISHED = 3\n \"processing: sane execution event\"\n\n FIFO_OVERRUN = 11\n \"processing: exceptional event\"\n UNHANDLED_EXCEPTION = 12\n \"processing: exceptional event\"\n\n ABORTING = 21\n \"event upon user request\"\n ABORTED = 22\n \"event upon user request\""
},
{
"identifier": "RecordStepSeriesRunner",
"path": "octoprint_accelerometer/record_step_series.py",
"snippet": "class RecordStepSeriesRunner:\n \"\"\"\n Runner for moving printer, recording streams from accelerometer and saving to data to files.\n \"\"\"\n\n def __init__(self,\n logger: Logger,\n printer: PrinterInterface,\n controller_serial_device: str,\n on_event_callback: Optional[Callable[[RecordingEventType], None]],\n controller_record_timelapse_s: float,\n controller_decode_timeout_s: float,\n sensor_odr_hz: int,\n gcode_start_point_mm: Tuple[int, int, int],\n gcode_axis: List[Literal[\"x\", \"y\", \"z\"]],\n gcode_distance_mm: int,\n gcode_step_count: int,\n gcode_sequence_count: int,\n start_frequency_hz: int,\n stop_frequency_hz: int,\n step_frequency_hz: int,\n start_zeta_em2: int,\n stop_zeta_em2: int,\n step_zeta_em2: int,\n output_file_prefix: str,\n output_dir: str,\n do_dry_run: bool,\n do_abort_flag: threading.Event = threading.Event()):\n self.controller_response_error: bool = False\n self.controller_fifo_overrun_error: bool = False\n self.unhandled_exception: bool = False\n self.logger: Logger = logger\n self.printer: PrinterInterface = printer\n self._controller_serial_device: str = controller_serial_device\n self.on_event_callback: Optional[Callable[[RecordingEventType], None]] = on_event_callback\n self._controller_record_timelapse_s: float = controller_record_timelapse_s\n self._controller_decode_timeout_s: float = controller_decode_timeout_s\n self._sensor_odr_hz: int = sensor_odr_hz\n self._gcode_start_point_mm: Tuple[int, int, int] = gcode_start_point_mm\n self._gcode_axis: List[Literal[\"x\", \"y\", \"z\"]] = gcode_axis\n self._gcode_distance_mm: int = gcode_distance_mm\n self._gcode_step_count: int = gcode_step_count\n self._gcode_sequence_count: int = gcode_sequence_count\n self._start_frequency_hz: int = start_frequency_hz\n self._stop_frequency_hz: int = stop_frequency_hz\n self._step_frequency_hz: int = step_frequency_hz\n self._start_zeta_em2: int = start_zeta_em2\n self._stop_zeta_em2: int = stop_zeta_em2\n self._step_zeta_em2: int = step_zeta_em2\n self._output_file_prefix: str = output_file_prefix\n self._output_dir: str = output_dir\n self._do_dry_run: bool = do_dry_run\n self._do_abort_flag: threading.Event = do_abort_flag\n self._background_task: Optional[RecordStepSeriesBackgroundTask] = None\n self._background_task_start_timestamp: Optional[float] = None\n self._background_task_stop_timestamp: Optional[float] = None\n\n @property\n def controller_serial_device(self) -> str:\n return self._controller_serial_device\n\n @controller_serial_device.setter\n def controller_serial_device(self, controller_serial_device: str):\n self._controller_serial_device = controller_serial_device\n\n @property\n def controller_record_timelapse_s(self) -> float:\n return self._controller_record_timelapse_s\n\n @controller_record_timelapse_s.setter\n def controller_record_timelapse_s(self, controller_record_timelapse_s: float):\n self._controller_record_timelapse_s = controller_record_timelapse_s\n\n @property\n def controller_decode_timeout_s(self) -> float:\n return self._controller_decode_timeout_s\n\n @controller_decode_timeout_s.setter\n def controller_decode_timeout_s(self, controller_decode_timeout_s: float):\n self._controller_decode_timeout_s = controller_decode_timeout_s\n\n @property\n def sensor_odr_hz(self) -> int:\n return self._sensor_odr_hz\n\n @sensor_odr_hz.setter\n def sensor_odr_hz(self, sensor_odr_hz: int):\n self._sensor_odr_hz = sensor_odr_hz\n\n @property\n def gcode_start_point_mm(self) -> Tuple[int, int, int]:\n return 
self._gcode_start_point_mm\n\n @gcode_start_point_mm.setter\n def gcode_start_point_mm(self, gcode_start_point_mm: Tuple[int, int, int]):\n self._gcode_start_point_mm = gcode_start_point_mm\n\n @property\n def gcode_axis(self) -> List[Literal[\"x\", \"y\", \"z\"]]:\n return self._gcode_axis\n\n @gcode_axis.setter\n def gcode_axis(self, gcode_axis: List[Literal[\"x\", \"y\", \"z\"]]):\n self._gcode_axis = gcode_axis\n\n @property\n def gcode_distance_mm(self) -> int:\n return self._gcode_distance_mm\n\n @gcode_distance_mm.setter\n def gcode_distance_mm(self, gcode_distance_mm: int):\n self._gcode_distance_mm = gcode_distance_mm\n\n @property\n def gcode_step_count(self) -> int:\n return self._gcode_step_count\n\n @gcode_step_count.setter\n def gcode_step_count(self, gcode_step_count: int):\n self._gcode_step_count = gcode_step_count\n\n @property\n def gcode_sequence_count(self) -> int:\n return self._gcode_sequence_count\n\n @gcode_sequence_count.setter\n def gcode_sequence_count(self, gcode_sequence_count: int):\n self._gcode_sequence_count = gcode_sequence_count\n\n @property\n def start_frequency_hz(self) -> int:\n return self._start_frequency_hz\n\n @start_frequency_hz.setter\n def start_frequency_hz(self, start_frequency_hz: int):\n self._start_frequency_hz = start_frequency_hz\n\n @property\n def stop_frequency_hz(self) -> int:\n return self._stop_frequency_hz\n\n @stop_frequency_hz.setter\n def stop_frequency_hz(self, stop_frequency_hz: int):\n self._stop_frequency_hz = stop_frequency_hz\n\n @property\n def step_frequency_hz(self) -> int:\n return self._step_frequency_hz\n\n @step_frequency_hz.setter\n def step_frequency_hz(self, step_frequency_hz: int):\n self._step_frequency_hz = step_frequency_hz\n\n @property\n def start_zeta_em2(self) -> int:\n return self._start_zeta_em2\n\n @start_zeta_em2.setter\n def start_zeta_em2(self, start_zeta_em2: int):\n self._start_zeta_em2 = start_zeta_em2\n\n @property\n def stop_zeta_em2(self) -> int:\n return self._stop_zeta_em2\n\n @stop_zeta_em2.setter\n def stop_zeta_em2(self, stop_zeta_em2: int):\n self._stop_zeta_em2 = stop_zeta_em2\n\n @property\n def step_zeta_em2(self) -> int:\n return self._step_zeta_em2\n\n @step_zeta_em2.setter\n def step_zeta_em2(self, step_zeta_em2: int):\n self._step_zeta_em2 = step_zeta_em2\n\n @property\n def output_file_prefix(self) -> str:\n return self._output_file_prefix\n\n @output_file_prefix.setter\n def output_file_prefix(self, output_file_prefix: str):\n self._output_file_prefix = output_file_prefix\n\n @property\n def output_dir(self) -> str:\n return self._output_dir\n\n @output_dir.setter\n def output_dir(self, output_dir: str):\n self._output_dir = output_dir\n\n @property\n def do_dry_run(self) -> bool:\n return self._do_dry_run\n\n @do_dry_run.setter\n def do_dry_run(self, do_dry_run: bool):\n self._do_dry_run = do_dry_run\n\n def is_running(self) -> bool:\n return True if self._background_task is not None and self._background_task.is_alive() else False\n\n def task_execution_had_errors(self) -> bool:\n return self.controller_response_error or self.controller_response_error or self.unhandled_exception\n\n def _send_on_event_callback(self, event: RecordingEventType):\n if self.on_event_callback:\n self.on_event_callback(event)\n\n def _send_on_thread_event_callback(self, event: RecordingEventType):\n if event == RecordingEventType.PROCESSING_FINISHED:\n self._thread_stop_timestamp = time.time()\n\n if self.on_event_callback:\n self.on_event_callback(event)\n\n # TODO: force an early thread 
termination not by just terminating run().\n # Reason: Thread.is_alive() takes up to 30 seconds after run() terminated\n # to report not-alive. This works but sounds like a bug though.\n if event in [RecordingEventType.PROCESSING_FINISHED,\n RecordingEventType.FIFO_OVERRUN,\n RecordingEventType.UNHANDLED_EXCEPTION,\n RecordingEventType.ABORTED]:\n self.logger.info(\"recording thread terminated\")\n raise SystemExit()\n\n def stop(self) -> None:\n self._do_abort_flag.set()\n self._send_on_event_callback(RecordingEventType.ABORTING)\n if self._background_task:\n try:\n self._background_task.join()\n except RuntimeError as _e:\n self.logger.info(\"no running thread that can be stopped\")\n self._background_task = None\n self._background_task_stop_timestamp = time.time()\n self._send_on_event_callback(RecordingEventType.ABORTED)\n\n def get_last_run_duration_s(self) -> Optional[float]:\n \"\"\"\n Returns the last known duration.\n\n Note: Whenever this method is called, make sure to assert that the thread is not running.\n\n This is-running check is skipped here on purpose.\n Normally the child thread is the caller itself.\n The call propagated indirectly through the plugin's callback that most likely called this method again.\n In that case the thread is always running.\n\n :return: the last known duration; None if unknown of thread is still running\n \"\"\"\n return None if not self._thread_stop_timestamp or not self._background_task_start_timestamp else self._thread_stop_timestamp - self._background_task_start_timestamp\n\n def run(self) -> None:\n py3dpaxxel_octo = Py3dpAxxelOcto(self.printer, self.logger)\n self.controller_fifo_overrun_error = False\n self.controller_response_error = False\n self.unhandled_exception = False\n self._do_abort_flag.clear()\n self._background_task_stop_timestamp = None\n\n if not self.printer.is_operational():\n self.logger.warning(\"received request to start recording but printer is not operational\")\n return\n\n try:\n self.logger.info(\"start recording ...\")\n self._background_task = RecordStepSeriesBackgroundTask(\n logger=self.logger,\n task=RecordStepSeriesTask(\n logger=self.logger,\n runner=SamplingStepsSeriesRunner(\n octoprint_api=py3dpaxxel_octo,\n controller_serial_device=self.controller_serial_device,\n controller_record_timelapse_s=self.controller_record_timelapse_s,\n controller_decode_timeout_s=self.controller_decode_timeout_s,\n sensor_odr=OutputDataRateFromHz[self.sensor_odr_hz],\n gcode_start_point_mm=self.gcode_start_point_mm,\n gcode_axis=self.gcode_axis,\n gcode_distance_mm=self.gcode_distance_mm,\n gcode_step_repeat_count=self.gcode_step_count,\n gcode_sequence_repeat_count=self.gcode_sequence_count,\n fx_start_hz=self.start_frequency_hz,\n fx_stop_hz=self.stop_frequency_hz,\n fx_step_hz=self.step_frequency_hz,\n zeta_start_em2=self.start_zeta_em2,\n zeta_stop_em2=self.start_zeta_em2,\n zeta_step_em2=self.step_zeta_em2,\n output_file_prefix=self.output_file_prefix,\n output_dir=self.output_dir,\n do_dry_run=self.do_dry_run,\n do_abort_flag=self._do_abort_flag),\n on_event_callback=self._send_on_thread_event_callback))\n self._send_on_event_callback(RecordingEventType.PROCESSING)\n self._background_task_start_timestamp = time.time()\n self._background_task.start()\n\n except Exception as e:\n self.unhandled_exception = True\n self.logger.error(\"railed to start recording thread\")\n self.logger.error(str(e))\n self._send_on_event_callback(RecordingEventType.UNHANDLED_EXCEPTION)"
},
{
"identifier": "RunMeta",
"path": "octoprint_accelerometer/transfer_types.py",
"snippet": "class RunMeta:\n started: Optional[Timestamp] = None # Timestamp()\n stopped: Optional[Timestamp] = None # Timestamp()\n sequences: Dict[int, SequenceMeta] = field(default_factory=lambda: ({}))"
},
{
"identifier": "SequenceMeta",
"path": "octoprint_accelerometer/transfer_types.py",
"snippet": "class SequenceMeta:\n streams: Dict[str, StreamMeta] = field(default_factory=lambda: ({}))"
},
{
"identifier": "StreamMeta",
"path": "octoprint_accelerometer/transfer_types.py",
"snippet": "class StreamMeta:\n file: Optional[File] = None # = File()\n meta: Optional[FilenameMetaStream] = None # = FilenameMetaStream()\n ffts: Dict[str, FftMeta] = field(default_factory=lambda: ({}))"
},
{
"identifier": "DataSets",
"path": "octoprint_accelerometer/transfer_types.py",
"snippet": "class DataSets:\n runs: Dict[str, RunMeta] = field(default_factory=lambda: ({}))"
},
{
"identifier": "FftMeta",
"path": "octoprint_accelerometer/transfer_types.py",
"snippet": "class FftMeta:\n file: Optional[File] = None # = File()\n meta: Optional[FilenameMetaFft] = None # = FilenameMetaStream()"
},
{
"identifier": "Timestamp",
"path": "octoprint_accelerometer/transfer_types.py",
"snippet": "class Timestamp:\n year: int = 0\n month: int = 0\n day: int = 0\n hour: int = 0\n minute: int = 0\n second: int = 0\n milli_second: int = 0"
}
] | import os
import flask
import octoprint.plugin
from typing import Any, Dict, List, Literal, Optional, Tuple
from octoprint.server.util.tornado import LargeResponseHandler, path_validation_factory
from octoprint.util import is_hidden_path
from py3dpaxxel.cli.args import convert_axis_from_str
from py3dpaxxel.controller.api import Py3dpAxxel
from py3dpaxxel.sampling_tasks.series_argument_generator import RunArgsGenerator
from py3dpaxxel.storage.file_filter import FileSelector, File
from py3dpaxxel.storage.filename import timestamp_from_args
from py3dpaxxel.storage.filename_meta import FilenameMetaStream, FilenameMetaFft
from octoprint_accelerometer.data_post_process import DataPostProcessRunner
from octoprint_accelerometer.event_types import DataProcessingEventType, RecordingEventType
from octoprint_accelerometer.record_step_series import RecordStepSeriesRunner
from octoprint_accelerometer.transfer_types import RunMeta, SequenceMeta, StreamMeta, DataSets, FftMeta, Timestamp | 7,431 | self.auto_home: bool = False
self.start_frequency_hz: int = 0
self.stop_frequency_hz: int = 0
self.step_frequency_hz: int = 0
self.start_zeta_em2: int = 0
self.stop_zeta_em2: int = 0
self.step_zeta_em2: int = 0
self.sensor_output_data_rate_hz: int = 0
self.data_remove_before_run: bool = False
self.do_sample_x: bool = False
self.do_sample_y: bool = False
self.do_sample_z: bool = False
self.recording_timespan_s: float = 0
self.sequence_separation_s: float = 0
self.step_separation_s: float = 0
self.do_dry_run: bool = False
# other parameters shared with UI
self.devices_seen: List[str] = []
self.device: str = ""
self.controller_fifo_overrun_error: bool = False
self.controller_response_error: bool = False
# following parameters are computed from above parameters
self.axis_x_sampling_start: Point3D = Point3D(0, 0, 0)
self.axis_y_sampling_start: Point3D = Point3D(0, 0, 0)
self.axis_z_sampling_start: Point3D = Point3D(0, 0, 0)
# recording runner: once constructed before invocation all properties shall be updated
self.data_recording_runner: Optional[RecordStepSeriesRunner] = None
self.data_processing_runner: Optional[DataPostProcessRunner] = None
@staticmethod
def _get_devices() -> Tuple[str, List[str]]:
"""
:return: tuple of primary device (if any) and list of all devices
"""
seen_devices: List[str] = [k for k in Py3dpAxxel.get_devices_dict().keys()]
primary: str = seen_devices[0] if len(seen_devices) > 0 else None
return primary, seen_devices
def _update_seen_devices(self):
primary, seen_devices = self._get_devices()
self._logger.debug(f"seen devices: primary={primary}, seen={seen_devices}")
self.devices_seen = seen_devices
self.device = primary if primary is not None else ""
@octoprint.plugin.BlueprintPlugin.route("/set_values", methods=["POST"])
def on_api_set_values(self):
data = flask.request.json
self._update_members_from_api(data)
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/start_recording", methods=["POST"])
def on_api_start_recording(self):
self._start_recording()
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/abort_recording", methods=["POST"])
def on_api_abort_recording(self):
self._abort_recording()
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/start_data_processing", methods=["POST"])
def on_api_start_data_processing(self):
self._start_data_processing()
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/get_estimate", methods=["GET"])
def on_api_get_estimate(self):
return flask.jsonify({f"estimate": self._estimate_duration()})
@octoprint.plugin.BlueprintPlugin.route("/get_parameters", methods=["GET"])
def on_api_get_parameters(self):
return flask.jsonify({f"parameters": self._get_parameter_dict(flask.request.args)})
@octoprint.plugin.BlueprintPlugin.route("/get_files_listing", methods=["GET"])
def on_api_get_files_listing(self):
fs = FileSelector(os.path.join(self.get_plugin_data_folder(), ".*"))
files_details = fs.filter()
return flask.jsonify({f"files": files_details})
@octoprint.plugin.BlueprintPlugin.route("/get_stream_files_listing", methods=["GET"])
def on_api_get_stream_files_listing(self):
fs = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_STREAM_FILE_NAME_PREFIX}-.*\\.tsv$"))
files = fs.filter()
files_details = [StreamMeta(f, FilenameMetaStream().from_filename(f.filename_ext)) for f in files]
return flask.jsonify({f"stream_files": files_details})
@octoprint.plugin.BlueprintPlugin.route("/get_fft_files_listing", methods=["GET"])
def on_api_get_fft_files_listing(self):
fs = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_FFT_FILE_NAME_PREFIX}-.*\\.tsv$"))
files = fs.filter()
files_details = [FftMeta(f, FilenameMetaFft().from_filename(f.filename_ext)) for f in files]
return flask.jsonify({f"fft_files": files_details})
@octoprint.plugin.BlueprintPlugin.route("/get_data_listing", methods=["GET"])
def on_api_get_data_listing(self):
fs_stream = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_STREAM_FILE_NAME_PREFIX}-.*\\.tsv$"))
fs_fft = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_FFT_FILE_NAME_PREFIX}-.*\\.tsv$"))
files_meta_data_stream: List[Tuple[File, FilenameMetaStream]] = [(f, FilenameMetaStream().from_filename(f.filename_ext)) for f in fs_stream.filter()]
files_meta_data_fft: List[Tuple[File, FilenameMetaFft]] = [(f, FilenameMetaFft().from_filename(f.filename_ext)) for f in fs_fft.filter()]
data_sets: DataSets = DataSets()
# append all streams
for file_meta, filename_meta in files_meta_data_stream:
run_hash, sequence_nr, stream_hash = filename_meta.run_hash, filename_meta.sequence_nr, filename_meta.stream_hash
if run_hash not in data_sets.runs.keys():
data_sets.runs[run_hash] = RunMeta()
if sequence_nr not in data_sets.runs[run_hash].sequences.keys():
|
class Point3D:
def __init__(self, x: int, y: int, z: int):
self.x: int = x
self.y: int = y
self.z: int = z
def __str__(self):
return f"x={self.x} y={self.y} z={self.z}"
class OctoprintAccelerometerPlugin(octoprint.plugin.StartupPlugin,
octoprint.plugin.SettingsPlugin,
octoprint.plugin.AssetPlugin,
octoprint.plugin.TemplatePlugin,
octoprint.plugin.BlueprintPlugin):
OUTPUT_STREAM_FILE_NAME_PREFIX: str = "axxel"
OUTPUT_FFT_FILE_NAME_PREFIX: str = "fft"
# noinspection PyMissingConstructor
def __init__(self):
# following parameters are shared among settings and UI
self.distance_x_mm: int = 0
self.distance_y_mm: int = 0
self.distance_z_mm: int = 0
self.step_count: int = 0
self.speed_x_mm_s: int = 0
self.speed_y_mm_s: int = 0
self.speed_z_mm_s: int = 0
self.acceleration_x_mm_ss: int = 0
self.acceleration_y_mm_ss: int = 0
self.acceleration_z_mm_ss: int = 0
self.anchor_point_coord_x_mm: int = 0
self.anchor_point_coord_y_mm: int = 0
self.anchor_point_coord_z_mm: int = 0
self.sequence_count: int = 0
self.go_start: bool = False
self.return_start: bool = False
self.auto_home: bool = False
self.start_frequency_hz: int = 0
self.stop_frequency_hz: int = 0
self.step_frequency_hz: int = 0
self.start_zeta_em2: int = 0
self.stop_zeta_em2: int = 0
self.step_zeta_em2: int = 0
self.sensor_output_data_rate_hz: int = 0
self.data_remove_before_run: bool = False
self.do_sample_x: bool = False
self.do_sample_y: bool = False
self.do_sample_z: bool = False
self.recording_timespan_s: float = 0
self.sequence_separation_s: float = 0
self.step_separation_s: float = 0
self.do_dry_run: bool = False
# other parameters shared with UI
self.devices_seen: List[str] = []
self.device: str = ""
self.controller_fifo_overrun_error: bool = False
self.controller_response_error: bool = False
# following parameters are computed from above parameters
self.axis_x_sampling_start: Point3D = Point3D(0, 0, 0)
self.axis_y_sampling_start: Point3D = Point3D(0, 0, 0)
self.axis_z_sampling_start: Point3D = Point3D(0, 0, 0)
# recording runner: once constructed before invocation all properties shall be updated
self.data_recording_runner: Optional[RecordStepSeriesRunner] = None
self.data_processing_runner: Optional[DataPostProcessRunner] = None
@staticmethod
def _get_devices() -> Tuple[str, List[str]]:
"""
:return: tuple of primary device (if any) and list of all devices
"""
seen_devices: List[str] = [k for k in Py3dpAxxel.get_devices_dict().keys()]
primary: str = seen_devices[0] if len(seen_devices) > 0 else None
return primary, seen_devices
def _update_seen_devices(self):
primary, seen_devices = self._get_devices()
self._logger.debug(f"seen devices: primary={primary}, seen={seen_devices}")
self.devices_seen = seen_devices
self.device = primary if primary is not None else ""
@octoprint.plugin.BlueprintPlugin.route("/set_values", methods=["POST"])
def on_api_set_values(self):
data = flask.request.json
self._update_members_from_api(data)
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/start_recording", methods=["POST"])
def on_api_start_recording(self):
self._start_recording()
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/abort_recording", methods=["POST"])
def on_api_abort_recording(self):
self._abort_recording()
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/start_data_processing", methods=["POST"])
def on_api_start_data_processing(self):
self._start_data_processing()
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/get_estimate", methods=["GET"])
def on_api_get_estimate(self):
return flask.jsonify({f"estimate": self._estimate_duration()})
@octoprint.plugin.BlueprintPlugin.route("/get_parameters", methods=["GET"])
def on_api_get_parameters(self):
return flask.jsonify({f"parameters": self._get_parameter_dict(flask.request.args)})
@octoprint.plugin.BlueprintPlugin.route("/get_files_listing", methods=["GET"])
def on_api_get_files_listing(self):
fs = FileSelector(os.path.join(self.get_plugin_data_folder(), ".*"))
files_details = fs.filter()
return flask.jsonify({f"files": files_details})
@octoprint.plugin.BlueprintPlugin.route("/get_stream_files_listing", methods=["GET"])
def on_api_get_stream_files_listing(self):
fs = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_STREAM_FILE_NAME_PREFIX}-.*\\.tsv$"))
files = fs.filter()
files_details = [StreamMeta(f, FilenameMetaStream().from_filename(f.filename_ext)) for f in files]
return flask.jsonify({f"stream_files": files_details})
@octoprint.plugin.BlueprintPlugin.route("/get_fft_files_listing", methods=["GET"])
def on_api_get_fft_files_listing(self):
fs = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_FFT_FILE_NAME_PREFIX}-.*\\.tsv$"))
files = fs.filter()
files_details = [FftMeta(f, FilenameMetaFft().from_filename(f.filename_ext)) for f in files]
return flask.jsonify({f"fft_files": files_details})
@octoprint.plugin.BlueprintPlugin.route("/get_data_listing", methods=["GET"])
def on_api_get_data_listing(self):
fs_stream = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_STREAM_FILE_NAME_PREFIX}-.*\\.tsv$"))
fs_fft = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_FFT_FILE_NAME_PREFIX}-.*\\.tsv$"))
files_meta_data_stream: List[Tuple[File, FilenameMetaStream]] = [(f, FilenameMetaStream().from_filename(f.filename_ext)) for f in fs_stream.filter()]
files_meta_data_fft: List[Tuple[File, FilenameMetaFft]] = [(f, FilenameMetaFft().from_filename(f.filename_ext)) for f in fs_fft.filter()]
data_sets: DataSets = DataSets()
# append all streams
for file_meta, filename_meta in files_meta_data_stream:
run_hash, sequence_nr, stream_hash = filename_meta.run_hash, filename_meta.sequence_nr, filename_meta.stream_hash
if run_hash not in data_sets.runs.keys():
data_sets.runs[run_hash] = RunMeta()
if sequence_nr not in data_sets.runs[run_hash].sequences.keys(): | data_sets.runs[run_hash].sequences[sequence_nr] = SequenceMeta() | 5 | 2023-11-14 17:15:15+00:00 | 12k |
hmmbug/pythaidate | tests/test_pakdate.py | [
{
"identifier": "julianday",
"path": "pythaidate/julianday.py",
"snippet": "def to_julianday(year, month, day):\ndef from_julianday(jd):\ndef today(): # pragma: no cover\ndef date_to_julianday(d):\ndef julianday_to_date(obj):\n B = 0\n A = math.trunc(yearp / 100.)\n B = 2 - A + math.trunc(A / 4.)\n C = math.trunc((365.25 * yearp) - 0.75) if yearp < 0 else math.trunc(365.25 * yearp)\n D = math.trunc(30.6001 * (monthp + 1))\n F, I = math.modf(jd)\n I = int(I)\n A = math.trunc((I - 1867216.25)/36524.25)\n B = (I + 1 + A - math.trunc(A / 4.)) if I > 2299160 else I\n C = B + 1524\n D = math.trunc((C - 122.1) / 365.25)\n E = math.trunc(365.25 * D)\n G = math.trunc((C - E) / 30.6001)"
},
{
"identifier": "CsDate",
"path": "pythaidate/csdate.py",
"snippet": "class CsDate:\n\n def __init__(self, year: int, month: int=None, day: int=None,\n month_style: int = MONTH_SUK):\n logging.debug(\"args year:%s month:%s day:%s, month_style:%s\",\n year, month, day, month_style)\n self.__year = year\n self.__month = month\n self.__day = day # day of month\n self.__days = None # days elapsed in year\n self.__month_style = month_style # Sukothai, Chiang Mai, Keng Tung\n self.__init_ymd()\n self.__calculate()\n logging.debug(\"final y:%s m:%s d:%s days:%s\",\n self.__year, self.__month, self.__day, self.__days)\n\n def __init_ymd(self):\n \"\"\"\n Initialise from year, month and day args.\n \"\"\"\n self.__year0 = self.calculate_year0(self.__year)\n # logging.debug(\"offset_days:%d\", self.__year0.offset_days)\n\n date_offset = None\n if self.__month == 5:\n date_offset = self.__day\n elif self.__month == 6:\n date_offset = 29 + self.__day\n\n MP = MONTH_POSITION_C if self.__year0.cal_type == \"C\" else MONTH_POSITION_AB\n tmonth = MP.index(self.__month)\n if date_offset and date_offset < self.__year0.offset_days:\n # this is a month 5 or 6 date at end of the year\n tmonth += 13 if self.__year0.cal_type == \"C\" else 12\n # shift month number to end of the index in LUNAR_MONTHS[]\n self.__month += 10\n self.__days = MONTH_CUMULATIVE_DAYS[self.__year0.cal_type][tmonth-1] + self.__day - self.__year0.offset_days\n logging.debug(\"ymd: y:%s m:%s d:%s days:%s cal_type:%s tmonth:%s\",\n self.__year, self.__month, self.__day,\n self.__days, self.__year0.cal_type, tmonth)\n\n def __calculate(self):\n # horakhun: The number of elapsed days since epoch plus days since New Year's Day (Thai: หรคุฌ)\n self.__horakhun = (self.__year * DAYS_IN_800_YEARS + EPOCH_OFFSET) // TIME_UNITS_IN_1_DAY + 1 + self.__days\n assert self.julianday > CS_JULIAN_DAY_OFFSET # check for pre-epoch dates\n\n # kammacapon: A quantity that gives the excess of solar days over whole solar days (Thai: กัมมัขผล)\n self.__kammacapon = TIME_UNITS_IN_1_DAY - (self.__year * DAYS_IN_800_YEARS + EPOCH_OFFSET) % TIME_UNITS_IN_1_DAY\n\n # uccapon: The measure of the position of the Moon's apogee. It increases by one unit a day to\n # a maximum of 3232 (Thai: อุจจพล)\n self.__uccapon = (self.__horakhun + UCCAPON_CONSTANT) % APOGEE_ROTATION_DAYS\n\n # avoman: The excess of lunar days over solar days in units of 1/692 of a lunar day modulus 692.\n # It increases by 11 units each solar day. 
It is used to determine when to add intercalary days\n # in the calendar (Thai: อวมาน)\n self.__avoman = (self.__horakhun * 11 + 650) % 692\n if self.__avoman == 0:\n self.__avoman = 692\n\n # masaken: Number of lunar months since the epoch (Thai: มาสเกฌฑ์)\n avoman_div = ((self.__horakhun + self.days) * 11 + 650) // 692\n self.__masaken = (avoman_div + self.__horakhun) // 30\n\n # tithi: a lunar day, equal to 1/30th of a synodic month (Thai: ดิถี)\n quot = (self.__horakhun * 11 + 650) // 692\n self.__tithi = (quot + self.__horakhun) % 30\n\n # self.avomanExtra = (self.horakhun * 11 + 650) % 692\n logging.debug(\"horakhun:%s kamma:%s quot:%s tt:%s\", self.__horakhun, self.__kammacapon, quot, self.__tithi)\n\n @staticmethod\n def calculate_year0(year: int):\n y = [\n LSYear(year - 2),\n LSYear(year - 1),\n LSYear(year),\n LSYear(year + 1),\n LSYear(year + 2),\n ]\n # logging.debug(\"[0] year0[].caltype:%s\", \"\".join([i.cal_type for i in y]))\n\n for i in (0, 1, 2, 3, 4):\n if y[2].tithi == 24 and y[3].tithi == 6:\n # where tithi of this year is 24 and next year is 6, set all years to C-type\n # adjust next_nyd weekday\n y[i].cal_type = \"C\"\n y[i].next_nyd = (y[i].next_nyd + 2) % 7\n # logging.debug(\"[1] year0[].caltype:%s\", \"\".join([i.cal_type for i in y]))\n\n # Adjust c-type years where a intercalary day and month coincide. This can't happen\n # in the Thai calendar (unlike the Burmese) so we decide if the intercalary day is moved\n # to the previous or next year. This is done by ensuring a correct sequence of weekdays\n # from one year to the next.\n for i in (1, 2, 3):\n if y[i].cal_type == \"c\":\n j = 1 if y[i].nyd == y[i-1].next_nyd else -1\n y[i+j].cal_type = \"B\"\n y[i+j].next_nyd = (y[i+j].next_nyd + 1) % 7\n # logging.debug(\"[2] year0[].caltype:%s\", \"\".join([i.cal_type for i in y]))\n\n for i in (1, 2, 3):\n if y[i-1].next_nyd != y[i].nyd and y[i].next_nyd != y[i+1].nyd:\n y[i].offset = True\n y[i].langsak += 1\n y[i].nyd = (y[i].nyd + 6) % 7\n y[i].next_nyd = (y[i].next_nyd + 6) % 7\n\n # housekeeping - elabal any remaining c-type years as C-type; add day count too\n for i in (0, 1, 2, 3, 4):\n if y[i].cal_type == \"c\":\n y[i].cal_type = \"C\"\n y[i].caldays = CAL_TYPE_DAY_COUNTS[y[i].cal_type]\n # logging.debug(\"[F] year0[].caltype:%s\", \"\".join([i.cal_type for i in y]))\n\n # Determine month/day of new year\n y[2].first_month = \"C\" # as per Eade, C=>Caitra, V=>Vaisakha\n y[2].first_day = y[2].langsak\n y[2].offset_days = y[2].langsak # no.days offset from Caitra 1st\n if y[2].offset_days < (6 + int(y[2].offset)):\n y[2].first_month = \"V\"\n y[2].first_day = y[2].offset_days\n y[2].offset_days += 29\n return y[2]\n\n @staticmethod\n def find_date(cal: str, days: int):\n \"\"\"\n Given a calendar type (A, B, C) and number of days since new years day,\n return the month and day component of a date, derived from lookup tables.\n \"\"\"\n logging.debug(\"cal:%s days:%s\", cal, days)\n vals = {\n \"A\": (\n (383, 16), (354, 15), (324, 12), (295, 11), (265, 10), (236, 9),\n (206, 8), (177, 7), (147, 6), (118, 5), (88, 4), (59, 3), (29, 2),\n ),\n \"B\": (\n (384, 16), (355, 15), (325, 12), (296, 11), (266, 10), (237, 9),\n (207, 8), (178, 7), (148, 6), (119, 5), (89, 4), (59, 3), (29, 2),\n ),\n \"C\": (\n (384, 15), (354, 12), (325, 11), (295, 10), (266, 9), (236, 8),\n (207, 7), (177, 6), (148, 5), (118, 14), (88, 13), (59, 3), (29, 2),\n ),\n }\n assert cal in vals.keys(), ValueError(\"Cal {} not found\".format(cal))\n\n for a, b in vals[cal]:\n if days > a:\n 
days -= a\n logging.debug(\"solution: (a:%s b:%s) month:%s day:%s\",\n a, b, LUNAR_MONTHS[b], days)\n month = LUNAR_MONTHS[b]\n break\n month = LUNAR_MONTHS[1]\n else:\n logging.debug(\"default: month:%s (%s) day:%s\", 1, LUNAR_MONTHS[1], days)\n return month, days\n\n @classmethod\n def today(cls):\n \"\"\"\n Return today as CS date.\n \"\"\"\n jd = julianday.today()\n logging.debug(\"jd:%s\", jd)\n return cls.fromjulianday(jd)\n\n @classmethod\n def fromyd(cls, year: int, days: int):\n \"\"\"\n Return a Chulasakarat object from a year and days since new years day.\n \"\"\"\n logging.debug(\"start: year:%s days:%s\", year, days)\n year0 = cls.calculate_year0(year)\n days_in_year = 365 + int(year0.leapday)\n while days > days_in_year: # zero-indexed\n year += 1\n days -= days_in_year\n year0 = cls.calculate_year0(year)\n days_in_year = 365 + int(year0.leapday)\n logging.debug(\"days >= %s: year:%s days:%s\", 364 + int(year0.leapday), year, days)\n\n # logging.debug(\"year0 langsak:%s offset_days:%s\", year0.langsak, year0.offset_days)\n month, day = cls.find_date(year0.cal_type, year0.offset_days + days)\n logging.debug(\"year:%s month:%s day:%s\", year, month, day)\n return cls(year, month, day)\n\n @classmethod\n def fromjulianday(cls, jd: int):\n \"\"\"\n Return a Chulasakarat object from a Julian Day Number.\n \"\"\"\n hk = jd - CS_JULIAN_DAY_OFFSET\n year = (hk * 800 - 373) // 292207\n if hk % 292207 == 95333:\n # Every 800 years (292207 days), on the last day of the solar leap\n # year coinciding with an adhkimas lunar year, this jd->year\n # formula will be off by one day pushing the year forward by one\n # and the days count to -1.\n year -= 1\n days = 365\n logging.debug(\"800 year kamma adjustment\")\n else:\n year0 = cls.calculate_year0(year)\n days = hk - year0.horakhun\n # logging.debug(\"kamma:%s\", year0.kammacapon)\n # logging.debug(\"jd:%s year:%s days:%s cal_type:%s hk0:%s\", jd, year, days, year0.cal_type, year0.horakhun)\n logging.debug(\"jd:%s year:%s days:%s\", jd, year, days)\n return cls.fromyd(year=year, days=days)\n\n from_julianday = fromjulianday\n\n @classmethod\n def fromtimestamp(cls, ts):\n \"\"\"\n Return a Chulasakarat object from a UNIX timestamp.\n \"\"\"\n jd = ts // (24 * 60 * 60) + CS_UNIX_EPOCH_OFFSET\n return cls.fromjulianday(jd)\n\n @property\n def julianday(self):\n \"\"\"\n Return the Julian Day Number of this CS date.\n \"\"\"\n return self.__horakhun + CS_JULIAN_DAY_OFFSET\n\n @property\n def horakhun(self):\n return self.__horakhun\n\n @property\n def kammacapon(self):\n return self.__kammacapon\n\n @property\n def masaken(self):\n return self.__masaken\n\n @property\n def uccapon(self):\n return self.__uccapon\n\n @property\n def avoman(self):\n return self.__avoman\n\n @property\n def tithi(self):\n return self.__tithi\n\n @property\n def year(self):\n return self.__year\n\n @property\n def month(self):\n if self.__month == 15 or self.__month == 16:\n return self.__month - 10\n return self.__month\n\n @property\n def month_raw(self):\n return self.__month\n\n @property\n def day(self):\n return self.__day\n\n @property\n def days(self):\n return self.__days\n\n @property\n def solar_leap_year(self):\n return self.__year0.leapday\n\n @property\n def leap_day(self):\n return self.__year0.cal_type == 'B'\n\n @property\n def leap_month(self):\n return self.__year0.cal_type == 'C'\n\n @property\n def days_in_year(self):\n if self.__year0.cal_type == \"A\":\n return 354\n elif self.__year0.cal_type == \"B\":\n return 355\n elif 
self.__year0.cal_type == \"C\":\n return 384\n\n def replace(self, year=None, month=None, day=None):\n logging.debug(\"year:%s month%s day:%s\", year, month, day)\n y = year if year else self.year\n m = month if month else self.month\n d = day if day else self.day\n logging.debug(\"year:%s month%s day:%s\", y, m, d)\n return CsDate(y, m, d)\n\n def csweekday(self):\n return self.__horakhun % 7\n\n def weekday(self):\n return self.csweekday() - 2\n\n def isoweekday(self):\n return self.csweekday() - 1\n\n @property\n def yearnaksatr(self):\n idx = (self.year + 11) % 12\n if idx == 0:\n idx = 12\n return \"ปี\" + YEAR_NAKSATR[idx]\n\n def csformat(self):\n phase = \"ขึ้น\" if self.day <= 15 else \"แรม\"\n day = self.day if self.day <= 15 else self.day - 15\n s = \"{:s} เดือน {:s} {:s} {:s} ค่ำ {:s} จ.ศ.{:s}\".format(\n WEEKDAYS[self.csweekday()],\n digit_arabic_to_thai(self.month),\n phase,\n digit_arabic_to_thai(day),\n self.yearnaksatr,\n digit_arabic_to_thai(self.year)\n )\n s = digit_arabic_to_thai(s)\n return s\n\n def csformatymd(self):\n \"\"\"\n Return string in YYYY-MM-DD format.\n \"\"\"\n return \"{:4d}-{:02d}-{:02d}\".format(self.year, self.month, self.day)\n\n @classmethod\n def fromcsformat(self, s):\n s = digit_thai_to_arabic(s)\n weekday, _, month, phase, day, _, _, year = s.split()\n year = int(year.replace(\"จ.ศ.\", \"\"))\n month = int(month)\n day = int(day)\n if phase == \"แรม\":\n day += 15\n return CsDate(year, month, day)\n\n def cscalendar(self):\n return CsCalendarDate(self.year, self.month, self.day)\n\n def __str__(self):\n return self.csformat()\n\n def __int__(self):\n \"\"\"Convert to int by returning the Julian Day Number.\"\"\"\n return self.julianday\n\n def _hashable(self):\n return (\n self.__year,\n self.__month,\n self.__day,\n self.__days,\n self.__horakhun,\n self.__kammacapon,\n self.__tithi,\n self.__year0.cal_type,\n )\n\n def __hash__(self): # pragma: no cover\n return hash(self._hashable())\n\n def __lt__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday < other.julianday\n elif isinstance(other, date):\n return self.julianday < julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __le__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday <= other.julianday\n elif isinstance(other, date):\n return self.julianday <= julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __eq__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday == other.julianday\n elif isinstance(other, date):\n return self.julianday == julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __ge__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday >= other.julianday\n elif isinstance(other, date):\n return self.julianday >= julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __gt__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday > other.julianday\n elif isinstance(other, date):\n return self.julianday > julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __add__(self, other):\n if isinstance(other, timedelta):\n return CsDate.fromjulianday(self.julianday + other.days)\n return NotImplemented\n\n __radd__ = __add__\n\n def __sub__(self, other):\n if isinstance(other, timedelta):\n return self + timedelta(-other.days)\n elif hasattr(other, \"julianday\"):\n return 
timedelta(days=self.julianday - other.julianday)\n elif isinstance(other, date):\n other_jd = julianday.to_julianday(other.year, other.month, other.day)\n return timedelta(days=self.julianday - other_jd)\n return NotImplemented\n\n def debug(self): # pragma: no cover\n return {\n \"cp\": self.__year0,\n \"horakhun\": self.__horakhun,\n \"kamma\": self.__kammacapon,\n # \"avomanExtra\": self.avomanExtra,\n \"tt\": self.__tithi,\n \"year\": self.__year,\n \"month\": self.__month,\n \"day\": self.__day,\n \"days\": self.__days,\n \"cal_type\": self.__year0.cal_type,\n \"month_style\": self.__month_style,\n \"year0.langsak\": self.__year0.langsak,\n \"year0.offset\": self.__year0.offset,\n }"
},
{
"identifier": "PakDate",
"path": "pythaidate/pakdate.py",
"snippet": "class PakDate:\n\n def __init__(self, jd=None, pakcode=None, date=None):\n # assert jd is not None or pakcode is not None or date is not None\n self.__julianday = None\n self.__horakhun = None\n self.__pakkhagen = None\n self.__cycle = None\n self.__data = [0, 0, 0, 0, 0, 0]\n self.__pos = [None, None, None, None, None, None]\n self.__pakabbr = None\n\n if jd:\n self.__convert_julianday(jd)\n\n elif pakcode:\n self.__convert_pakcode(pakcode)\n\n elif date:\n jd = julianday.date_to_julianday(date)\n self.__convert_julianday(jd)\n\n @classmethod\n def today(cls):\n \"\"\"Return today as Pak date.\"\"\"\n return cls(jd=julianday.today())\n\n @classmethod\n def fromjulianday(cls, jd):\n \"\"\"Class method for Julian Day Number conversion.\"\"\"\n return cls(jd=jd)\n\n # @classmethod\n # def frompakcode(cls, pakcode):\n # \"\"\"Return Pak object from format string (x-a:b:c:d:e:f).\"\"\"\n # return cls(pakcode=pakcode)\n\n def __convert_julianday(self, jd):\n \"\"\"Convert from Julian Day Number.\"\"\"\n def div(a, b):\n c = 0\n while True:\n if b >= a:\n return c + 1, a\n a -= b\n c += 1\n\n def _adjust(row, prefix, col):\n logging.debug(\"_adjust_1(%s, %s, %s)\", row, 1-prefix, col-1)\n if col > len(layout[row][1-prefix]):\n rtn = None\n else:\n rtn = layout[row][1-prefix][col-1]\n logging.debug(\"_adjust_1(%s, %s, %s) -> %s\", row, 1-prefix, col-1, rtn)\n return rtn\n\n self.__julianday = jd\n self.__horakhun = jd - PAK_JULIAN_DAY_OFFSET\n if self.__horakhun <= 0:\n raise ValueError(\"Invalid Pakkhakhananaa range.\")\n\n days = self.__horakhun % PAK_DAYS_IN_CYCLE\n if days == 0:\n days = PAK_DAYS_IN_CYCLE\n self.__cycle = math.ceil(self.__horakhun / PAK_DAYS_IN_CYCLE)\n\n # ปักขคณนา row\n self.__data[0], rem = div(days, 16168)\n self.__pos[0] = (0, self.__data[0]-1)\n mahachula = layout[0][0][self.__data[0]-1]\n logging.debug(\"0 data:%s, mc:%s\", self.__data, self.__pos)\n\n # สัมพยุหะ, พยุหะ, สมุหะ, วรรค rows\n for row, divisor in ((1, 1447), (2, 251), (3, 59), (4, 15)):\n self.__data[row], rem = div(rem, divisor)\n mahachula1 = _adjust(row, mahachula, self.__data[row])\n # logging.debug(\"L: row:%s div:%s -> d[r]:%s rem:%s | mc:%s mc1:%s\", row, divisor, self.__data[row], rem, mc, mc1)\n if mahachula1 is None:\n # the row position is too large - decrement it by one and add\n # the divisor back on to rem for the next iteration. 
Do the\n # adjustment again and it should be correct.\n self.__data[row] -= 1\n rem += divisor\n mahachula1 = _adjust(row, mahachula, self.__data[row])\n self.__pos[row] = (1-mahachula, self.__data[row]-1) # display_pattern[row][self.__mahachula[row]][mc-1]\n # logging.debug(\"L: %s data:%s, mc:%s\", row, self.__data, self.__pos)\n mahachula = mahachula1\n\n # วัน (ค่ำ)\n self.__data[5] = rem\n self.__pos[5] = (mahachula, self.__data[5]-1) # display_pattern[row][self.__mahachula[4]][mc-1]\n logging.debug(\"F: %s %s %s\", self.__cycle, self.__data, self.__pos)\n\n def __convert_pakcode(self, s):\n \"\"\"Convert a Pak string (x-a:b:c:d:e:f) to a state object.\"\"\"\n cyc, pak = s.split(\"-\")\n cyc = int(cyc)\n assert cyc > 0, ValueError(\"Invalid Pak string.\")\n a, b, c, d, e, f = map(int, pak.split(\":\"))\n jd = (e - 1) * 15 + f\n jd = (d - 1) * 59 + jd\n jd = (c - 1) * 251 + jd\n jd = (b - 1) * 1447 + jd\n jd = (a - 1) * 16168 + jd\n jd += (cyc - 1) * PAK_DAYS_IN_CYCLE\n jd += 2355147\n self.__convert_julianday(jd)\n\n @property\n def julianday(self):\n # if self.__julianday is None:\n # self.__julianday = self.horakhun + PAK_JULIAN_DAY_OFFSET\n return self.__julianday\n\n @property\n def horakhun(self):\n \"\"\"\n Days since the Pakkhakhananaa epoch (1736-01-28 A.D., 2279-01-28 B.E.).(Thai: หรคุฌ)\n \"\"\"\n # if self.__horakhun is None:\n # self.__horakhun = (self.__data[0] - 1) * 16168 + \\\n # (self.__data[1] - 1) * 1447 + \\\n # (self.__data[2] - 1) * 251 + \\\n # (self.__data[3] - 1) * 59 + \\\n # (self.__data[4] - 1) * 15 + \\\n # self.__data[5]\n return self.__horakhun\n\n @property\n def pakkhagen(self):\n \"\"\"\n Number of lunar (14/15) day weeks since the epoch. (Thai: ปักขเกณฑ์)\n \"\"\"\n if self.__pakkhagen is None:\n self.__pakkhagen = (self.__cycle - 1) * 19612 + \\\n (self.__data[0] - 1) * 1095 + \\\n (self.__data[1] - 1) * 98 + \\\n (self.__data[2] - 1) * 17 + \\\n (self.__data[3] - 1) * 4 + \\\n self.__data[4]\n return self.__pakkhagen\n\n @property\n def pakcode(self):\n return \"{:d}-{:d}:{:d}:{:d}:{:d}:{:d}:{:d}\".format(self.__cycle, *self.__data)\n\n @property\n def pakabbr(self):\n \"\"\"\n Returns a string in \"เลขใช้บอกปักข์\" format\n \"\"\"\n def _digit1(d):\n return d // 10 if d > 9 else d\n\n def _digit2(d):\n return d % 10 if d > 9 else \" \"\n\n def _ctrans(c):\n return c if c == \" \" else \"กขฅจหฉษฐฬฮ\"[c-1]\n\n def _ntrans(c):\n return c if c == \" \" else \"๐๑๒๓๔๕๖๗๘๙\"[c]\n\n if self.__pakabbr is None:\n s1, s2 = [], []\n for i in range(5):\n v = self.__data[i]\n mahachula, col = self.__pos[i]\n if layout[i][mahachula][col] == 0:\n s1.append(_ctrans(_digit1(v)))\n s2.append(_ctrans(_digit2(v)))\n else:\n s1.append(_ntrans(_digit1(v)))\n s2.append(_ntrans(_digit2(v)))\n self.__pakabbr = \"\".join(s1) + \"\\n\" + \"\".join(s2)\n return self.__pakabbr.rstrip()\n\n @property\n def iswaxing(self):\n return self.pakkhagen % 2 == 0\n\n @property\n def iswaning(self):\n return self.pakkhagen % 2 == 1\n\n @property\n def iswanphra(self):\n f_days = 15 if self.__pos[5][0] else 14\n d = self.__data[5]\n val = d / f_days\n logging.debug(\"f_days:%d d:%d val:%d t1:%s t2:%s t3:%s\",\n f_days, d, val,\n val == 4/7, val == 8/15, val == 1)\n return val == 4/7 or val == 8/15 or val == 1\n\n issabbath = iswanphra\n\n def weekday(self):\n return self.__horakhun % 7 - 1\n\n def isoweekday(self):\n return self.__horakhun % 7\n\n def debug(self):\n return {\n \"pakcode\": self.pakcode,\n \"jd\": self.__julianday,\n \"hk\": self.__horakhun,\n \"pakkhagen\": 
self.__pakkhagen,\n }\n\n def pakboard(self, fh=None):\n def _display():\n def _stringify(b):\n max_prefix_len = max(map(lambda x: thai_string_width(x[0]), board))\n for r in board:\n numspaces = max_prefix_len - thai_string_width(r[0])\n r[0] = r[0] + \" \" * numspaces\n return max_prefix_len\n\n max_prefix_len = _stringify(board)\n blank = \" \" * (max_prefix_len - 1)\n headings = digit_arabic_to_thai(\" \".join([\"{:>2d}\".format(i) for i in range(1, 19)]))\n print(blank + \" \" + headings, file=fh)\n for i, r in enumerate(board):\n content = []\n for c in r[1:]:\n fmt = \"\\033[;7m{:>2s}\\033[0;0m\" if c & 0x80 else \"{:>2s}\"\n if i < 9:\n c = \"ม\" if c & 0x7F == 1 else \"จ\"\n else:\n c = digit_arabic_to_thai(str(c & 0x7f))\n content.append(fmt.format(c))\n print(\"{:s} {:s}\".format(r[0], \" \".join(content)), file=fh)\n print(\" \".join([\n blank, \" \",\n \"รอบที่\", digit_arabic_to_thai(self.__cycle), \" \",\n \"หรคุณปักขคณนา\", digit_arabic_to_thai(self.horakhun), \" \",\n \"ปักขเกณฑ์\", digit_arabic_to_thai(self.pakkhagen)\n ]), file=fh)\n\n if fh is None:\n fh = sys.stdout\n\n # setup board\n board = [\n [\"ปักขคณนา\", *layout[0][0]],\n [\"มหาสัมพยุหะ\", *layout[1][0]],\n [\"จุลสัมพยุหะ\", *layout[1][1]],\n [\"มหาพยุหะ\", *layout[2][0]],\n [\"จุลพยุหะ\", *layout[2][1]],\n [\"มหาสมุหะ\", *layout[3][0]],\n [\"จุลสมุหะ\", *layout[3][1]],\n [\"มหาวรรค\", *layout[4][0]],\n [\"จุลวรรค\", *layout[4][1]],\n [\"มหาปักข์\", *list(range(1,16))],\n [\"จุลปักข์\", *list(range(1,15))],\n ]\n\n # highlight row items\n for i in range(6):\n mahachula, col = self.__pos[i]\n row = 0 if i == 0 else i * 2 - 1 + mahachula\n board[row][col+1] += 0x80 # Set MSB to 1 as a \"selected\" flag\n _display()\n\n def __str__(self):\n # มหาสัมพยุหะ 6 จุลพยุหะ 5 จุลสมุหะ 6 จุลวรรค 2 จุลปักข์ 4 ขึ้น 3 ค่ำ (ปักข์ขาด / ปักข์ถ้วน)\n output = []\n next_row = 0\n for i, label in enumerate((\"สัมพยุหะ\", \"พยุหะ\", \"สมุหะ\", \"วรรค\", \"ปักข์\")):\n val = layout[i][next_row][self.__data[i]-1]\n output += [(\"มหา\" if val else \"จุล\") + label, str(self.__data[i])]\n next_row = 1 - val\n output += [\"ขึ้น\" if self.iswaxing else \"แรม\",\n str(self.__data[5]),\n \"ค่ำ\",\n \"(\" + (\"ปักข์ขาด\" if next_row else \"ปักข์ถ้วน\") + \")\"]\n return digit_arabic_to_thai(\" \".join(output))\n\n def __lt__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday < other.julianday\n elif isinstance(other, date):\n return self.julianday < julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __le__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday <= other.julianday\n elif isinstance(other, date):\n return self.julianday <= julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __eq__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday == other.julianday\n elif isinstance(other, date):\n return self.julianday == julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __ge__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday >= other.julianday\n elif isinstance(other, date):\n return self.julianday >= julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __gt__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday > other.julianday\n elif isinstance(other, date):\n return self.julianday > julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def 
__add__(self, other):\n if isinstance(other, timedelta):\n return PakDate.fromjulianday(self.julianday + other.days)\n return NotImplemented\n\n __radd__ = __add__\n\n def __sub__(self, other):\n if isinstance(other, timedelta):\n return self + timedelta(-other.days)\n elif hasattr(other, \"julianday\"):\n return timedelta(days=self.julianday - other.julianday)\n elif isinstance(other, date):\n other_jd = julianday.to_julianday(other.year, other.month, other.day)\n return timedelta(days=self.julianday - other_jd)\n return NotImplemented\n\n def debug_reset(self): # pragma: no cover\n self.__horakhun = None\n self.__julianday = None\n self.__pakkhagen = None"
},
{
"identifier": "PAK_JULIAN_DAY_OFFSET",
"path": "pythaidate/constants.py",
"snippet": "PAK_JULIAN_DAY_OFFSET = 2355147"
}
] | from datetime import date, timedelta
from pythaidate import PakDate, CsDate, julianday
from pythaidate.constants import PAK_JULIAN_DAY_OFFSET
import json
import unittest
import os
import pathlib
import random
import logging | 9,969 |
RUN_PERCENT = 10
if os.environ.get("RUN_PERCENT"):
RUN_PERCENT = int(os.environ.get("RUN_PERCENT"))
if RUN_PERCENT > 100:
RUN_PERCENT = 100
RUN_PERCENT /= 100
for datafile in ("pak.data", "pak.min.data"):
datafile = os.path.join(pathlib.Path(__file__).parent.resolve(), "data", datafile)
if os.path.exists(datafile):
break
else:
raise FileNotFoundError("Pak data file not found.")
random.seed()
def read_test_date(sample=1, minjd=None):
with open(datafile) as fh:
for ln in fh:
if random.random() > sample:
continue
i = ln.rstrip().split(" ")
y, m, d = i[4].split("-")
e = {
"pakcode": i[0],
"jd": int(i[1][3:]),
"hk": int(i[2][3:]),
"masak": int(i[3][6:]),
"year": int(y),
"month": int(m),
"day": int(d),
"iswanphra": i[5] == 't',
}
if minjd and e["jd"] < minjd:
continue
yield e
class Test_PakDate(unittest.TestCase):
def test_jd_pre_epoch(self):
with self.assertRaises(ValueError):
# pre-epoch jd
|
RUN_PERCENT = 10
if os.environ.get("RUN_PERCENT"):
RUN_PERCENT = int(os.environ.get("RUN_PERCENT"))
if RUN_PERCENT > 100:
RUN_PERCENT = 100
RUN_PERCENT /= 100
for datafile in ("pak.data", "pak.min.data"):
datafile = os.path.join(pathlib.Path(__file__).parent.resolve(), "data", datafile)
if os.path.exists(datafile):
break
else:
raise FileNotFoundError("Pak data file not found.")
random.seed()
def read_test_date(sample=1, minjd=None):
with open(datafile) as fh:
for ln in fh:
if random.random() > sample:
continue
i = ln.rstrip().split(" ")
y, m, d = i[4].split("-")
e = {
"pakcode": i[0],
"jd": int(i[1][3:]),
"hk": int(i[2][3:]),
"masak": int(i[3][6:]),
"year": int(y),
"month": int(m),
"day": int(d),
"iswanphra": i[5] == 't',
}
if minjd and e["jd"] < minjd:
continue
yield e
class Test_PakDate(unittest.TestCase):
def test_jd_pre_epoch(self):
with self.assertRaises(ValueError):
# pre-epoch jd | p = PakDate(jd=PAK_JULIAN_DAY_OFFSET - 5) | 3 | 2023-11-18 21:14:01+00:00 | 12k |
CmosWolf1/Code_implementation_for_paper_SKZC | diffusiondet/detector.py | [
{
"identifier": "SetCriterionDynamicK",
"path": "diffusiondet/loss.py",
"snippet": "class SetCriterionDynamicK(nn.Module):\n \"\"\" This class computes the loss for DiffusionDet.\n The process happens in two steps:\n 1) we compute hungarian assignment between ground truth boxes and the outputs of the model\n 2) we supervise each pair of matched ground-truth / prediction (supervise class and box)\n \"\"\"\n def __init__(self, cfg, num_classes, matcher, weight_dict, eos_coef, losses, use_focal):\n \"\"\" Create the criterion.\n Parameters:\n num_classes: number of object categories, omitting the special no-object category\n matcher: module able to compute a matching between targets and proposals\n weight_dict: dict containing as key the names of the losses and as values their relative weight.\n eos_coef: relative classification weight applied to the no-object category\n losses: list of all the losses to be applied. See get_loss for list of available losses.\n \"\"\"\n super().__init__()\n self.cfg = cfg\n self.num_classes = num_classes\n self.matcher = matcher\n self.weight_dict = weight_dict\n self.eos_coef = eos_coef\n self.losses = losses\n self.use_focal = use_focal\n self.use_fed_loss = cfg.MODEL.DiffusionDet.USE_FED_LOSS\n if self.use_fed_loss:\n self.fed_loss_num_classes = 50\n from detectron2.data.detection_utils import get_fed_loss_cls_weights\n cls_weight_fun = lambda: get_fed_loss_cls_weights(dataset_names=cfg.DATASETS.TRAIN, freq_weight_power=cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT_POWER) # noqa\n fed_loss_cls_weights = cls_weight_fun()\n assert (\n len(fed_loss_cls_weights) == self.num_classes\n ), \"Please check the provided fed_loss_cls_weights. Their size should match num_classes\"\n self.register_buffer(\"fed_loss_cls_weights\", fed_loss_cls_weights)\n\n if self.use_focal:\n self.focal_loss_alpha = cfg.MODEL.DiffusionDet.ALPHA\n self.focal_loss_gamma = cfg.MODEL.DiffusionDet.GAMMA\n else:\n empty_weight = torch.ones(self.num_classes + 1)\n empty_weight[-1] = self.eos_coef\n self.register_buffer('empty_weight', empty_weight)\n\n # copy-paste from https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/roi_heads/fast_rcnn.py#L356\n def get_fed_loss_classes(self, gt_classes, num_fed_loss_classes, num_classes, weight):\n \"\"\"\n Args:\n gt_classes: a long tensor of shape R that contains the gt class label of each proposal.\n num_fed_loss_classes: minimum number of classes to keep when calculating federated loss.\n Will sample negative classes if number of unique gt_classes is smaller than this value.\n num_classes: number of foreground classes\n weight: probabilities used to sample negative classes\n Returns:\n Tensor:\n classes to keep when calculating the federated loss, including both unique gt\n classes and sampled negative classes.\n \"\"\"\n unique_gt_classes = torch.unique(gt_classes)\n prob = unique_gt_classes.new_ones(num_classes + 1).float()\n prob[-1] = 0\n if len(unique_gt_classes) < num_fed_loss_classes:\n prob[:num_classes] = weight.float().clone()\n prob[unique_gt_classes] = 0\n sampled_negative_classes = torch.multinomial(\n prob, num_fed_loss_classes - len(unique_gt_classes), replacement=False\n )\n fed_loss_classes = torch.cat([unique_gt_classes, sampled_negative_classes])\n else:\n fed_loss_classes = unique_gt_classes\n return fed_loss_classes\n\n def loss_labels(self, outputs, targets, indices, num_boxes, log=False):\n \"\"\"Classification loss (NLL)\n targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n \"\"\"\n assert 'pred_logits' in outputs\n src_logits = 
outputs['pred_logits']\n batch_size = len(targets)\n\n # idx = self._get_src_permutation_idx(indices)\n # target_classes_o = torch.cat([t[\"labels\"][J] for t, (_, J) in zip(targets, indices)])\n target_classes = torch.full(src_logits.shape[:2], self.num_classes,\n dtype=torch.int64, device=src_logits.device)\n src_logits_list = []\n target_classes_o_list = []\n # target_classes[idx] = target_classes_o\n for batch_idx in range(batch_size):\n valid_query = indices[batch_idx][0]\n gt_multi_idx = indices[batch_idx][1]\n if len(gt_multi_idx) == 0:\n continue\n bz_src_logits = src_logits[batch_idx]\n target_classes_o = targets[batch_idx][\"labels\"]\n target_classes[batch_idx, valid_query] = target_classes_o[gt_multi_idx]\n\n src_logits_list.append(bz_src_logits[valid_query])\n target_classes_o_list.append(target_classes_o[gt_multi_idx])\n\n if self.use_focal or self.use_fed_loss:\n num_boxes = torch.cat(target_classes_o_list).shape[0] if len(target_classes_o_list) != 0 else 1\n\n target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], self.num_classes + 1],\n dtype=src_logits.dtype, layout=src_logits.layout,\n device=src_logits.device)\n target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)\n\n gt_classes = torch.argmax(target_classes_onehot, dim=-1)\n target_classes_onehot = target_classes_onehot[:, :, :-1]\n\n src_logits = src_logits.flatten(0, 1)\n target_classes_onehot = target_classes_onehot.flatten(0, 1)\n if self.use_focal:\n cls_loss = sigmoid_focal_loss_jit(src_logits, target_classes_onehot, alpha=self.focal_loss_alpha, gamma=self.focal_loss_gamma, reduction=\"none\")\n else:\n cls_loss = F.binary_cross_entropy_with_logits(src_logits, target_classes_onehot, reduction=\"none\")\n if self.use_fed_loss:\n K = self.num_classes\n N = src_logits.shape[0]\n fed_loss_classes = self.get_fed_loss_classes(\n gt_classes,\n num_fed_loss_classes=self.fed_loss_num_classes,\n num_classes=K,\n weight=self.fed_loss_cls_weights,\n )\n fed_loss_classes_mask = fed_loss_classes.new_zeros(K + 1)\n fed_loss_classes_mask[fed_loss_classes] = 1\n fed_loss_classes_mask = fed_loss_classes_mask[:K]\n weight = fed_loss_classes_mask.view(1, K).expand(N, K).float()\n\n loss_ce = torch.sum(cls_loss * weight) / num_boxes\n else:\n loss_ce = torch.sum(cls_loss) / num_boxes\n\n losses = {'loss_ce': loss_ce}\n else:\n raise NotImplementedError\n\n return losses\n\n def loss_boxes(self, outputs, targets, indices, num_boxes):\n \"\"\"Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss\n targets dicts must contain the key \"boxes\" containing a tensor of dim [nb_target_boxes, 4]\n The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.\n \"\"\"\n assert 'pred_boxes' in outputs\n # idx = self._get_src_permutation_idx(indices)\n src_boxes = outputs['pred_boxes']\n\n batch_size = len(targets)\n pred_box_list = []\n pred_norm_box_list = []\n tgt_box_list = []\n tgt_box_xyxy_list = []\n for batch_idx in range(batch_size):\n valid_query = indices[batch_idx][0]\n gt_multi_idx = indices[batch_idx][1]\n if len(gt_multi_idx) == 0:\n continue\n bz_image_whwh = targets[batch_idx]['image_size_xyxy']\n bz_src_boxes = src_boxes[batch_idx]\n bz_target_boxes = targets[batch_idx][\"boxes\"] # normalized (cx, cy, w, h)\n bz_target_boxes_xyxy = targets[batch_idx][\"boxes_xyxy\"] # absolute (x1, y1, x2, y2)\n pred_box_list.append(bz_src_boxes[valid_query])\n pred_norm_box_list.append(bz_src_boxes[valid_query] / bz_image_whwh) # 
normalize (x1, y1, x2, y2)\n tgt_box_list.append(bz_target_boxes[gt_multi_idx])\n tgt_box_xyxy_list.append(bz_target_boxes_xyxy[gt_multi_idx])\n\n if len(pred_box_list) != 0:\n src_boxes = torch.cat(pred_box_list)\n src_boxes_norm = torch.cat(pred_norm_box_list) # normalized (x1, y1, x2, y2)\n target_boxes = torch.cat(tgt_box_list)\n target_boxes_abs_xyxy = torch.cat(tgt_box_xyxy_list)\n num_boxes = src_boxes.shape[0]\n\n losses = {}\n # require normalized (x1, y1, x2, y2)\n loss_bbox = F.l1_loss(src_boxes_norm, box_cxcywh_to_xyxy(target_boxes), reduction='none')\n losses['loss_bbox'] = loss_bbox.sum() / num_boxes\n\n # loss_giou = giou_loss(box_ops.box_cxcywh_to_xyxy(src_boxes), box_ops.box_cxcywh_to_xyxy(target_boxes))\n loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(src_boxes, target_boxes_abs_xyxy))\n losses['loss_giou'] = loss_giou.sum() / num_boxes\n else:\n losses = {'loss_bbox': outputs['pred_boxes'].sum() * 0,\n 'loss_giou': outputs['pred_boxes'].sum() * 0}\n\n return losses\n\n def _get_src_permutation_idx(self, indices):\n # permute predictions following indices\n batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])\n src_idx = torch.cat([src for (src, _) in indices])\n return batch_idx, src_idx\n\n def _get_tgt_permutation_idx(self, indices):\n # permute targets following indices\n batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])\n tgt_idx = torch.cat([tgt for (_, tgt) in indices])\n return batch_idx, tgt_idx\n\n def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):\n loss_map = {\n 'labels': self.loss_labels,\n 'boxes': self.loss_boxes,\n }\n assert loss in loss_map, f'do you really want to compute {loss} loss?'\n return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)\n\n def forward(self, outputs, targets):\n \"\"\" This performs the loss computation.\n Parameters:\n outputs: dict of tensors, see the output specification of the model for the format\n targets: list of dicts, such that len(targets) == batch_size.\n The expected keys in each dict depends on the losses applied, see each loss' doc\n \"\"\"\n outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}\n\n # Retrieve the matching between the outputs of the last layer and the targets\n indices, _ = self.matcher(outputs_without_aux, targets)\n\n # Compute the average number of target boxes accross all nodes, for normalization purposes\n num_boxes = sum(len(t[\"labels\"]) for t in targets)\n num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_boxes)\n num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))\n\n # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n if 'aux_outputs' in outputs:\n for i, aux_outputs in enumerate(outputs['aux_outputs']):\n indices, _ = self.matcher(aux_outputs, targets)\n for loss in self.losses:\n if loss == 'masks':\n # Intermediate masks losses are too costly to compute, we ignore them.\n continue\n kwargs = {}\n if loss == 'labels':\n # Logging is enabled only for the last layer\n kwargs = {'log': False}\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)\n l_dict = {k + f'_{i}': v for k, v in 
l_dict.items()}\n losses.update(l_dict)\n\n return losses"
},
{
"identifier": "HungarianMatcherDynamicK",
"path": "diffusiondet/loss.py",
"snippet": "class HungarianMatcherDynamicK(nn.Module):\n \"\"\"This class computes an assignment between the targets and the predictions of the network\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-k (dynamic) matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n \"\"\"\n def __init__(self, cfg, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1, cost_mask: float = 1, use_focal: bool = False):\n \"\"\"Creates the matcher\n Params:\n cost_class: This is the relative weight of the classification error in the matching cost\n cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost\n cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost\n \"\"\"\n super().__init__()\n self.cost_class = cost_class\n self.cost_bbox = cost_bbox\n self.cost_giou = cost_giou\n self.use_focal = use_focal\n self.use_fed_loss = cfg.MODEL.DiffusionDet.USE_FED_LOSS\n self.ota_k = cfg.MODEL.DiffusionDet.OTA_K\n if self.use_focal:\n self.focal_loss_alpha = cfg.MODEL.DiffusionDet.ALPHA\n self.focal_loss_gamma = cfg.MODEL.DiffusionDet.GAMMA\n assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, \"all costs cant be 0\"\n\n def forward(self, outputs, targets):\n \"\"\" simOTA for detr\"\"\"\n with torch.no_grad():\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n # We flatten to compute the cost matrices in a batch\n if self.use_focal or self.use_fed_loss:\n out_prob = outputs[\"pred_logits\"].sigmoid() # [batch_size, num_queries, num_classes]\n out_bbox = outputs[\"pred_boxes\"] # [batch_size, num_queries, 4]\n else:\n out_prob = outputs[\"pred_logits\"].softmax(-1) # [batch_size, num_queries, num_classes]\n out_bbox = outputs[\"pred_boxes\"] # [batch_size, num_queries, 4]\n\n indices = []\n matched_ids = []\n assert bs == len(targets)\n for batch_idx in range(bs):\n bz_boxes = out_bbox[batch_idx] # [num_proposals, 4]\n bz_out_prob = out_prob[batch_idx]\n bz_tgt_ids = targets[batch_idx][\"labels\"]\n num_insts = len(bz_tgt_ids)\n if num_insts == 0: # empty object in key frame\n non_valid = torch.zeros(bz_out_prob.shape[0]).to(bz_out_prob) > 0\n indices_batchi = (non_valid, torch.arange(0, 0).to(bz_out_prob))\n matched_qidx = torch.arange(0, 0).to(bz_out_prob)\n indices.append(indices_batchi)\n matched_ids.append(matched_qidx)\n continue\n\n bz_gtboxs = targets[batch_idx]['boxes'] # [num_gt, 4] normalized (cx, xy, w, h)\n bz_gtboxs_abs_xyxy = targets[batch_idx]['boxes_xyxy']\n fg_mask, is_in_boxes_and_center = self.get_in_boxes_info(\n box_xyxy_to_cxcywh(bz_boxes), # absolute (cx, cy, w, h)\n box_xyxy_to_cxcywh(bz_gtboxs_abs_xyxy), # absolute (cx, cy, w, h)\n expanded_strides=32\n )\n\n pair_wise_ious = ops.box_iou(bz_boxes, bz_gtboxs_abs_xyxy)\n\n # Compute the classification cost.\n if self.use_focal:\n alpha = self.focal_loss_alpha\n gamma = self.focal_loss_gamma\n neg_cost_class = (1 - alpha) * (bz_out_prob ** gamma) * (-(1 - bz_out_prob + 1e-8).log())\n pos_cost_class = alpha * ((1 - bz_out_prob) ** gamma) * (-(bz_out_prob + 1e-8).log())\n cost_class = pos_cost_class[:, bz_tgt_ids] - neg_cost_class[:, bz_tgt_ids]\n elif self.use_fed_loss:\n # focal loss degenerates to naive one\n neg_cost_class = (-(1 - bz_out_prob + 1e-8).log())\n pos_cost_class = (-(bz_out_prob + 1e-8).log())\n cost_class = pos_cost_class[:, bz_tgt_ids] - 
neg_cost_class[:, bz_tgt_ids]\n else:\n cost_class = -bz_out_prob[:, bz_tgt_ids]\n\n # Compute the L1 cost between boxes\n # image_size_out = torch.cat([v[\"image_size_xyxy\"].unsqueeze(0) for v in targets])\n # image_size_out = image_size_out.unsqueeze(1).repeat(1, num_queries, 1).flatten(0, 1)\n # image_size_tgt = torch.cat([v[\"image_size_xyxy_tgt\"] for v in targets])\n\n bz_image_size_out = targets[batch_idx]['image_size_xyxy']\n bz_image_size_tgt = targets[batch_idx]['image_size_xyxy_tgt']\n\n bz_out_bbox_ = bz_boxes / bz_image_size_out # normalize (x1, y1, x2, y2)\n bz_tgt_bbox_ = bz_gtboxs_abs_xyxy / bz_image_size_tgt # normalize (x1, y1, x2, y2)\n cost_bbox = torch.cdist(bz_out_bbox_, bz_tgt_bbox_, p=1)\n\n cost_giou = -generalized_box_iou(bz_boxes, bz_gtboxs_abs_xyxy)\n\n # Final cost matrix\n cost = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou + 100.0 * (~is_in_boxes_and_center)\n # cost = (cost_class + 3.0 * cost_giou + 100.0 * (~is_in_boxes_and_center)) # [num_query,num_gt]\n cost[~fg_mask] = cost[~fg_mask] + 10000.0\n\n # if bz_gtboxs.shape[0]>0:\n indices_batchi, matched_qidx = self.dynamic_k_matching(cost, pair_wise_ious, bz_gtboxs.shape[0])\n\n indices.append(indices_batchi)\n matched_ids.append(matched_qidx)\n\n return indices, matched_ids\n\n def get_in_boxes_info(self, boxes, target_gts, expanded_strides):\n xy_target_gts = box_cxcywh_to_xyxy(target_gts) # (x1, y1, x2, y2)\n\n anchor_center_x = boxes[:, 0].unsqueeze(1)\n anchor_center_y = boxes[:, 1].unsqueeze(1)\n\n # whether the center of each anchor is inside a gt box\n b_l = anchor_center_x > xy_target_gts[:, 0].unsqueeze(0)\n b_r = anchor_center_x < xy_target_gts[:, 2].unsqueeze(0)\n b_t = anchor_center_y > xy_target_gts[:, 1].unsqueeze(0)\n b_b = anchor_center_y < xy_target_gts[:, 3].unsqueeze(0)\n # (b_l.long()+b_r.long()+b_t.long()+b_b.long())==4 [300,num_gt] ,\n is_in_boxes = ((b_l.long() + b_r.long() + b_t.long() + b_b.long()) == 4)\n is_in_boxes_all = is_in_boxes.sum(1) > 0 # [num_query]\n # in fixed center\n center_radius = 2.5\n # Modified to self-adapted sampling --- the center size depends on the size of the gt boxes\n # https://github.com/dulucas/UVO_Challenge/blob/main/Track1/detection/mmdet/core/bbox/assigners/rpn_sim_ota_assigner.py#L212\n b_l = anchor_center_x > (target_gts[:, 0] - (center_radius * (xy_target_gts[:, 2] - xy_target_gts[:, 0]))).unsqueeze(0)\n b_r = anchor_center_x < (target_gts[:, 0] + (center_radius * (xy_target_gts[:, 2] - xy_target_gts[:, 0]))).unsqueeze(0)\n b_t = anchor_center_y > (target_gts[:, 1] - (center_radius * (xy_target_gts[:, 3] - xy_target_gts[:, 1]))).unsqueeze(0)\n b_b = anchor_center_y < (target_gts[:, 1] + (center_radius * (xy_target_gts[:, 3] - xy_target_gts[:, 1]))).unsqueeze(0)\n\n is_in_centers = ((b_l.long() + b_r.long() + b_t.long() + b_b.long()) == 4)\n is_in_centers_all = is_in_centers.sum(1) > 0\n\n is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all\n is_in_boxes_and_center = (is_in_boxes & is_in_centers)\n\n return is_in_boxes_anchor, is_in_boxes_and_center\n\n def dynamic_k_matching(self, cost, pair_wise_ious, num_gt):\n matching_matrix = torch.zeros_like(cost) # [300,num_gt]\n ious_in_boxes_matrix = pair_wise_ious\n n_candidate_k = self.ota_k\n\n # Take the sum of the predicted value and the top 10 iou of gt with the largest iou as dynamic_k\n topk_ious, _ = torch.topk(ious_in_boxes_matrix, n_candidate_k, dim=0)\n dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)\n\n for gt_idx in range(num_gt):\n _, 
pos_idx = torch.topk(cost[:, gt_idx], k=dynamic_ks[gt_idx].item(), largest=False)\n matching_matrix[:, gt_idx][pos_idx] = 1.0\n\n del topk_ious, dynamic_ks, pos_idx\n\n anchor_matching_gt = matching_matrix.sum(1)\n\n if (anchor_matching_gt > 1).sum() > 0:\n _, cost_argmin = torch.min(cost[anchor_matching_gt > 1], dim=1)\n matching_matrix[anchor_matching_gt > 1] *= 0\n matching_matrix[anchor_matching_gt > 1, cost_argmin,] = 1\n\n while (matching_matrix.sum(0) == 0).any():\n num_zero_gt = (matching_matrix.sum(0) == 0).sum()\n matched_query_id = matching_matrix.sum(1) > 0\n cost[matched_query_id] += 100000.0\n unmatch_id = torch.nonzero(matching_matrix.sum(0) == 0, as_tuple=False).squeeze(1)\n for gt_idx in unmatch_id:\n pos_idx = torch.argmin(cost[:, gt_idx])\n matching_matrix[:, gt_idx][pos_idx] = 1.0\n if (matching_matrix.sum(1) > 1).sum() > 0: # If a query matches more than one gt\n _, cost_argmin = torch.min(cost[anchor_matching_gt > 1],\n dim=1) # find gt for these queries with minimal cost\n matching_matrix[anchor_matching_gt > 1] *= 0 # reset mapping relationship\n matching_matrix[anchor_matching_gt > 1, cost_argmin,] = 1 # keep gt with minimal cost\n\n assert not (matching_matrix.sum(0) == 0).any()\n selected_query = matching_matrix.sum(1) > 0\n gt_indices = matching_matrix[selected_query].max(1)[1]\n assert selected_query.sum() == len(gt_indices)\n\n cost[matching_matrix == 0] = cost[matching_matrix == 0] + float('inf')\n matched_query_id = torch.min(cost, dim=0)[1]\n\n return (selected_query, gt_indices), matched_query_id"
},
{
"identifier": "DynamicHead",
"path": "diffusiondet/head.py",
"snippet": "class DynamicHead(nn.Module):\n\n def __init__(self, cfg, roi_input_shape):\n super().__init__()\n\n # Build RoI.\n box_pooler = self._init_box_pooler(cfg, roi_input_shape)\n self.box_pooler = box_pooler\n \n # Build heads.\n num_classes = cfg.MODEL.DiffusionDet.NUM_CLASSES\n d_model = cfg.MODEL.DiffusionDet.HIDDEN_DIM\n dim_feedforward = cfg.MODEL.DiffusionDet.DIM_FEEDFORWARD\n nhead = cfg.MODEL.DiffusionDet.NHEADS\n dropout = cfg.MODEL.DiffusionDet.DROPOUT\n activation = cfg.MODEL.DiffusionDet.ACTIVATION\n num_heads = cfg.MODEL.DiffusionDet.NUM_HEADS\n rcnn_head = RCNNHead(cfg, d_model, num_classes, dim_feedforward, nhead, dropout, activation)\n self.head_series = _get_clones(rcnn_head, num_heads)\n self.num_heads = num_heads\n self.return_intermediate = cfg.MODEL.DiffusionDet.DEEP_SUPERVISION\n\n # Gaussian random feature embedding layer for time\n self.d_model = d_model\n time_dim = d_model * 4\n self.time_mlp = nn.Sequential(\n SinusoidalPositionEmbeddings(d_model),\n nn.Linear(d_model, time_dim),\n nn.GELU(),\n nn.Linear(time_dim, time_dim),\n )\n\n # Init parameters.\n self.use_focal = cfg.MODEL.DiffusionDet.USE_FOCAL\n self.use_fed_loss = cfg.MODEL.DiffusionDet.USE_FED_LOSS\n self.num_classes = num_classes\n if self.use_focal or self.use_fed_loss:\n prior_prob = cfg.MODEL.DiffusionDet.PRIOR_PROB\n self.bias_value = -math.log((1 - prior_prob) / prior_prob)\n self._reset_parameters()\n\n def _reset_parameters(self):\n # init all parameters.\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n # initialize the bias for focal loss and fed loss.\n if self.use_focal or self.use_fed_loss:\n if p.shape[-1] == self.num_classes or p.shape[-1] == self.num_classes + 1:\n nn.init.constant_(p, self.bias_value)\n\n @staticmethod\n def _init_box_pooler(cfg, input_shape):\n\n in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES\n pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION\n pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)\n sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO\n pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE\n\n # If StandardROIHeads is applied on multiple feature maps (as in FPN),\n # then we share the same predictors and therefore the channel counts must be the same\n in_channels = [input_shape[f].channels for f in in_features]\n # Check all channel counts are equal\n assert len(set(in_channels)) == 1, in_channels\n\n box_pooler = ROIPooler(\n output_size=pooler_resolution,\n scales=pooler_scales,\n sampling_ratio=sampling_ratio,\n pooler_type=pooler_type,\n )\n return box_pooler\n\n def forward(self, features, init_bboxes, t, init_features):\n # assert t shape (batch_size)\n time = self.time_mlp(t)\n\n inter_class_logits = []\n inter_pred_bboxes = []\n\n bs = len(features[0])\n bboxes = init_bboxes\n num_boxes = bboxes.shape[1]\n\n if init_features is not None:\n init_features = init_features[None].repeat(1, bs, 1)\n proposal_features = init_features.clone()\n else:\n proposal_features = None\n \n for head_idx, rcnn_head in enumerate(self.head_series):\n class_logits, pred_bboxes, proposal_features = rcnn_head(features, bboxes, proposal_features, self.box_pooler, time)\n if self.return_intermediate:\n inter_class_logits.append(class_logits)\n inter_pred_bboxes.append(pred_bboxes)\n bboxes = pred_bboxes.detach()\n\n if self.return_intermediate:\n return torch.stack(inter_class_logits), torch.stack(inter_pred_bboxes)\n\n return class_logits[None], pred_bboxes[None]"
},
{
"identifier": "box_cxcywh_to_xyxy",
"path": "diffusiondet/util/box_ops.py",
"snippet": "def box_cxcywh_to_xyxy(x):\n x_c, y_c, w, h = x.unbind(-1)\n b = [(x_c - 0.5 * w), (y_c - 0.5 * h),\n (x_c + 0.5 * w), (y_c + 0.5 * h)]\n return torch.stack(b, dim=-1)"
},
{
"identifier": "box_xyxy_to_cxcywh",
"path": "diffusiondet/util/box_ops.py",
"snippet": "def box_xyxy_to_cxcywh(x):\n x0, y0, x1, y1 = x.unbind(-1)\n b = [(x0 + x1) / 2, (y0 + y1) / 2,\n (x1 - x0), (y1 - y0)]\n return torch.stack(b, dim=-1)"
},
{
"identifier": "nested_tensor_from_tensor_list",
"path": "diffusiondet/util/misc.py",
"snippet": "def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n if torchvision._is_tracing():\n # nested_tensor_from_tensor_list() does not export well to ONNX\n # call _onnx_nested_tensor_from_tensor_list() instead\n return _onnx_nested_tensor_from_tensor_list(tensor_list)\n\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], :img.shape[2]] = False\n else:\n raise ValueError('not supported')\n return NestedTensor(tensor, mask)"
}
] | import math
import random
import torch
import torch.nn.functional as F
from typing import List
from collections import namedtuple
from torch import nn
from detectron2.layers import batched_nms
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, detector_postprocess
from detectron2.structures import Boxes, ImageList, Instances
from .loss import SetCriterionDynamicK, HungarianMatcherDynamicK
from .head import DynamicHead
from .util.box_ops import box_cxcywh_to_xyxy, box_xyxy_to_cxcywh
from .util.misc import nested_tensor_from_tensor_list | 9,358 | """
steps = timesteps + 1
x = torch.linspace(0, timesteps, steps, dtype=torch.float64)
alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * math.pi * 0.5) ** 2
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.999)
@META_ARCH_REGISTRY.register()
class DiffusionDet(nn.Module):
"""
Implement DiffusionDet
"""
def __init__(self, cfg):
super().__init__()
self.device = torch.device(cfg.MODEL.DEVICE)
self.in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
self.num_classes = cfg.MODEL.DiffusionDet.NUM_CLASSES
self.num_proposals = cfg.MODEL.DiffusionDet.NUM_PROPOSALS
self.hidden_dim = cfg.MODEL.DiffusionDet.HIDDEN_DIM
self.num_heads = cfg.MODEL.DiffusionDet.NUM_HEADS
# Build Backbone.
self.backbone = build_backbone(cfg)
self.size_divisibility = self.backbone.size_divisibility
# build diffusion
timesteps = 1000
sampling_timesteps = cfg.MODEL.DiffusionDet.SAMPLE_STEP
self.objective = 'pred_x0'
betas = cosine_beta_schedule(timesteps)
alphas = 1. - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value=1.)
timesteps, = betas.shape
self.num_timesteps = int(timesteps)
self.sampling_timesteps = default(sampling_timesteps, timesteps)
assert self.sampling_timesteps <= timesteps
self.is_ddim_sampling = self.sampling_timesteps < timesteps
self.ddim_sampling_eta = 1.
self.self_condition = False
self.scale = cfg.MODEL.DiffusionDet.SNR_SCALE
self.box_renewal = True
self.use_ensemble = True
self.register_buffer('betas', betas)
self.register_buffer('alphas_cumprod', alphas_cumprod)
self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
self.register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
self.register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
self.register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
self.register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
# above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
self.register_buffer('posterior_variance', posterior_variance)
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
self.register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min=1e-20)))
self.register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
self.register_buffer('posterior_mean_coef2',
(1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod))
# Build Dynamic Head.
self.head = DynamicHead(cfg=cfg, roi_input_shape=self.backbone.output_shape())
# Loss parameters:
class_weight = cfg.MODEL.DiffusionDet.CLASS_WEIGHT
giou_weight = cfg.MODEL.DiffusionDet.GIOU_WEIGHT
l1_weight = cfg.MODEL.DiffusionDet.L1_WEIGHT
no_object_weight = cfg.MODEL.DiffusionDet.NO_OBJECT_WEIGHT
self.deep_supervision = cfg.MODEL.DiffusionDet.DEEP_SUPERVISION
self.use_focal = cfg.MODEL.DiffusionDet.USE_FOCAL
self.use_fed_loss = cfg.MODEL.DiffusionDet.USE_FED_LOSS
self.use_nms = cfg.MODEL.DiffusionDet.USE_NMS
# Build Criterion.
matcher = HungarianMatcherDynamicK(
cfg=cfg, cost_class=class_weight, cost_bbox=l1_weight, cost_giou=giou_weight, use_focal=self.use_focal
)
weight_dict = {"loss_ce": class_weight, "loss_bbox": l1_weight, "loss_giou": giou_weight}
if self.deep_supervision:
aux_weight_dict = {}
for i in range(self.num_heads - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
losses = ["labels", "boxes"]
self.criterion = SetCriterionDynamicK(
cfg=cfg, num_classes=self.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight,
losses=losses, use_focal=self.use_focal,)
pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1)
pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1)
self.normalizer = lambda x: (x - pixel_mean) / pixel_std
self.to(self.device)
def predict_noise_from_start(self, x_t, t, x0):
return (
(extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - x0) /
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
)
def model_predictions(self, backbone_feats, images_whwh, x, t, x_self_cond=None, clip_x_start=False):
x_boxes = torch.clamp(x, min=-1 * self.scale, max=self.scale)
x_boxes = ((x_boxes / self.scale) + 1) / 2
| # ========================================
# Modified by Shoufa Chen
# ========================================
# Modified by Peize Sun, Rufeng Zhang
# Contact: {sunpeize, cxrfzhang}@foxmail.com
#
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
__all__ = ["DiffusionDet"]
ModelPrediction = namedtuple('ModelPrediction', ['pred_noise', 'pred_x_start'])
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def extract(a, t, x_shape):
"""extract the appropriate t index for a batch of indices"""
batch_size = t.shape[0]
out = a.gather(-1, t)
return out.reshape(batch_size, *((1,) * (len(x_shape) - 1)))
def cosine_beta_schedule(timesteps, s=0.008):
"""
cosine schedule
as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
"""
steps = timesteps + 1
x = torch.linspace(0, timesteps, steps, dtype=torch.float64)
alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * math.pi * 0.5) ** 2
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.999)
@META_ARCH_REGISTRY.register()
class DiffusionDet(nn.Module):
"""
Implement DiffusionDet
"""
def __init__(self, cfg):
super().__init__()
self.device = torch.device(cfg.MODEL.DEVICE)
self.in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
self.num_classes = cfg.MODEL.DiffusionDet.NUM_CLASSES
self.num_proposals = cfg.MODEL.DiffusionDet.NUM_PROPOSALS
self.hidden_dim = cfg.MODEL.DiffusionDet.HIDDEN_DIM
self.num_heads = cfg.MODEL.DiffusionDet.NUM_HEADS
# Build Backbone.
self.backbone = build_backbone(cfg)
self.size_divisibility = self.backbone.size_divisibility
# build diffusion
timesteps = 1000
sampling_timesteps = cfg.MODEL.DiffusionDet.SAMPLE_STEP
self.objective = 'pred_x0'
betas = cosine_beta_schedule(timesteps)
alphas = 1. - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value=1.)
timesteps, = betas.shape
self.num_timesteps = int(timesteps)
self.sampling_timesteps = default(sampling_timesteps, timesteps)
assert self.sampling_timesteps <= timesteps
self.is_ddim_sampling = self.sampling_timesteps < timesteps
self.ddim_sampling_eta = 1.
self.self_condition = False
self.scale = cfg.MODEL.DiffusionDet.SNR_SCALE
self.box_renewal = True
self.use_ensemble = True
self.register_buffer('betas', betas)
self.register_buffer('alphas_cumprod', alphas_cumprod)
self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
self.register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
self.register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
self.register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
self.register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
# above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
self.register_buffer('posterior_variance', posterior_variance)
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
self.register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min=1e-20)))
self.register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
self.register_buffer('posterior_mean_coef2',
(1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod))
# Build Dynamic Head.
self.head = DynamicHead(cfg=cfg, roi_input_shape=self.backbone.output_shape())
# Loss parameters:
class_weight = cfg.MODEL.DiffusionDet.CLASS_WEIGHT
giou_weight = cfg.MODEL.DiffusionDet.GIOU_WEIGHT
l1_weight = cfg.MODEL.DiffusionDet.L1_WEIGHT
no_object_weight = cfg.MODEL.DiffusionDet.NO_OBJECT_WEIGHT
self.deep_supervision = cfg.MODEL.DiffusionDet.DEEP_SUPERVISION
self.use_focal = cfg.MODEL.DiffusionDet.USE_FOCAL
self.use_fed_loss = cfg.MODEL.DiffusionDet.USE_FED_LOSS
self.use_nms = cfg.MODEL.DiffusionDet.USE_NMS
# Build Criterion.
matcher = HungarianMatcherDynamicK(
cfg=cfg, cost_class=class_weight, cost_bbox=l1_weight, cost_giou=giou_weight, use_focal=self.use_focal
)
weight_dict = {"loss_ce": class_weight, "loss_bbox": l1_weight, "loss_giou": giou_weight}
if self.deep_supervision:
aux_weight_dict = {}
for i in range(self.num_heads - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
losses = ["labels", "boxes"]
self.criterion = SetCriterionDynamicK(
cfg=cfg, num_classes=self.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight,
losses=losses, use_focal=self.use_focal,)
pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1)
pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1)
self.normalizer = lambda x: (x - pixel_mean) / pixel_std
self.to(self.device)
def predict_noise_from_start(self, x_t, t, x0):
return (
(extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - x0) /
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
)
def model_predictions(self, backbone_feats, images_whwh, x, t, x_self_cond=None, clip_x_start=False):
x_boxes = torch.clamp(x, min=-1 * self.scale, max=self.scale)
x_boxes = ((x_boxes / self.scale) + 1) / 2 | x_boxes = box_cxcywh_to_xyxy(x_boxes) | 3 | 2023-11-17 02:37:37+00:00 | 12k |
fg320/DEASC | examples/05_3x3_farm_wso_SLSQP_visualization.py | [
{
"identifier": "WfModel",
"path": "deasc/wf_model.py",
"snippet": "class WfModel:\n \"\"\"\n Class for wind farm modelling (Interface setup but not limited to FLORIS\n framework).\n \"\"\"\n\n def __init__(self, input_file, path):\n \"\"\"\n Initialise wind farm object by pointing towards an input file.\n (FLORIS interface object).\n\n Args\n ----\n input file:(FLORIS .json input file).\n \"\"\"\n # Read and initialize input file\n self.input_file = input_file\n self.interface = floris_input_handler(self.input_file, path)\n\n # Assign wind farm model proporties\n self.D, self.H_hub, self.n_turbs = floris_properties(self)\n\n def set_aligned_layout(self, n_row, n_col, spac_x, spac_y, coordinates=False):\n \"\"\"\n Modify farm layout in aligned wind turbines with constant spacing,\n differing only from rows to columns. Flow field is also reinitialized.\n\n Args\n ----\n n_row: (float) number of turbine rows\n n_col: (float) number of turbine columns\n spac_x: (float) WT diam normalized turbines distance in x direction\n spac_y: (float) WT diam normalized turbines distance in y direction\n coordinates: (bool, opt) False if no coordinates wanted.\n Default set to False.\n\n Returns\n -------\n if coordinates is False:\n None\n if coordinates is True:\n x-coordinates: (numpy array) turbines x-coordinates\n y-coordinates: (numpy array) turbines y-coordinates\n \"\"\"\n # Input type check\n if not all(isinstance(i, int) for i in [n_row, n_col]) or \\\n not all(isinstance(j, (int, float)) for j in [spac_x, spac_y]):\n err_msg = \"Incorrect input value types\"\n raise ValueError(err_msg)\n\n # Calculate new coordinate farm layout\n layout_x = []\n layout_y = []\n for i in range(int(n_row)):\n for j in range(int(n_col)):\n layout_x.append(i * spac_x * self.D)\n layout_y.append(j * spac_y * self.D)\n\n # Reinitialize wind farm object\n floris_reinitialise_layout(self, layout_x, layout_y)\n\n if coordinates:\n return (np.array(layout_x), np.array(layout_y))\n else:\n return None\n\n def set_HR_layout(self, coordinates=False):\n \"\"\"\n Set Horns Rev wind farm layout to wind farm object and\n returns turbines' x and y coordinates if coordinates=True.\n\n Args\n ----\n coordinates: (bool, opt) False if no coordinates wanted.\n Default set to False.\n\n Returns\n -------\n if coordinates is False:\n None\n if coordinates is True:\n x-coordinates: (numpy array) turbines x-coordinates\n y-coordinates: (numpy array) turbines y-coordinates\n \"\"\"\n # Vestas V80 2 MW diameter check\n if self.D != 80:\n warning = \"Rotor diameter not from the Vestas V80 2 MW turbine\"\n warnings.warn(warning, UserWarning)\n\n n_rows = 10\n n_cols = 8\n spac_x = 7\n spac_y = 7\n angle = 6\n layout_x = []\n layout_y = []\n for i in range(int(n_rows)):\n for j in range(int(n_cols)):\n layout_x.append((i * spac_x * self.D) -\n (np.sin(np.radians(angle)) * j * spac_y * self.D))\n layout_y.append(j * spac_y * self.D * np.cos(np.radians(angle)))\n\n # Reinitialize wind farm object\n floris_reinitialise_layout(self, layout_x, layout_y)\n\n if coordinates:\n return (np.array(layout_x), np.array(layout_y))\n else:\n return None\n\n def farm_eval(self, yaw=None, ws=None, wd=None, ti=None, shear=None):\n \"\"\"\n Calculate farm flow field for given wind farm layout and input conditions.\n Return main outputs, such as yaw angles, turbines power, farm power, etc.\n\n Args\n ----\n yaw: (list, optional) turbines yaw angles (deg). Default to None.\n ws: (float, optional) input wind speeds (m/s). Default to None.\n wd: (float, optional) input wind directions (deg). 
Default to None.\n ti: (float, optional) input turbulence intensity. Default to None.\n shear: (float, optional) shear exponent. Default to None.\n\n Returns\n -------\n wf_pow: (float) WF power (MWatts).\n wt_pow: (np.array) WTs power (MWatts).\n wt_ti: (list) WTs turbulence intensity.\n wt_yaw: (np.array) WTs yaw angles (deg).\n \"\"\"\n # Main wind farm calculation\n wf_pow, wt_pow, wt_ti, wt_yaw, _ = floris_farm_eval(self,\n yaw,\n ws,\n wd,\n ti,\n shear)\n\n return (wf_pow, wt_pow, wt_ti, wt_yaw)\n\n def pow_yaw_sweep_1var(self, layout, var_info):\n \"\"\"\n Return wind farm power for a single yaw variable, either a\n single turbine or a single row of turbines. Sweep by row not possible\n for not aligned \"custom\" layouts.\n\n Args\n ----\n layout: (tuple)\n row: (integer) number of farm rows\n cols: (integer) number of farm columns\n or string \"custom\"\n var_info: (tuple)\n var_type: (string) \"T\" for turbine,\n \"R\" for row (not for custom layouts)\n var: (integer) turbine or row number\n var_value: (list of floats) variable values\n\n Returns\n -------\n obj_out: tuple\n obj: (list) objective values\n obj_func: (string) objective function\n var_info: (tuple) see input\n model: (string) model name\n \"\"\"\n # Extract inputs and check inputs\n var_type, var, var_value = var_info\n if layout != \"custom\":\n rows, cols = layout\n if var_type == 'R' and layout == \"custom\":\n err_msg = \"Row not allowed for custom layouts\"\n raise ValueError(err_msg)\n if var_type == 'R' and var > rows:\n err_msg = \"Row specified not in farm\"\n raise ValueError(err_msg)\n if var_type == 'T' and var > self.n_turbs:\n err_msg = \"Turbine specified not in farm\"\n raise ValueError(err_msg)\n\n # Calculations\n yaw_angles = np.array(floris_current_yaw(self))\n wf_pow = []\n\n for yaw_change in var_value:\n if layout != \"custom\":\n rows, cols = layout\n if var_type == 'T':\n yaw_angles[(var-1)] = yaw_change\n elif var_type == 'R':\n idx_1 = var*cols\n idx_0 = idx_1-cols\n yaw_angles[idx_0:idx_1] = yaw_change\n else:\n err_msg = \"var_type either 'T' or 'R'\"\n raise ValueError(err_msg)\n\n wf_pow_single, _, _, _ = self.farm_eval(yaw=yaw_angles)\n wf_pow.append(wf_pow_single)\n\n obj_out = (wf_pow, 'Farm Power')\n var_info = (var_type, var, var_value)\n print(\"Function exploration complete\")\n\n return obj_out, var_info"
},
{
"identifier": "WSOpt",
"path": "deasc/wake_steering.py",
"snippet": "class WSOpt:\n \"\"\"\n Class to perform wake steering optimization with a WfModel object, given an a-priori\n specified wind farm layout and specified atmopheric conditions. Optimization can have\n all/some turbines as variables, or rows for wind farms with equal columns. Optimizers\n available are the local SLSQP, where linear constraints can be added, and the global\n optimizer TuRBO.\n \"\"\"\n\n def __init__(self,\n wf_model,\n inflow,\n variables,\n var_bounds,\n var_initial,\n opt_method=\"SLSQP\",\n opt_options=None,\n obj_function=\"Farm Power\",\n constraints=(None, None, None),\n by_row=(False, None, None),\n tuning_dynamic=False\n ):\n \"\"\"\n Args\n ----\n wf_model: (WfModel)\n WfModel to perform wake steering optimization.\n inflow: (list) Inflow conditions for wake steering optimization.\n yaw_initial: (list) wind farm yaw angles (deg).\n (string) 'random' for random intial wind farm yaw angles.\n wd: (float) input wind directions (deg).\n ws: (float) input wind speeds (m/s).\n ti: (float) input turbulence intensity.\n shear: (float) shear exponent.\n variables: (list)\n List of turbines (or rows) to optimize. Naming convention starts from 1.\n var_bounds: (tuple)\n low_bound: (float) variable (yaw angle) lower bound.\n upp_bound: (float) variable (yaw angle) upper bound.\n var_initial:\n SLSQP: (list) list of initial variable values for each variable.\n (string) 'random' for random initial variable values.\n TURBO_1: (list of lists) list of n_init variable values lists\n (see TURBO_1 options).\n (string) 'LHS' latin hypercube sampling.\n TURBO_M: (string) 'LHS' latin hypercube sampling.\n opt_method: (string, optional) optimization method.\n 'SLSQP', 'TURBO_1 and 'TURBO_M' available.\n Default set to 'SLSQP'.\n opt_options: (dict , optional) optimization method options dictionary.\n Default set to None.\n opt_function: (string , optional) objective function. 'Farm Power' available\n Default set to 'Farm Power'.\n constraints: (tuple) Linear constraints definition. Limited to SLSQP.\n A: (matrix) linear constraint matrix.\n Default set to None.\n low_bound_constr: (float) lower non-normalized contraint bound.\n Default set to None.\n upp_bnd_constr: (float) upper non-normalized contraint bound.\n Default set to None.\n by_row : (tuple, optional) Optimization by row, requires all farm columns to have\n the same amount of rows.\n by_row_bool: (bool) True if optimization variables are wind farm rows,\n False if wind farm turbines. Default set to False.\n rows:: (int) wind farm rows. Default set to None.\n cols:: (int) wind farm columns. Default set to None.\n tuning_dynamic : (bool, optional)\n If True, include dynamic parameter tuning. See tuning_dynamic_initialize\n method. 
Default to False.\n \"\"\"\n # Opt Methods - Opt Options - Optimizers - Opt Functions\n self.opt_method_list = [\"SLSQP\", \"TURBO_1\", \"TURBO_M\"]\n self.opt_options_dict = {\"SLSQP\": {'maxiter': 100,\n 'disp': True,\n 'iprint': 2,\n 'ftol': 1e-6,\n 'eps': 0.01},\n \"TURBO_1\": {\"n_init\": len(variables)*2,\n \"max_evals\": 500,\n \"batch_size\": 1, # 1 = Serial\n \"verbose\": True,\n \"use_ard\": True,\n \"max_cholesky_size\": 2000,\n \"n_training_steps\": 50,\n \"min_cuda\": 1024,\n \"device\": \"cpu\",\n \"dtype\": \"float64\"},\n \"TURBO_M\": {\"n_init\": len(variables)*2,\n \"max_evals\": 500,\n \"n_trust_regions\": 2,\n \"batch_size\": 1, # 1 = Serial\n \"verbose\": True,\n \"use_ard\": True,\n \"max_cholesky_size\": 2000,\n \"n_training_steps\": 50,\n \"min_cuda\": 1024,\n \"device\": \"cpu\",\n \"dtype\": \"float64\"}}\n self.optimizer_dict = {'SLSQP': self._optimizer_scipy,\n 'TURBO_1': self._optimizer_turbo_1,\n 'TURBO_M': self._optimizer_turbo_m}\n self.obj_function_dict = {'Farm Power': self._obj_function_power}\n\n # Optimization methods and optimizer\n self.opt_method = opt_method\n self._opt_method_settler()\n self.optimizer = self.optimizer_dict[self.opt_method]\n\n # Optimizer options\n self.opt_options = opt_options\n self._opt_options_settler()\n\n # Optimization function\n self.obj_function_name = obj_function\n self._obj_function_settler()\n\n # Wind farm conditions\n self.wf_model = wf_model\n self.wf_model_dict_original = floris_extract_object_dict(self.wf_model)\n self.yaw_initial, self.wd, self.ws, self.ti, self.shear = inflow\n if not isinstance(self.yaw_initial, (list, np.ndarray)):\n if self.yaw_initial == 'random':\n self.yaw_initial = self._random_yaw_generator(self.wf_model.n_turbs,\n var_bounds)\n self._yaw_initial_input_handler()\n self.yaw_initial = np.array([float(item) for item in self.yaw_initial])\n\n # Optimization per wind turbine or per wind farm row\n self.by_row_bool = by_row[0]\n if self.by_row_bool:\n self.rows = by_row[1]\n self.cols = by_row[2]\n self._by_row_input_handler()\n\n # Variable bounds\n self.var_bounds = var_bounds\n self.low_bound, self.upp_bound = self.var_bounds\n self.low_bound_norm = norm(self.low_bound, self.low_bound, self.upp_bound)\n self.upp_bound_norm = norm(self.upp_bound, self.low_bound, self.upp_bound)\n self.var_bounds_norm = (self.low_bound_norm, self.upp_bound_norm)\n tmp = [self.var_bounds_norm for i in range(len(variables))]\n self.var_bounds_norm_list = tmp\n tmp = np.array([self.low_bound_norm for i in range(len(variables))])\n self.low_bound_norm_list = tmp\n tmp = np.array([self.upp_bound_norm for i in range(len(variables))])\n self.upp_bound_norm_list = tmp\n\n # Constraints\n self.A = constraints[0]\n self.low_bound_constr = constraints[1]\n self.upp_bound_constr = constraints[2]\n if self.A is not None:\n self._constraints_input_handler()\n self.low_bound_constr_norm = norm(self.low_bound_constr,\n self.low_bound,\n self.upp_bound)\n self.upp_bound_constr_norm = norm(self.upp_bound_constr,\n self.low_bound,\n self.upp_bound)\n\n # Yaw variables\n self.variables = variables\n self.var_initial = var_initial\n self._variables_input_handler()\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.opt_method == 'SLSQP' and self.var_initial == 'random':\n self.var_initial = self._random_yaw_generator(len(self.variables),\n self.var_bounds)\n self._var_initial_input_handler()\n self.var_initial_norm = self._var_initial_norm()\n\n # Dynamic tuning\n self.tuning_dyn_bool = tuning_dynamic\n 
self._tuning_dyn_bool_check()\n self.tuning_dyn_initialization = False\n\n self.opt_run = False\n\n def tuning_dyn_initialize(self, tuning_dyn_obj_list):\n \"\"\"\n Assign list of tuning dynamic objects TuningDyn to the WSOpt object.\n\n Args\n ----\n tuning_dyn_object: (list of TuningDyn objects)\n \"\"\"\n self.tuning_dyn_obj_list = tuning_dyn_obj_list\n self._tuning_dyn_init_input_handler()\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n tuning_dyn_obj.wso_compatibility_check(self)\n self.tuning_dyn_initialization = True\n\n def optimize_yaw(self):\n \"\"\"\n Optimize the yaw angle for the given WSOpt object.\n\n Returns\n -------\n opt_yaw_angles_vars: (ndarray) optimal yaw angles for the optimization variables.\n opt_yaw_angles_all: (ndarray) optimal yaw angles for all.wind farm turbines.\n \"\"\"\n # Tuning dynamic initialization check\n self._tuning_dyn_initialization_check()\n\n # Print optimization info\n self._print_info()\n\n # Wind farm power - no yaw\n self.wf_pow_noyaw = self._get_farm_power_noyaw()\n\n # Optimize\n self._iter_details_setup()\n self.opt_yaw_angles_vars, self.opt_yaw_angles_all = self.optimizer()\n self.opt_run = True\n\n return (self.opt_yaw_angles_vars, self.opt_yaw_angles_all)\n\n def get_optimization_details(self):\n \"\"\"\n Return optimization details: optimizer iterations details and objective function\n evaluations details. The two are identical for TURBO optimizers as an objective\n function evaluation corresponds to an optimizer iteration, different for SLSQP as\n additional objective function evaluations are required to approximate gradients.\n\n Returns\n -------\n iter_details: (tuple) optimizer iterations details.\n iter_yaw_angles: (list) list of yaw angles per optimizer iteration.\n iter_obj_func: (list) list of objective function per optimizer iteration.\n iter_farm_power: (list) list of farm power values per optimizer iteration.\n eval_details: (tuple) objective fucntion evaluations details.\n eval_yaw_angles: (list) list of yaw angles per evaluation.\n eval_obj_func: (list) list of objective function per evaluation.\n eval_farm_power: (list) list of farm power values per evaluation.\n \"\"\"\n iter_details = (self.iter_yaw_angles,\n self.iter_obj_func,\n self.iter_farm_power)\n eval_details = (self.eval_yaw_angles,\n self.eval_obj_func,\n self.eval_farm_power)\n return (iter_details, eval_details)\n\n # %% Private methods\n\n def _opt_method_settler(self):\n if self.opt_method not in self.opt_method_list:\n err_msg = \"Optimization method not recognized\"\n raise Exception(err_msg)\n\n def _opt_options_settler(self):\n if self.opt_options is None:\n self.opt_options = self.opt_options_dict[self.opt_method]\n\n def _obj_function_settler(self):\n if self.obj_function_name in list(self.obj_function_dict.keys()):\n self.obj_function = self.obj_function_dict[self.obj_function_name]\n else:\n err_msg = \"Optimization function not recognized\"\n raise Exception(err_msg)\n\n def _random_yaw_generator(self, yaw_number, yaw_bounds):\n yaw_angles = []\n for i in range(yaw_number):\n x = random.choice(range(yaw_bounds[0], yaw_bounds[1]+1))\n yaw_angles.append(x)\n return yaw_angles\n\n def _yaw_initial_input_handler(self):\n if len(self.yaw_initial) != self.wf_model.n_turbs:\n err_msg = \"Initial yaw angles do not match turbine number\"\n raise Exception(err_msg)\n\n def _by_row_input_handler(self):\n if self.rows*self.cols != self.wf_model.n_turbs:\n err_msg = \"Farm rows and columns provided do not match turbine number\"\n raise 
Exception(err_msg)\n\n def _constraints_input_handler(self):\n if self.opt_method != 'SLSQP':\n err_msg = \"Linear constraints (on top of bounds) limited to SLSQP optimizer\"\n raise Exception(err_msg)\n\n def _variables_input_handler(self):\n if self.by_row_bool:\n for row in self.variables:\n if row > self.rows:\n err_msg = \"Row/s specified not in farm\"\n raise Exception(err_msg)\n if len(self.variables) > self.rows:\n err_msg = \"Too many rows specified\"\n raise Exception(err_msg)\n else:\n for turb in self.variables:\n if turb > self.wf_model.n_turbs:\n err_msg = \"Turbine/s specified not in the farm\"\n raise Exception(err_msg)\n if len(self.variables) > self.wf_model.n_turbs:\n err_msg = \"Too many turbines specified\"\n raise Exception(err_msg)\n if 0 in self.variables:\n err_msg = \"Turbine/row counting convention starts from 1\"\n raise Exception(err_msg)\n\n def _var_initial_input_handler(self):\n if self.opt_method == 'TURBO_1':\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n pass\n elif self.var_initial == 'random':\n err_msg = \"Random initial variables limited to SLSQP optimizer\"\n raise Exception(err_msg)\n else:\n if len(self.var_initial) != self.opt_options[\"n_init\"]:\n err_msg = \"n_init initial variable lists are needed (see TURBO options)\"\n raise Exception(err_msg)\n elif len(self.var_initial[0]) != len(self.variables):\n err_msg = \"var_initial sublists length not equal number of variables\"\n raise Exception(err_msg)\n elif self.opt_method == 'TURBO_M':\n if self.var_initial != 'LHS':\n err_msg = \"TURBO_M optimizer requires LHS as initial sampling\"\n elif self.opt_method == 'SLSQP':\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n err_msg = \"Latin Hypercube Sampling limited to TURBO optimizers\"\n raise Exception(err_msg)\n elif len(self.variables) != len(self.var_initial):\n err_msg = \"var_initial length needs to equal number of variables\"\n raise Exception(err_msg)\n\n def _var_initial_norm(self):\n if self.opt_method == \"SLSQP\":\n self.var_initial = np.array([float(item) for item in self.var_initial])\n var_initial_norm = norm(self.var_initial, self.low_bound, self.upp_bound)\n elif self.var_initial == 'LHS':\n var_initial_norm = None\n else:\n self.var_initial = np.array([np.array(x) for x in self.var_initial])\n var_initial_norm = []\n for x_list in self.var_initial:\n x_list_norm = []\n for x in x_list:\n x_norm = norm(x, self.low_bound, self.upp_bound)\n x_list_norm.append(x_norm)\n var_initial_norm.append(np.array(x_list_norm))\n return np.array(var_initial_norm)\n\n def _get_farm_power_noyaw(self):\n if (self.tuning_dyn_initialization and\n hasattr(self.tuning_dyn_obj_list[0], 'wf_pow_noyaw')):\n wf_pow_noyaw = self.tuning_dyn_obj_list[0].wf_pow_noyaw\n else:\n self.yaw_zero = np.full(shape=self.wf_model.n_turbs, fill_value=0.0)\n self.wf_model = floris_reinitialise_atmosphere(self.wf_model,\n self.ws,\n self.wd,\n self.ti,\n self.shear)\n # Tune parameters\n if self.tuning_dyn_initialization:\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n self.wf_model = tuning_dyn_obj.tune_parameter(self, self.yaw_zero)\n\n wf_pow_noyaw = floris_calculate_farm_power(self.wf_model, self.yaw_zero)\n return wf_pow_noyaw\n\n def _print_info(self):\n print(\"=====================================================\")\n print(\"Optimizing wake redirection control...\")\n print(\"Optimization method: %s\" % (self.opt_method))\n print(\"Optimization function: %s \\n\" % 
(self.obj_function_name))\n if self.by_row_bool:\n print(\"Rows being optimized: \")\n print(self.variables)\n else:\n print(\"Turbines being optimized: \")\n print(self.variables)\n print(\"Number of variables to optimize = \", len(self.variables))\n print(\"=====================================================\")\n\n def _iter_details_setup(self):\n # Details for each obj function evaluation\n self.eval_yaw_angles = [] # deg\n self.eval_obj_func = []\n self.eval_farm_power = [] # MW\n\n # Details for each optimizer iteration\n self.iter_yaw_angles = [] # deg\n self.iter_obj_func = []\n self.iter_farm_power = [] # MW\n\n def _variables_to_farm_yaw(self, yaw_initial, var_values):\n yaw_angles = copy.deepcopy(yaw_initial)\n if self.by_row_bool:\n for i, row_idx in enumerate(self.variables):\n idx_1 = row_idx*self.cols\n idx_0 = idx_1-self.cols\n yaw_angles[idx_0:idx_1] = var_values[i]\n else:\n for i, turb_idx in enumerate(self.variables):\n yaw_angles[turb_idx-1] = var_values[i]\n return yaw_angles.tolist()\n\n # %% Optimizers\n\n def _optimizer_scipy(self):\n # Call back function for iter details\n def callback_func(xk):\n self.iter_yaw_angles.append(self.eval_yaw_angles[-1])\n self.iter_obj_func.append(self.eval_obj_func[-1])\n self.iter_farm_power.append(self.eval_farm_power[-1])\n # Linearly constrained case\n if self.A is not None:\n self.C = LinearConstraint(self.A,\n self.low_bound_constr_norm,\n self.upp_bound_constr_norm)\n self.residual_plant = minimize(self.obj_function,\n self.var_initial_norm,\n callback=callback_func,\n method=self.opt_method,\n bounds=self.var_bounds_norm_list,\n constraints=(self.C,),\n options=self.opt_options)\n # Unconstrained case\n else:\n self.residual_plant = minimize(self.obj_function,\n self.var_initial_norm,\n callback=callback_func,\n method=self.opt_method,\n bounds=self.var_bounds_norm_list,\n options=self.opt_options)\n # Extract optimal yaw angles for variables\n opt_yaw_angles_vars = unnorm(self.residual_plant.x,\n self.low_bound,\n self.upp_bound)\n # Extract optimal yaw angles for the entire farm\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Use best index because if total iterations reached, optimum not last evaluation\n eval_yaw_angles_lists = [x.tolist() for x in self.eval_yaw_angles]\n index_best = eval_yaw_angles_lists.index(opt_yaw_angles_all)\n opt_yaw_angles_all = np.array(opt_yaw_angles_all)\n self.obj_func_opt = self.eval_obj_func[index_best]\n self.farm_power_opt = self.eval_farm_power[index_best]\n\n # Add initial and last points to iteration details\n self.iter_yaw_angles.insert(0, self.eval_yaw_angles[0])\n self.iter_obj_func.insert(0, self.eval_obj_func[0])\n self.iter_farm_power.insert(0, self.eval_farm_power[0])\n self.iter_yaw_angles.append(self.eval_yaw_angles[-1])\n self.iter_obj_func.append(self.eval_obj_func[-1])\n self.iter_farm_power.append(self.eval_farm_power[-1])\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n def _optimizer_turbo_1(self):\n\n # TURBO initial sampling\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n X_init_provided = False\n X_init_same_norm = None\n else:\n X_init_provided = True\n X_init_same_norm = self.var_initial_norm\n\n # TURBO 
optimization\n turbo_1 = Turbo1(f=self.obj_function,\n lb=self.low_bound_norm_list,\n ub=self.upp_bound_norm_list,\n **self.opt_options,\n X_init_provided=X_init_provided,\n X_init_same=X_init_same_norm,\n )\n turbo_1.optimize()\n X = turbo_1.X # Evaluated points\n fX = turbo_1.fX # Observed values\n index_best = np.argmin(fX)\n f_best, x_best = fX[index_best], X[index_best, :]\n\n # Extract optimal yaw angles for variables and the entire farm\n opt_yaw_angles_vars = unnorm(x_best,\n self.low_bound,\n self.upp_bound)\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Update iteration details (same as evaluation details)\n self.iter_yaw_angles = self.eval_yaw_angles\n self.iter_obj_func = self.eval_obj_func\n self.iter_farm_power = self.eval_farm_power\n\n # Use best index because last iteration might not be the optimal one\n self.obj_func_opt = f_best[0]\n self.farm_power_opt = self.iter_farm_power[index_best]\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n def _optimizer_turbo_m(self):\n\n # TURBO optimization\n turbo_m = TurboM(f=self.obj_function,\n lb=self.low_bound_norm_list,\n ub=self.upp_bound_norm_list,\n **self.opt_options,\n )\n turbo_m.optimize()\n X = turbo_m.X # Evaluated points\n fX = turbo_m.fX # Observed values\n index_best = np.argmin(fX)\n f_best, x_best = fX[index_best], X[index_best, :]\n\n # Extract optimal yaw angles for variables and the entire farm\n opt_yaw_angles_vars = unnorm(x_best,\n self.low_bound,\n self.upp_bound)\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Update iteration details (same as evaluation details)\n self.iter_yaw_angles = self.eval_yaw_angles\n self.iter_obj_func = self.eval_obj_func\n self.iter_farm_power = self.eval_farm_power\n\n # Use best index because last iteration might not be the optimal one\n self.cost_func_opt = f_best[0]\n self.farm_power_opt = self.iter_farm_power[index_best]\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n # %% Objective functions\n\n def _obj_function_power(self, var_norm):\n\n # Extract farm yaw angles\n var_unnorm = unnorm(var_norm, self.low_bound, self.upp_bound)\n yaw_angles = self._variables_to_farm_yaw(self.yaw_initial, var_unnorm)\n yaw_angles = np.array([float(item) for item in yaw_angles])\n\n # Tune parameters dynamically\n if self.tuning_dyn_initialization:\n # Set equal yaw angles in groups\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n yaw_angles = self.tuning_dyn_obj_list[0].set_yaw_groups(yaw_angles)\n # Tune parameters\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n self.wf_model = tuning_dyn_obj.tune_parameter(self, yaw_angles)\n\n # Calculate negative of the farm power normalized by power for zero yaw\n self.wf_model = floris_reinitialise_atmosphere(self.wf_model,\n self.ws,\n self.wd,\n self.ti,\n self.shear)\n wf_pow = floris_calculate_farm_power(self.wf_model, yaw_angles)\n obj_function = (-1 * wf_pow / self.wf_pow_noyaw)\n\n # Update evalauation details\n 
self.eval_yaw_angles.append(yaw_angles)\n self.eval_obj_func.append(obj_function)\n self.eval_farm_power.append(wf_pow)\n\n return obj_function\n\n # %% Tuning Dynamic methods\n\n def _tuning_dyn_bool_check(self):\n if self.tuning_dyn_bool and self.by_row_bool:\n err_msg = \"Dynamic tuning not available for optimization by row.\"\n raise Exception(err_msg)\n\n def _tuning_dyn_init_input_handler(self):\n if isinstance(self.tuning_dyn_obj_list, (list, np.ndarray)) is False:\n err_msg = \"TuningDyn objects need to be in a list, even if only one.\"\n raise Exception(err_msg)\n # Check dynamic grouping tuning objects have the same tuning groups\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n tuning_groups_first = self.tuning_dyn_obj_list[0].tuning_groups\n same_groups = all(obj.tuning_groups == tuning_groups_first\n for obj in self.tuning_dyn_obj_list)\n if same_groups is False:\n err_msg = \"TuningDyn objects have different groupings.\"\n raise Exception(err_msg)\n\n def _tuning_dyn_initialization_check(self):\n if self.tuning_dyn_bool and self.tuning_dyn_initialization is False:\n err_msg = \"Tuning dynamic not initialized. See tuning_dyn_initialize method.\"\n raise Exception(err_msg)"
},
{
"identifier": "wso_optimal_yaw_angles",
"path": "deasc/visualisation.py",
"snippet": "def wso_optimal_yaw_angles(wso_obj, radius=1.5, ax=None):\n \"\"\"\n Plot the the optimal yaw angles for the wake steering optimisation.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object with method optimize_yaw run.\n radius: (float) radius of circle around each turbine.\n ax: (:py:class:`matplotlib.pyplot.axes`, optional) Figure axes. Defaults to None.\n \"\"\"\n # Check if optimisation is run\n if wso_obj.opt_run is False:\n err_msg = \"Wake steering optimisation not run. See optimize_yaw method\"\n raise Exception(err_msg)\n\n # Setup plot\n if ax is None:\n fig, ax = plt.subplots()\n\n # Get farm layout\n wf_model = wso_obj.wf_model\n x_coordinates = wf_model.interface.get_turbine_layout()[0]\n y_coordinates = wf_model.interface.get_turbine_layout()[1]\n\n # Scatter plot with optimal yaw angles\n lb = wso_obj.low_bound\n ub = wso_obj.upp_bound\n colors = plt.cm.coolwarm(np.linspace(0, 1, len(np.arange(lb, ub, 1))+(ub+1)))\n ax.scatter(x_coordinates/wf_model.D, y_coordinates/wf_model.D, s=0)\n for coord_idx in range(len(x_coordinates)):\n # Coloured patch\n yaw_single = wso_obj.opt_yaw_angles_all[coord_idx]\n color = colors[int(yaw_single)+40]\n circ = plt.Circle((x_coordinates[coord_idx]/wf_model.D,\n y_coordinates[coord_idx]/wf_model.D),\n radius=radius, color=color, fill=True)\n ax.add_patch(circ)\n # Yaw angle as text\n string = f\"{(round(yaw_single)):d}\"\n ax.text(x_coordinates[coord_idx]/wf_model.D,\n y_coordinates[coord_idx]/wf_model.D,\n string, fontsize=8, ha='center', color='k')\n\n ax.set_title(\"Optimal yaw angles\", fontsize=10)\n ax.set_xlabel(\"$xD^{-1}$\", fontsize=10)\n ax.set_ylabel(\"$yD^{-1}$\", fontsize=10)\n ax.set_aspect(\"equal\")\n\n return ax"
},
{
"identifier": "wso_optimal_flow_field",
"path": "deasc/visualisation.py",
"snippet": "def wso_optimal_flow_field(wso_obj, ax=None):\n \"\"\"\n Plot the streamwise velocity flow field at hub height for the optimal yaw angles at\n the inflow conditions specified in the optimisation.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object with method optimize_yaw run.\n ax: (:py:class:`matplotlib.pyplot.axes`, optional) Figure axes. Defaults to None.\n \"\"\"\n # Check if optimisation is run\n if wso_obj.opt_run is False:\n err_msg = \"Wake steering optimisation not run. See optimize_yaw method\"\n raise Exception(err_msg)\n\n # Setup plot\n if ax is None:\n fig, ax = plt.subplots()\n\n # Get hub height streamwise velocity field\n _ = floris_farm_eval(wso_obj.wf_model,\n wso_obj.opt_yaw_angles_all,\n wso_obj.ws,\n wso_obj.wd,\n wso_obj.ti,\n wso_obj.shear)\n hor_plane = floris_get_hor_plane_hub(wso_obj.wf_model, wso_obj.opt_yaw_angles_all)\n\n # Plot streamwise velocity field\n floris_visualize_cut_plane(hor_plane,\n ax=ax,\n vel_component='u',\n cmap=\"coolwarm\",\n levels=None,\n color_bar=True,\n title='Optimized Yaw')\n ax.set_xlabel(\"x [m]\")\n ax.set_ylabel(\"y [m]\")\n\n return ax"
},
{
"identifier": "wso_plot_details_iterations",
"path": "deasc/visualisation.py",
"snippet": "def wso_plot_details_iterations(wso_obj, ax=None):\n \"\"\"\n Plot the optimizer iteration details with the progressive values of the\n objective function.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object with method optimize_yaw run.\n ax: (:py:class:`matplotlib.pyplot.axes`, optional) Figure axes. Defaults to None.\n \"\"\"\n # Check if optimisation is run\n if wso_obj.opt_run is False:\n err_msg = \"Wake steering optimisation not run. See optimize_yaw method\"\n raise Exception(err_msg)\n\n # Setup plot\n if ax is None:\n fig, ax = plt.subplots()\n\n # Plot details\n _wso_plot_details(wso_obj, 'iterations', ax)\n\n return ax"
},
{
"identifier": "wso_plot_details_evaluations",
"path": "deasc/visualisation.py",
"snippet": "def wso_plot_details_evaluations(wso_obj, ax=None):\n \"\"\"\n Plot the wind farm evaluations details with the progressive values of the\n objective function.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object with method optimize_yaw run.\n ax: (:py:class:`matplotlib.pyplot.axes`, optional) Figure axes. Defaults to None.\n \"\"\"\n # Check if optimisation is run\n if wso_obj.opt_run is False:\n err_msg = \"Wake steering optimisation not run. See optimize_yaw method\"\n raise Exception(err_msg)\n\n # Setup plot\n if ax is None:\n fig, ax = plt.subplots()\n\n # Plot details\n _wso_plot_details(wso_obj, 'evaluations', ax)\n\n return ax"
},
{
"identifier": "wso_explore_optimum_power_1var",
"path": "deasc/visualisation.py",
"snippet": "def wso_explore_optimum_power_1var(wso_obj, turbine, yaw_bounds, yaw_number):\n \"\"\"\n Plot the power function for the yaw sweep of a single turbine within the farm,\n having the wake steering optimal yaw angles as initial condition.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object with method optimize_yaw run.\n turbine: (integer) turbine to sweep yaw angle.\n yaw_bounds: (tuple) yaw bounds for yaw sweep.\n yaw_number: (integer): number of yaw angles withing the specified yaw bounds.\n \"\"\"\n # Check if optimisation is run\n if wso_obj.opt_run is False:\n err_msg = \"Wake steering optimisation not run. See optimize_yaw method\"\n raise Exception(err_msg)\n # Check function input\n _wso_explore_optimum_input_handler(wso_obj, turbine)\n # Run optimal yaw angles solution\n _ = floris_farm_eval(wso_obj.wf_model,\n wso_obj.opt_yaw_angles_all,\n wso_obj.ws,\n wso_obj.wd,\n wso_obj.ti,\n wso_obj.shear)\n # Get yaw sweep plot\n yaw_sweep = np.linspace(yaw_bounds[0], yaw_bounds[1], yaw_number)\n decorated = obj_yaw_sweep_1var_plot(wso_obj.wf_model.pow_yaw_sweep_1var)\n decorated(\"custom\", (\"T\", turbine, yaw_sweep))\n # Add optimum\n plt.plot(wso_obj.opt_yaw_angles_all[turbine-1],\n wso_obj.farm_power_opt,\n 'or',\n label='Optimum')\n plt.legend(loc='best', fontsize=6, markerscale=0.6)"
}
] | import numpy as np
import matplotlib.pyplot as plt
from deasc import WfModel
from deasc import WSOpt
from deasc.visualisation import (
wso_optimal_yaw_angles,
wso_optimal_flow_field,
wso_plot_details_iterations,
wso_plot_details_evaluations,
wso_explore_optimum_power_1var
) | 10,596 |
"""
This example shows the plotting methods for wake steering optimisation on a 3x3 wind farm
of NREL 5 MW turbines. The optimisation conditions are the same as for example 04. The
plotting methods include: optimal yaw angles plot, optimizer iteration details, objective
function evaluation details, and optimum exploration.
"""
# Input file definition
path = "./inputs/"
input_file = "gch.yaml"
# Initialise wind farm model
wf_model = WfModel(input_file, path)
# Change wind farm layout
n_row = 3
n_col = 3
spac_x = 7
spac_y = 5
wf_model.set_aligned_layout(n_row, n_col, spac_x, spac_y)
# Specify atmospheric conditions
ws = 8.0
wd = 270
ti = 0.05
shear = 0.0
# Wake steering optimisation inputs
yaw_initial = np.full(shape=(n_row*n_col), fill_value=0)
inflow = (yaw_initial, wd, ws, ti, shear)
variables = [1, 2, 3, 4, 5, 6]
var_bounds = (-25, 25)
var_initial = np.full(shape=(len(variables)), fill_value=0)
# Initialise optimisation object
wso_obj = WSOpt(wf_model=wf_model,
inflow=inflow,
variables=variables,
var_bounds=var_bounds,
var_initial=var_initial,
opt_method="SLSQP",
opt_options=None,
obj_function="Farm Power",
constraints=(None, None, None),
by_row=(False, None, None),
tuning_dynamic=False
)
# Optimise, extract and print optimal yaw angles
opt_yaw_angles_vars, opt_yaw_angles_all = wso_obj.optimize_yaw()
print('Optimal farm yaw angles:')
print(opt_yaw_angles_all)
# Get optimisation details and print number of iterations and evaluations
iter_details, eval_details = wso_obj.get_optimization_details()
print('Number of optimiser iterations: %i' % (len(iter_details[0])))
print('Number of objective function evaluations: %i' % (len(eval_details[0])))
# Plot optimisation details and results
wso_optimal_yaw_angles(wso_obj, radius=1.2)
wso_optimal_flow_field(wso_obj)
wso_plot_details_iterations(wso_obj)
wso_plot_details_evaluations(wso_obj)
|
"""
This example shows the plotting methods for wake steering optimisation on a 3x3 wind farm
of NREL 5 MW turbines. The optimisation conditions are the same as for example 04. The
plotting methods include: optimal yaw angles plot, optimizer iteration details, objective
function evaluation details, and optimum exploration.
"""
# Input file definition
path = "./inputs/"
input_file = "gch.yaml"
# Initialise wind farm model
wf_model = WfModel(input_file, path)
# Change wind farm layout
n_row = 3
n_col = 3
spac_x = 7
spac_y = 5
wf_model.set_aligned_layout(n_row, n_col, spac_x, spac_y)
# Specify atmospheric conditions
ws = 8.0
wd = 270
ti = 0.05
shear = 0.0
# Wake steering optimisation inputs
yaw_initial = np.full(shape=(n_row*n_col), fill_value=0)
inflow = (yaw_initial, wd, ws, ti, shear)
variables = [1, 2, 3, 4, 5, 6]
var_bounds = (-25, 25)
var_initial = np.full(shape=(len(variables)), fill_value=0)
# Initialise optimisation object
wso_obj = WSOpt(wf_model=wf_model,
inflow=inflow,
variables=variables,
var_bounds=var_bounds,
var_initial=var_initial,
opt_method="SLSQP",
opt_options=None,
obj_function="Farm Power",
constraints=(None, None, None),
by_row=(False, None, None),
tuning_dynamic=False
)
# Optimise, extract and print optimal yaw angles
opt_yaw_angles_vars, opt_yaw_angles_all = wso_obj.optimize_yaw()
print('Optimal farm yaw angles:')
print(opt_yaw_angles_all)
# Get optimisation details and print number of iterations and evaluations
iter_details, eval_details = wso_obj.get_optimization_details()
print('Number of optimiser iterations: %i' % (len(iter_details[0])))
print('Number of objective function evaluations: %i' % (len(eval_details[0])))
# Plot optimisation details and results
wso_optimal_yaw_angles(wso_obj, radius=1.2)
wso_optimal_flow_field(wso_obj)
wso_plot_details_iterations(wso_obj)
wso_plot_details_evaluations(wso_obj) | wso_explore_optimum_power_1var(wso_obj, turbine=5, yaw_bounds=(-25, 25), yaw_number=51) | 6 | 2023-11-10 18:13:27+00:00 | 12k |
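The WSOpt snippet in the context above indicates that, besides the local SLSQP optimizer used in this record's example, a global TuRBO optimizer and row-wise optimization variables are also supported. The short sketch below is illustrative only and is not part of the dataset record; the choice of rows, bounds and the reliance on built-in TURBO_1 defaults are assumptions based on the WSOpt docstring.

# Illustrative sketch (assumption-based, not from the record above): optimize the first
# two farm rows with the global TuRBO optimizer instead of SLSQP.
wso_turbo = WSOpt(wf_model=wf_model,
                  inflow=inflow,
                  variables=[1, 2],                # first two farm rows as variables (assumed)
                  var_bounds=(-25, 25),
                  var_initial='LHS',               # Latin hypercube sampling (TuRBO only)
                  opt_method="TURBO_1",
                  opt_options=None,                # fall back to the built-in TURBO_1 defaults
                  obj_function="Farm Power",
                  constraints=(None, None, None),  # linear constraints are limited to SLSQP
                  by_row=(True, n_row, n_col),     # rows*cols must equal the turbine number
                  tuning_dynamic=False)
opt_yaw_vars_turbo, opt_yaw_all_turbo = wso_turbo.optimize_yaw()

With by_row enabled, each variable sets a common yaw angle for a whole row of the 3x3 aligned layout, so only two yaw values are searched by TuRBO rather than one per turbine.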
CPES-Power-and-Energy-Systems/interoperable-recommender-tso | energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/bayesian_optimization.py | [
{
"identifier": "GaussianProcess",
"path": "energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/helpers.py",
"snippet": "class GaussianProcess(BaseEstimator, RegressorMixin):\n \"\"\"The legacy Gaussian Process model class.\n\n .. deprecated:: 0.18\n This class will be removed in 0.20.\n Use the :class:`GaussianProcessRegressor` instead.\n\n Read more in the :ref:`User Guide <gaussian_process>`.\n\n Parameters\n ----------\n regr : string or callable, optional\n A regression function returning an array of outputs of the linear\n regression functional basis. The number of observations n_samples\n should be greater than the size p of this basis.\n Default assumes a simple constant regression trend.\n Available built-in regression models are::\n\n 'constant', 'linear', 'quadratic'\n\n corr : string or callable, optional\n A stationary autocorrelation function returning the autocorrelation\n between two points x and x'.\n Default assumes a squared-exponential autocorrelation model.\n Built-in correlation models are::\n\n 'absolute_exponential', 'squared_exponential',\n 'generalized_exponential', 'cubic', 'linear'\n\n beta0 : double array_like, optional\n The regression weight vector to perform Ordinary Kriging (OK).\n Default assumes Universal Kriging (UK) so that the vector beta of\n regression weights is estimated using the maximum likelihood\n principle.\n\n storage_mode : string, optional\n A string specifying whether the Cholesky decomposition of the\n correlation matrix should be stored in the class (storage_mode =\n 'full') or not (storage_mode = 'light').\n Default assumes storage_mode = 'full', so that the\n Cholesky decomposition of the correlation matrix is stored.\n This might be a useful parameter when one is not interested in the\n MSE and only plan to estimate the BLUP, for which the correlation\n matrix is not required.\n\n verbose : boolean, optional\n A boolean specifying the verbose level.\n Default is verbose = False.\n\n theta0 : double array_like, optional\n An array with shape (n_features, ) or (1, ).\n The parameters in the autocorrelation model.\n If thetaL and thetaU are also specified, theta0 is considered as\n the starting point for the maximum likelihood estimation of the\n best set of parameters.\n Default assumes isotropic autocorrelation model with theta0 = 1e-1.\n\n thetaL : double array_like, optional\n An array with shape matching theta0's.\n Lower bound on the autocorrelation parameters for maximum\n likelihood estimation.\n Default is None, so that it skips maximum likelihood estimation and\n it uses theta0.\n\n thetaU : double array_like, optional\n An array with shape matching theta0's.\n Upper bound on the autocorrelation parameters for maximum\n likelihood estimation.\n Default is None, so that it skips maximum likelihood estimation and\n it uses theta0.\n\n normalize : boolean, optional\n Input X and observations y are centered and reduced wrt\n means and standard deviations estimated from the n_samples\n observations provided.\n Default is normalize = True so that data is normalized to ease\n maximum likelihood estimation.\n\n nugget : double or ndarray, optional\n Introduce a nugget effect to allow smooth predictions from noisy\n data. If nugget is an ndarray, it must be the same length as the\n number of data points used for the fit.\n The nugget is added to the diagonal of the assumed training covariance;\n in this way it acts as a Tikhonov regularization in the problem. 
In\n the special case of the squared exponential correlation function, the\n nugget mathematically represents the variance of the input values.\n Default assumes a nugget close to machine precision for the sake of\n robustness (nugget = 10. * MACHINE_EPSILON).\n\n optimizer : string, optional\n A string specifying the optimization algorithm to be used.\n Default uses 'fmin_cobyla' algorithm from scipy.optimize.\n Available optimizers are::\n\n 'fmin_cobyla', 'Welch'\n\n 'Welch' optimizer is dued to Welch et al., see reference [WBSWM1992]_.\n It consists in iterating over several one-dimensional optimizations\n instead of running one single multi-dimensional optimization.\n\n random_start : int, optional\n The number of times the Maximum Likelihood Estimation should be\n performed from a random starting point.\n The first MLE always uses the specified starting point (theta0),\n the next starting points are picked at random according to an\n exponential distribution (log-uniform on [thetaL, thetaU]).\n Default does not use random starting point (random_start = 1).\n\n random_state : int, RandomState instance or None, optional (default=None)\n The generator used to shuffle the sequence of coordinates of theta in\n the Welch optimizer. If int, random_state is the seed used by the\n random number generator; If RandomState instance, random_state is the\n random number generator; If None, the random number generator is the\n RandomState instance used by `np.random`.\n\n Attributes\n ----------\n theta_ : array\n Specified theta OR the best set of autocorrelation parameters (the \\\n sought maximizer of the reduced likelihood function).\n\n reduced_likelihood_function_value_ : array\n The optimal reduced likelihood function value.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.gaussian_process import GaussianProcess\n >>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T\n >>> y = (X * np.sin(X)).ravel()\n >>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)\n >>> gp.fit(X, y) # doctest: +ELLIPSIS\n GaussianProcess(beta0=None...\n ...\n\n Notes\n -----\n The presentation implementation is based on a translation of the DACE\n Matlab toolbox, see reference [NLNS2002]_.\n\n References\n ----------\n\n .. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.\n Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)\n http://imedea.uib-csic.es/master/cambioglobal/Modulo_V_cod101615/Lab/lab_maps/krigging/DACE-krigingsoft/dace/dace.pdf\n\n .. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,\n and M.D. Morris (1992). Screening, predicting, and computer\n experiments. Technometrics, 34(1) 15--25.`\n http://www.jstor.org/stable/1269548\n \"\"\"\n\n _regression_types = {\n 'constant': regression.constant,\n 'linear': regression.linear,\n 'quadratic': regression.quadratic}\n\n _correlation_types = {\n 'absolute_exponential': correlation.absolute_exponential,\n 'squared_exponential': correlation.squared_exponential,\n 'generalized_exponential': correlation.generalized_exponential,\n 'cubic': correlation.cubic,\n 'linear': correlation.linear}\n\n _optimizer_types = [\n 'fmin_cobyla',\n 'Welch']\n\n def __init__(self, regr='constant', corr='squared_exponential', beta0=None,\n storage_mode='full', verbose=False, theta0=1e-1,\n thetaL=None, thetaU=None, optimizer='fmin_cobyla',\n random_start=1, normalize=True,\n nugget=10. 
* MACHINE_EPSILON, random_state=None):\n\n self.regr = regr\n self.corr = corr\n self.beta0 = beta0\n self.storage_mode = storage_mode\n self.verbose = verbose\n self.theta0 = theta0\n self.thetaL = thetaL\n self.thetaU = thetaU\n self.normalize = normalize\n self.nugget = nugget\n self.optimizer = optimizer\n self.random_start = random_start\n self.random_state = random_state\n\n def fit(self, X, y):\n \"\"\"\n The Gaussian Process model fitting method.\n\n Parameters\n ----------\n X : double array_like\n An array with shape (n_samples, n_features) with the input at which\n observations were made.\n\n y : double array_like\n An array with shape (n_samples, ) or shape (n_samples, n_targets)\n with the observations of the output to be predicted.\n\n Returns\n -------\n gp : self\n A fitted Gaussian Process model object awaiting data to perform\n predictions.\n \"\"\"\n # Run input checks\n self._check_params()\n\n self.random_state = check_random_state(self.random_state)\n\n # Force data to 2D numpy.array\n X, y = check_X_y(X, y, multi_output=True, y_numeric=True)\n self.y_ndim_ = y.ndim\n if y.ndim == 1:\n y = y[:, np.newaxis]\n\n # Check shapes of DOE & observations\n n_samples, n_features = X.shape\n _, n_targets = y.shape\n\n # Run input checks\n self._check_params(n_samples)\n\n # Normalize data or don't\n if self.normalize:\n X_mean = np.mean(X, axis=0)\n X_std = np.std(X, axis=0)\n y_mean = np.mean(y, axis=0)\n y_std = np.std(y, axis=0)\n X_std[X_std == 0.] = 1.\n y_std[y_std == 0.] = 1.\n # center and scale X if necessary\n X = (X - X_mean) / X_std\n y = (y - y_mean) / y_std\n else:\n X_mean = np.zeros(1)\n X_std = np.ones(1)\n y_mean = np.zeros(1)\n y_std = np.ones(1)\n\n # Calculate matrix of distances D between samples\n D, ij = l1_cross_distances(X)\n if (np.min(np.sum(D, axis=1)) == 0. and self.corr != correlation.pure_nugget): # noqa\n raise Exception(\"Multiple input features cannot have the same\"\n \" target value.\")\n\n # Regression matrix and parameters\n F = self.regr(X)\n n_samples_F = F.shape[0]\n if F.ndim > 1:\n p = F.shape[1]\n else:\n p = 1\n if n_samples_F != n_samples:\n raise Exception(\"Number of rows in F and X do not match. Most \"\n \"likely something is going wrong with the \"\n \"regression model.\")\n if p > n_samples_F:\n raise Exception((\"Ordinary least squares problem is undetermined \"\n \"n_samples=%d must be greater than the \"\n \"regression model size p=%d.\") % (n_samples, p))\n if self.beta0 is not None:\n if self.beta0.shape[0] != p:\n raise Exception(\"Shapes of beta0 and F do not match.\")\n\n # Set attributes\n self.X = X\n self.y = y\n self.D = D\n self.ij = ij\n self.F = F\n self.X_mean, self.X_std = X_mean, X_std\n self.y_mean, self.y_std = y_mean, y_std\n\n # Determine Gaussian Process model parameters\n if self.thetaL is not None and self.thetaU is not None:\n # Maximum Likelihood Estimation of the parameters\n if self.verbose:\n print(\"Performing Maximum Likelihood Estimation of the \"\n \"autocorrelation parameters...\")\n self.theta_, self.reduced_likelihood_function_value_, par = \\\n self._arg_max_reduced_likelihood_function()\n if np.isinf(self.reduced_likelihood_function_value_):\n raise Exception(\"Bad parameter region. \"\n \"Try increasing upper bound\")\n\n else:\n # Given parameters\n if self.verbose:\n print(\"Given autocorrelation parameters. 
\"\n \"Computing Gaussian Process model parameters...\")\n self.theta_ = self.theta0\n self.reduced_likelihood_function_value_, par = \\\n self.reduced_likelihood_function()\n if np.isinf(self.reduced_likelihood_function_value_):\n raise Exception(\"Bad point. Try increasing theta0.\")\n\n self.beta = par['beta']\n self.gamma = par['gamma']\n self.sigma2 = par['sigma2']\n self.C = par['C']\n self.Ft = par['Ft']\n self.G = par['G']\n\n if self.storage_mode == 'light':\n # Delete heavy data (it will be computed again if required)\n # (it is required only when MSE is wanted in self.predict)\n if self.verbose:\n print(\"Light storage mode specified. \"\n \"Flushing autocorrelation matrix...\")\n self.D = None\n self.ij = None\n self.F = None\n self.C = None\n self.Ft = None\n self.G = None\n\n return self\n\n def predict(self, X, eval_MSE=False, batch_size=None):\n \"\"\"\n This function evaluates the Gaussian Process model at x.\n\n Parameters\n ----------\n X : array_like\n An array with shape (n_eval, n_features) giving the point(s) at\n which the prediction(s) should be made.\n\n eval_MSE : boolean, optional\n A boolean specifying whether the Mean Squared Error should be\n evaluated or not.\n Default assumes evalMSE = False and evaluates only the BLUP (mean\n prediction).\n\n batch_size : integer, optional\n An integer giving the maximum number of points that can be\n evaluated simultaneously (depending on the available memory).\n Default is None so that all given points are evaluated at the same\n time.\n\n Returns\n -------\n y : array_like, shape (n_samples, ) or (n_samples, n_targets)\n An array with shape (n_eval, ) if the Gaussian Process was trained\n on an array of shape (n_samples, ) or an array with shape\n (n_eval, n_targets) if the Gaussian Process was trained on an array\n of shape (n_samples, n_targets) with the Best Linear Unbiased\n Prediction at x.\n\n MSE : array_like, optional (if eval_MSE == True)\n An array with shape (n_eval, ) or (n_eval, n_targets) as with y,\n with the Mean Squared Error at x.\n \"\"\"\n check_is_fitted(self, \"X\")\n\n # Check input shapes\n X = check_array(X)\n n_eval, _ = X.shape\n n_samples, n_features = self.X.shape\n n_samples_y, n_targets = self.y.shape\n\n # Run input checks\n self._check_params(n_samples)\n\n if X.shape[1] != n_features:\n raise ValueError((\"The number of features in X (X.shape[1] = %d) \"\n \"should match the number of features used \"\n \"for fit() \"\n \"which is %d.\") % (X.shape[1], n_features))\n\n if batch_size is None:\n # No memory management\n # (evaluates all given points in a single batch run)\n\n # Normalize input\n X = (X - self.X_mean) / self.X_std\n\n # Initialize output\n y = np.zeros(n_eval)\n if eval_MSE:\n MSE = np.zeros(n_eval)\n\n # Get pairwise componentwise L1-distances to the input training set\n dx = manhattan_distances(X, Y=self.X, sum_over_features=False)\n # Get regression function and correlation\n f = self.regr(X)\n r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)\n\n # Scaled predictor\n y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)\n\n # Predictor\n y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)\n\n if self.y_ndim_ == 1:\n y = y.ravel()\n\n # Mean Squared Error\n if eval_MSE:\n C = self.C\n if C is None:\n # Light storage mode (need to recompute C, F, Ft and G)\n if self.verbose:\n print(\"This GaussianProcess used 'light' storage mode \"\n \"at instantiation. 
Need to recompute \"\n \"autocorrelation matrix...\")\n reduced_likelihood_function_value, par = \\\n self.reduced_likelihood_function()\n self.C = par['C']\n self.Ft = par['Ft']\n self.G = par['G']\n\n rt = linalg.solve_triangular(self.C, r.T, lower=True)\n\n if self.beta0 is None:\n # Universal Kriging\n u = linalg.solve_triangular(self.G.T,\n np.dot(self.Ft.T, rt) - f.T,\n lower=True)\n else:\n # Ordinary Kriging\n u = np.zeros((n_targets, n_eval))\n\n MSE = np.dot(self.sigma2.reshape(n_targets, 1),\n (1. - (rt ** 2.).sum(axis=0)\n + (u ** 2.).sum(axis=0))[np.newaxis, :])\n MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)\n\n # Mean Squared Error might be slightly negative depending on\n # machine precision: force to zero!\n MSE[MSE < 0.] = 0.\n\n if self.y_ndim_ == 1:\n MSE = MSE.ravel()\n\n return y, MSE\n\n else:\n\n return y\n\n else:\n # Memory management\n\n if type(batch_size) is not int or batch_size <= 0:\n raise Exception(\"batch_size must be a positive integer\")\n\n if eval_MSE:\n\n y, MSE = np.zeros(n_eval), np.zeros(n_eval)\n for k in range(max(1, int(n_eval / batch_size))):\n batch_from = k * batch_size\n batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])\n y[batch_from:batch_to], MSE[batch_from:batch_to] = \\\n self.predict(X[batch_from:batch_to],\n eval_MSE=eval_MSE, batch_size=None)\n\n return y, MSE\n\n else:\n\n y = np.zeros(n_eval)\n for k in range(max(1, int(n_eval / batch_size))):\n batch_from = k * batch_size\n batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])\n y[batch_from:batch_to] = \\\n self.predict(X[batch_from:batch_to],\n eval_MSE=eval_MSE, batch_size=None)\n\n return y\n\n def reduced_likelihood_function(self, theta=None):\n \"\"\"\n This function determines the BLUP parameters and evaluates the reduced\n likelihood function for the given autocorrelation parameters theta.\n\n Maximizing this function wrt the autocorrelation parameters theta is\n equivalent to maximizing the likelihood of the assumed joint Gaussian\n distribution of the observations y evaluated onto the design of\n experiments X.\n\n Parameters\n ----------\n theta : array_like, optional\n An array containing the autocorrelation parameters at which the\n Gaussian Process model parameters should be determined.\n Default uses the built-in autocorrelation parameters\n (ie ``theta = self.theta_``).\n\n Returns\n -------\n reduced_likelihood_function_value : double\n The value of the reduced likelihood function associated to the\n given autocorrelation parameters theta.\n\n par : dict\n A dictionary containing the requested Gaussian Process model\n parameters:\n\n - ``sigma2`` is the Gaussian Process variance.\n - ``beta`` is the generalized least-squares regression weights for\n Universal Kriging or given beta0 for Ordinary Kriging.\n - ``gamma`` is the Gaussian Process weights.\n - ``C`` is the Cholesky decomposition of the correlation\n matrix [R].\n - ``Ft`` is the solution of the linear equation system\n [R] x Ft = F\n - ``G`` is the QR decomposition of the matrix Ft.\n \"\"\"\n check_is_fitted(self, \"X\")\n\n if theta is None:\n # Use built-in autocorrelation parameters\n theta = self.theta_\n\n # Initialize output\n reduced_likelihood_function_value = - np.inf\n par = {}\n\n # Retrieve data\n n_samples = self.X.shape[0]\n D = self.D\n ij = self.ij\n F = self.F\n\n if D is None:\n # Light storage mode (need to recompute D, ij and F)\n D, ij = l1_cross_distances(self.X)\n if (np.min(np.sum(D, axis=1)) == 0.\n and self.corr != correlation.pure_nugget):\n raise 
Exception(\"Multiple X are not allowed\")\n F = self.regr(self.X)\n\n # Set up R\n r = self.corr(theta, D)\n R = np.eye(n_samples) * (1. + self.nugget)\n R[ij[:, 0], ij[:, 1]] = r\n R[ij[:, 1], ij[:, 0]] = r\n\n # Cholesky decomposition of R\n try:\n C = linalg.cholesky(R, lower=True)\n except linalg.LinAlgError:\n return reduced_likelihood_function_value, par\n\n # Get generalized least squares solution\n Ft = linalg.solve_triangular(C, F, lower=True)\n Q, G = linalg.qr(Ft, mode='economic')\n\n sv = linalg.svd(G, compute_uv=False)\n rcondG = sv[-1] / sv[0]\n if rcondG < 1e-10:\n # Check F\n sv = linalg.svd(F, compute_uv=False)\n condF = sv[0] / sv[-1]\n if condF > 1e15:\n raise Exception(\"F is too ill conditioned. Poor combination \"\n \"of regression model and observations.\")\n else:\n # Ft is too ill conditioned, get out (try different theta)\n return reduced_likelihood_function_value, par\n\n Yt = linalg.solve_triangular(C, self.y, lower=True)\n if self.beta0 is None:\n # Universal Kriging\n beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))\n else:\n # Ordinary Kriging\n beta = np.array(self.beta0)\n\n rho = Yt - np.dot(Ft, beta)\n sigma2 = (rho ** 2.).sum(axis=0) / n_samples\n # The determinant of R is equal to the squared product of the diagonal\n # elements of its Cholesky decomposition C\n detR = (np.diag(C) ** (2. / n_samples)).prod()\n\n # Compute/Organize output\n reduced_likelihood_function_value = - sigma2.sum() * detR\n par['sigma2'] = sigma2 * self.y_std ** 2.\n par['beta'] = beta\n par['gamma'] = linalg.solve_triangular(C.T, rho)\n par['C'] = C\n par['Ft'] = Ft\n par['G'] = G\n\n return reduced_likelihood_function_value, par\n\n def _arg_max_reduced_likelihood_function(self):\n \"\"\"\n This function estimates the autocorrelation parameters theta as the\n maximizer of the reduced likelihood function.\n (Minimization of the opposite reduced likelihood function is used for\n convenience)\n\n Parameters\n ----------\n self : All parameters are stored in the Gaussian Process model object.\n\n Returns\n -------\n optimal_theta : array_like\n The best set of autocorrelation parameters (the sought maximizer of\n the reduced likelihood function).\n\n optimal_reduced_likelihood_function_value : double\n The optimal reduced likelihood function value.\n\n optimal_par : dict\n The BLUP parameters associated to thetaOpt.\n \"\"\"\n\n # Initialize output\n best_optimal_theta = []\n best_optimal_rlf_value = []\n best_optimal_par = []\n\n if self.verbose:\n print(\"The chosen optimizer is: \" + str(self.optimizer))\n if self.random_start > 1:\n print(str(self.random_start) + \" random starts are required.\")\n\n percent_completed = 0.\n\n # Force optimizer to fmin_cobyla if the model is meant to be isotropic\n if self.optimizer == 'Welch' and self.theta0.size == 1:\n self.optimizer = 'fmin_cobyla'\n\n if self.optimizer == 'fmin_cobyla':\n\n def minus_reduced_likelihood_function(log10t):\n return - self.reduced_likelihood_function(\n theta=10. 
** log10t)[0]\n\n constraints = []\n for i in range(self.theta0.size):\n constraints.append(lambda log10t, i=i:\n log10t[i] - np.log10(self.thetaL[0, i]))\n constraints.append(lambda log10t, i=i:\n np.log10(self.thetaU[0, i]) - log10t[i])\n\n for k in range(self.random_start):\n\n if k == 0:\n # Use specified starting point as first guess\n theta0 = self.theta0\n else:\n # Generate a random starting point log10-uniformly\n # distributed between bounds\n log10theta0 = (np.log10(self.thetaL)\n + self.random_state.rand(*self.theta0.shape)\n * np.log10(self.thetaU / self.thetaL))\n theta0 = 10. ** log10theta0\n\n # Run Cobyla\n try:\n log10_optimal_theta = \\\n optimize.fmin_cobyla(minus_reduced_likelihood_function,\n np.log10(theta0).ravel(),\n constraints)\n except ValueError as ve:\n print(\"Optimization failed. Try increasing the ``nugget``\")\n raise ve\n\n optimal_theta = 10. ** log10_optimal_theta\n optimal_rlf_value, optimal_par = \\\n self.reduced_likelihood_function(theta=optimal_theta)\n\n # Compare the new optimizer to the best previous one\n if k > 0:\n if optimal_rlf_value > best_optimal_rlf_value:\n best_optimal_rlf_value = optimal_rlf_value\n best_optimal_par = optimal_par\n best_optimal_theta = optimal_theta\n else:\n best_optimal_rlf_value = optimal_rlf_value\n best_optimal_par = optimal_par\n best_optimal_theta = optimal_theta\n if self.verbose and self.random_start > 1:\n if (20 * k) / self.random_start > percent_completed:\n percent_completed = (20 * k) / self.random_start\n print(\"%s completed\" % (5 * percent_completed))\n\n optimal_rlf_value = best_optimal_rlf_value\n optimal_par = best_optimal_par\n optimal_theta = best_optimal_theta\n\n elif self.optimizer == 'Welch':\n\n # Backup of the given attributes\n theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU\n corr = self.corr\n verbose = self.verbose\n\n # This will iterate over fmin_cobyla optimizer\n self.optimizer = 'fmin_cobyla'\n self.verbose = False\n\n # Initialize under isotropy assumption\n if verbose:\n print(\"Initialize under isotropy assumption...\")\n self.theta0 = check_array(self.theta0.min())\n self.thetaL = check_array(self.thetaL.min())\n self.thetaU = check_array(self.thetaU.max())\n theta_iso, optimal_rlf_value_iso, par_iso = \\\n self._arg_max_reduced_likelihood_function()\n optimal_theta = theta_iso + np.zeros(theta0.shape)\n\n # Iterate over all dimensions of theta allowing for anisotropy\n if verbose:\n print(\"Now improving allowing for anisotropy...\")\n for i in self.random_state.permutation(theta0.size):\n if verbose:\n print(\"Proceeding along dimension %d...\" % (i + 1))\n self.theta0 = check_array(theta_iso)\n self.thetaL = check_array(thetaL[0, i])\n self.thetaU = check_array(thetaU[0, i])\n\n def corr_cut(t, d):\n return corr(check_array(np.hstack(\n [\n optimal_theta[0][0:i], t[0],\n optimal_theta[0][(i + 1)::]\n ]\n )), d)\n\n self.corr = corr_cut\n optimal_theta[0, i], optimal_rlf_value, optimal_par = \\\n self._arg_max_reduced_likelihood_function()\n\n # Restore the given attributes\n self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU\n self.corr = corr\n self.optimizer = 'Welch'\n self.verbose = verbose\n\n else:\n\n raise NotImplementedError(\"This optimizer ('%s') is not \"\n \"implemented yet. 
Please contribute!\"\n % self.optimizer)\n\n return optimal_theta, optimal_rlf_value, optimal_par\n\n def _check_params(self, n_samples=None):\n\n # Check regression model\n if not callable(self.regr):\n if self.regr in self._regression_types:\n self.regr = self._regression_types[self.regr]\n else:\n raise ValueError(\"regr should be one of %s or callable, \"\n \"%s was given.\"\n % (self._regression_types.keys(), self.regr))\n\n # Check regression weights if given (Ordinary Kriging)\n if self.beta0 is not None:\n self.beta0 = np.atleast_2d(self.beta0)\n if self.beta0.shape[1] != 1:\n # Force to column vector\n self.beta0 = self.beta0.T\n\n # Check correlation model\n if not callable(self.corr):\n if self.corr in self._correlation_types:\n self.corr = self._correlation_types[self.corr]\n else:\n raise ValueError(\"corr should be one of %s or callable, \"\n \"%s was given.\"\n % (self._correlation_types.keys(), self.corr))\n\n # Check storage mode\n if self.storage_mode != 'full' and self.storage_mode != 'light':\n raise ValueError(\"Storage mode should either be 'full' or \"\n \"'light', %s was given.\" % self.storage_mode)\n\n # Check correlation parameters\n self.theta0 = np.atleast_2d(self.theta0)\n lth = self.theta0.size\n\n if self.thetaL is not None and self.thetaU is not None:\n self.thetaL = np.atleast_2d(self.thetaL)\n self.thetaU = np.atleast_2d(self.thetaU)\n if self.thetaL.size != lth or self.thetaU.size != lth:\n raise ValueError(\"theta0, thetaL and thetaU must have the \"\n \"same length.\")\n if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):\n raise ValueError(\"The bounds must satisfy O < thetaL <= \"\n \"thetaU.\")\n\n elif self.thetaL is None and self.thetaU is None:\n if np.any(self.theta0 <= 0):\n raise ValueError(\"theta0 must be strictly positive.\")\n\n elif self.thetaL is None or self.thetaU is None:\n raise ValueError(\"thetaL and thetaU should either be both or \"\n \"neither specified.\")\n\n # Force verbose type to bool\n self.verbose = bool(self.verbose)\n\n # Force normalize type to bool\n self.normalize = bool(self.normalize)\n\n # Check nugget value\n self.nugget = np.asarray(self.nugget)\n if np.any(self.nugget) < 0.:\n raise ValueError(\"nugget must be positive or zero.\")\n if (n_samples is not None\n and self.nugget.shape not in [(), (n_samples,)]):\n raise ValueError(\"nugget must be either a scalar \"\n \"or array of length n_samples.\")\n\n # Check optimizer\n if self.optimizer not in self._optimizer_types:\n raise ValueError(\"optimizer should be one of %s\"\n % self._optimizer_types)\n\n # Force random_start type to int\n self.random_start = int(self.random_start)"
},
{
"identifier": "UtilityFunction",
"path": "energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/helpers.py",
"snippet": "class UtilityFunction(object):\n \"\"\"\n An object to compute the acquisition functions.\n \"\"\"\n\n def __init__(self, kind, kappa, xi):\n \"\"\"\n If UCB is to be used, a constant kappa is needed.\n \"\"\"\n self.kappa = kappa\n self.xi = xi\n if kind not in ['ucb', 'ei', 'poi']:\n err = \"The utility function \" \\\n \"{} has not been implemented, \" \\\n \"please choose one of ucb, ei, or poi.\".format(kind)\n raise NotImplementedError(err)\n else:\n self.kind = kind\n\n def utility(self, x, gp, y_max):\n if self.kind == 'ucb':\n return self._ucb(x, gp, self.kappa)\n if self.kind == 'ei':\n return self._ei(x, gp, y_max, self.xi)\n if self.kind == 'poi':\n return self._poi(x, gp, y_max, self.xi)\n\n @staticmethod\n def _ucb(x, gp, kappa):\n mean, var = gp.predict(x, eval_MSE=True)\n return mean + kappa * np.sqrt(var)\n\n @staticmethod\n def _ei(x, gp, y_max, xi):\n mean, var = gp.predict(x, eval_MSE=True)\n\n # Avoid points with zero variance\n var = np.maximum(var, 1e-9 + 0 * var)\n\n z = (mean - y_max - xi) / np.sqrt(var)\n return (mean - y_max - xi) * norm.cdf(z) + np.sqrt(var) * norm.pdf(z)\n\n @staticmethod\n def _poi(x, gp, y_max, xi):\n mean, var = gp.predict(x, eval_MSE=True)\n\n # Avoid points with zero variance\n var = np.maximum(var, 1e-9 + 0 * var)\n\n z = (mean - y_max - xi) / np.sqrt(var)\n return norm.cdf(z)"
},
{
"identifier": "unique_rows",
"path": "energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/helpers.py",
"snippet": "def unique_rows(a):\n \"\"\"\n A functions to trim repeated rows that may appear when optimizing.\n This is necessary to avoid the sklearn GP object from breaking\n\n :param a: array to trim repeated rows from\n\n :return: mask of unique rows\n \"\"\"\n\n # Sort array and kep track of where things should go back to\n order = np.lexsort(a.T)\n reorder = np.argsort(order)\n\n a = a[order]\n diff = np.diff(a, axis=0)\n ui = np.ones(len(a), 'bool')\n ui[1:] = (diff != 0).any(axis=1)\n\n return ui[reorder]"
},
{
"identifier": "PrintLog",
"path": "energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/helpers.py",
"snippet": "class PrintLog(object):\n\n def __init__(self, params):\n\n self.ymax = None\n self.xmax = None\n self.params = params\n self.ite = 1\n\n self.start_time = datetime.now()\n self.last_round = datetime.now()\n\n # sizes of parameters name and all\n self.sizes = [max(len(ps), 7) for ps in params]\n\n # Sorted indexes to access parameters\n self.sorti = sorted(range(len(self.params)),\n key=self.params.__getitem__)\n\n def reset_timer(self):\n self.start_time = datetime.now()\n self.last_round = datetime.now()\n\n def print_header(self, initialization=True):\n\n if initialization:\n print(\"{}Initialization{}\".format(BColours.RED,\n BColours.ENDC))\n else:\n print(\"{}Bayesian Optimization{}\".format(BColours.RED,\n BColours.ENDC))\n\n print(BColours.BLUE + \"-\" * (29 + sum([s + 5 for s in self.sizes]))\n + BColours.ENDC)\n\n print(\"{0:>{1}}\".format(\"Step\", 5), end=\" | \")\n print(\"{0:>{1}}\".format(\"Time\", 6), end=\" | \")\n print(\"{0:>{1}}\".format(\"Value\", 10), end=\" | \")\n\n for index in self.sorti:\n print(\"{0:>{1}}\".format(self.params[index],\n self.sizes[index] + 2),\n end=\" | \")\n print('')\n\n def print_step(self, x, y, warning=False):\n\n print(\"{:>5d}\".format(self.ite), end=\" | \")\n\n m, s = divmod((datetime.now() - self.last_round).total_seconds(), 60)\n print(\"{:>02d}m{:>02d}s\".format(int(m), int(s)), end=\" | \")\n\n if self.ymax is None or self.ymax < y:\n self.ymax = y\n self.xmax = x\n print(\"{0}{2: >10.5f}{1}\".format(BColours.MAGENTA,\n BColours.ENDC,\n y),\n end=\" | \")\n\n for index in self.sorti:\n print(\"{0}{2: >{3}.{4}f}{1}\".format(BColours.GREEN,\n BColours.ENDC,\n x[index],\n self.sizes[index] + 2,\n min(self.sizes[index] - 3,\n 6 - 2)),\n end=\" | \")\n else:\n print(\"{: >10.5f}\".format(y), end=\" | \")\n for index in self.sorti:\n print(\"{0: >{1}.{2}f}\".format(x[index],\n self.sizes[index] + 2,\n min(self.sizes[index] - 3,\n 6 - 2)),\n end=\" | \")\n\n if warning:\n print(\"{}Warning: Test point chose at \"\n \"random due to repeated sample.{}\".format(BColours.RED,\n BColours.ENDC))\n\n print()\n\n self.last_round = datetime.now()\n self.ite += 1\n\n def print_summary(self):\n pass"
}
] | import numpy as np
from .helpers import GaussianProcess
from scipy.optimize import minimize
from .helpers import UtilityFunction, unique_rows, PrintLog | 10,755 | res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
x_try.reshape(1, -1),
bounds=bounds,
method="L-BFGS-B")
# Store it if better than previous minimum(maximum).
if max_acq is None or -res.fun >= max_acq:
x_max = res.x
max_acq = -res.fun
# Clip output to make sure it lies within the bounds. Due to floating
# point technicalities this is not always the case.
return np.clip(x_max, bounds[:, 0], bounds[:, 1])
def matern52(theta, d):
"""
    Matern 5/2 correlation model::
        theta, d --> r(theta, d) = (1 + sqrt(5)*r + 5/3*r^2) * exp(-sqrt(5)*r)
        where r = sqrt( sum_{i=1}^{n} (d_i)^2 / (theta_i)^2 )
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) containing the values of the
        autocorrelation model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
if theta.size == 1:
r = np.sqrt(np.sum(d ** 2, axis=1)) / theta[0]
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
r = np.sqrt(np.sum(d ** 2 / theta.reshape(1, n_features) ** 2, axis=1))
return (1 + np.sqrt(5) * r + 5 / 3. * r ** 2) * np.exp(-np.sqrt(5) * r)
class BayesianOptimization(object):
def __init__(self, f, pbounds, verbose=1):
"""
:param f:
Function to be maximized.
:param pbounds:
Dictionary with parameters names as keys and a tuple with minimum
and maximum values.
:param verbose:
Whether or not to print progress.
"""
# Store the original dictionary
self.pbounds = pbounds
# Get the name of the parameters
self.keys = list(pbounds.keys())
# Find number of parameters
self.dim = len(pbounds)
# Create an array with parameters bounds
self.bounds = []
for key in self.pbounds.keys():
self.bounds.append(self.pbounds[key])
self.bounds = np.asarray(self.bounds)
# Some function to be optimized
self.f = f
# Initialization flag
self.initialized = False
# Initialization lists --- stores starting points before process begins
self.init_points = []
self.x_init = []
self.y_init = []
# Numpy array place holders
self.X = None
self.Y = None
# Counter of iterations
self.i = 0
# Since scipy 0.16 passing lower and upper bound to theta seems to be
# broken. However, there is a lot of development going on around GP
        # in scikit-learn. So I'll pick the easy route here and simply specify
# only theta0.
self.gp = GaussianProcess(corr=matern52,
theta0=np.random.uniform(0.001, 0.05,
self.dim),
thetaL=1e-5 * np.ones(self.dim),
thetaU=1e0 * np.ones(self.dim),
random_start=30)
# Utility Function placeholder
self.util = None
# PrintLog object
| """
BAYESIAN OPTIMIZATION MODULE - Version 0.1.0
Created by Fernando Nogueira (fmfn). Available in
- https://github.com/fmfn/BayesianOptimization
"""
__author__ = 'fmfn'
def acq_max(ac, gp, y_max, bounds):
"""
A function to find the maximum of the acquisition function using
the 'L-BFGS-B' method.
Parameters
----------
:param ac:
The acquisition function object that return its point-wise value.
:param gp:
A gaussian process fitted to the relevant data.
:param y_max:
The current maximum known value of the target function.
:param bounds:
The variables bounds to limit the search of the acq max.
Returns
-------
:return: x_max, The arg max of the acquisition function.
"""
# Start with the lower bound as the argmax
x_max = bounds[:, 0]
max_acq = None
x_tries = np.random.uniform(bounds[:, 0], bounds[:, 1],
size=(100, bounds.shape[0]))
for x_try in x_tries:
# Find the minimum of minus the acquisition function
res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
x_try.reshape(1, -1),
bounds=bounds,
method="L-BFGS-B")
# Store it if better than previous minimum(maximum).
if max_acq is None or -res.fun >= max_acq:
x_max = res.x
max_acq = -res.fun
# Clip output to make sure it lies within the bounds. Due to floating
# point technicalities this is not always the case.
return np.clip(x_max, bounds[:, 0], bounds[:, 1])
def matern52(theta, d):
"""
    Matern 5/2 correlation model::
        theta, d --> r(theta, d) = (1 + sqrt(5)*r + 5/3*r^2) * exp(-sqrt(5)*r)
        where r = sqrt( sum_{i=1}^{n} (d_i)^2 / (theta_i)^2 )
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) containing the values of the
        autocorrelation model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
if theta.size == 1:
r = np.sqrt(np.sum(d ** 2, axis=1)) / theta[0]
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
r = np.sqrt(np.sum(d ** 2 / theta.reshape(1, n_features) ** 2, axis=1))
return (1 + np.sqrt(5) * r + 5 / 3. * r ** 2) * np.exp(-np.sqrt(5) * r)
class BayesianOptimization(object):
def __init__(self, f, pbounds, verbose=1):
"""
:param f:
Function to be maximized.
:param pbounds:
Dictionary with parameters names as keys and a tuple with minimum
and maximum values.
:param verbose:
Whether or not to print progress.
"""
# Store the original dictionary
self.pbounds = pbounds
# Get the name of the parameters
self.keys = list(pbounds.keys())
# Find number of parameters
self.dim = len(pbounds)
# Create an array with parameters bounds
self.bounds = []
for key in self.pbounds.keys():
self.bounds.append(self.pbounds[key])
self.bounds = np.asarray(self.bounds)
# Some function to be optimized
self.f = f
# Initialization flag
self.initialized = False
# Initialization lists --- stores starting points before process begins
self.init_points = []
self.x_init = []
self.y_init = []
# Numpy array place holders
self.X = None
self.Y = None
# Counter of iterations
self.i = 0
# Since scipy 0.16 passing lower and upper bound to theta seems to be
# broken. However, there is a lot of development going on around GP
        # in scikit-learn. So I'll pick the easy route here and simply specify
# only theta0.
self.gp = GaussianProcess(corr=matern52,
theta0=np.random.uniform(0.001, 0.05,
self.dim),
thetaL=1e-5 * np.ones(self.dim),
thetaU=1e0 * np.ones(self.dim),
random_start=30)
# Utility Function placeholder
self.util = None
# PrintLog object | self.plog = PrintLog(self.keys) | 3 | 2023-11-17 09:23:38+00:00 | 12k |
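The record above ends with the BayesianOptimization constructor wiring a GaussianProcess to the matern52 correlation defined earlier. As a quick illustrative aside (not part of the dataset row), the sketch below re-implements the same docstring formula with plain float dtypes, since the np.float alias used in the snippet has been removed from recent NumPy releases:

import numpy as np

def matern52_demo(theta, d):
    # Same shape as the record's matern52: r is the theta-scaled Euclidean
    # distance, pushed through the Matern 5/2 kernel profile.
    theta = np.asarray(theta, dtype=float)
    d = np.atleast_2d(np.asarray(d, dtype=float))
    r = np.sqrt(np.sum(d ** 2 / theta.reshape(1, -1) ** 2, axis=1))
    return (1 + np.sqrt(5) * r + 5 / 3. * r ** 2) * np.exp(-np.sqrt(5) * r)

# Correlation starts at 1.0 for identical points and decays toward 0.0
# as the component-wise distances grow relative to theta.
print(matern52_demo([0.5, 0.5], [[0.0, 0.0], [0.1, 0.1], [1.0, 1.0]]))

With theta = [0.5, 0.5] the three rows evaluate to roughly 1.0, 0.92 and 0.04, showing the smooth decay of the correlation with distance that the Gaussian process relies on.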
PlaxtonFlarion/NexaFlow | nexaflow/cutter/cutter.py | [
{
"identifier": "toolbox",
"path": "nexaflow/toolbox.py",
"snippet": "def video_capture(video_path: str):\ndef video_jump(video_cap: cv2.VideoCapture, frame_id: int):\ndef compare_ssim(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef multi_compare_ssim(\n pic1_list: typing.List, pic2_list: typing.List, hooks: typing.List = None\n) -> typing.List[float]:\ndef get_current_frame_id(video_cap: cv2.VideoCapture) -> int:\ndef get_current_frame_time(video_cap: cv2.VideoCapture) -> float:\ndef imread(img_path: str, *_, **__) -> np.ndarray:\ndef get_frame_time(\n video_cap: cv2.VideoCapture, frame_id: int, recover: bool = None\n) -> float:\ndef get_frame_count(video_cap: cv2.VideoCapture) -> int:\ndef get_frame_size(video_cap: cv2.VideoCapture) -> typing.Tuple[int, int]:\ndef get_frame(\n video_cap: cv2.VideoCapture, frame_id: int, recover: bool = None\n) -> np.ndarray:\ndef turn_grey(old: np.ndarray) -> np.ndarray:\ndef turn_binary(old: np.ndarray) -> np.ndarray:\ndef turn_hog_desc(old: np.ndarray) -> np.ndarray:\ndef turn_lbp_desc(old: np.ndarray, radius: int = None) -> np.ndarray:\ndef turn_blur(old: np.ndarray) -> np.ndarray:\ndef sharpen_frame(old: np.ndarray) -> np.ndarray:\ndef calc_mse(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef calc_psnr(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef compress_frame(\n old: np.ndarray,\n compress_rate: float = None,\n target_size: typing.Tuple[int, int] = None,\n not_grey: bool = None,\n interpolation: int = None,\n *_,\n **__,\n) -> np.ndarray:\ndef get_timestamp_str() -> str:\ndef np2b64str(frame: np.ndarray) -> str:\ndef fps_convert(\n target_fps: int, source_path: str, target_path: str, ffmpeg_exe: str = None\n) -> int:\ndef match_template_with_object(\n template: np.ndarray,\n target: np.ndarray,\n engine_template_cv_method_name: str = None,\n **kwargs,\n) -> typing.Dict[str, typing.Any]:\ndef match_template_with_path(\n template: str, target: np.ndarray, **kwargs\n) -> typing.Dict[str, typing.Any]:\ndef show_progress(total: int, color: int, title: str) -> tqdm:\ndef draw_line(image_path: str, save_path: str = None):"
},
{
"identifier": "VideoCutRange",
"path": "nexaflow/cutter/cut_range.py",
"snippet": "class VideoCutRange(object):\n\n def __init__(\n self,\n video: typing.Union[VideoObject, typing.Dict],\n start: int,\n end: int,\n ssim: typing.List[float],\n mse: typing.List[float],\n psnr: typing.List[float],\n start_time: float,\n end_time: float,\n ):\n if isinstance(video, dict):\n self.video = VideoObject(**video)\n else:\n self.video = video\n\n self.start = start\n self.end = end\n self.ssim = ssim\n self.mse = mse\n self.psnr = psnr\n self.start_time = start_time\n self.end_time = end_time\n\n if start > end:\n self.start, self.end = self.end, self.start\n self.start_time, self.end_time = self.end_time, self.start_time\n\n # logger.debug(\n # f\"new a range: {self.start}({self.start_time}) - {self.end}({self.end_time})\"\n # )\n\n def can_merge(self, another: \"VideoCutRange\", offset: int = None, **_):\n if not offset:\n is_continuous = self.end == another.start\n else:\n is_continuous = self.end + offset >= another.start\n return is_continuous and self.video.path == another.video.path\n\n def merge(self, another: \"VideoCutRange\", **kwargs) -> \"VideoCutRange\":\n assert self.can_merge(another, **kwargs)\n return __class__(\n self.video,\n self.start,\n another.end,\n self.ssim + another.ssim,\n self.mse + another.mse,\n self.psnr + another.psnr,\n self.start_time,\n another.end_time,\n )\n\n def contain(self, frame_id: int) -> bool:\n return frame_id in range(self.start, self.end + 1)\n\n contain_frame_id = contain\n\n def contain_image(\n self, image_path: str = None, image_object: np.ndarray = None, *args, **kwargs\n ) -> typing.Dict[str, typing.Any]:\n target_id = self.pick(*args, **kwargs)[0]\n operator = self.video.get_operator()\n frame = operator.get_frame_by_id(target_id)\n return frame.contain_image(\n image_path=image_path, image_object=image_object, **kwargs\n )\n\n def pick(\n self, frame_count: int = None, is_random: bool = None, *_, **__\n ) -> typing.List[int]:\n if not frame_count:\n frame_count = 3\n logger.debug(\n f\"pick {frame_count} frames \"\n f\"from {self.start}({self.start_time}) \"\n f\"to {self.end}({self.end_time}) \"\n f\"on video {self.video.path}\"\n )\n\n result = list()\n if is_random:\n return random.sample(range(self.start, self.end), frame_count)\n length = self.get_length()\n\n frame_count += 1\n for _ in range(1, frame_count):\n cur = int(self.start + length / frame_count * _)\n result.append(cur)\n return result\n\n def get_frames(\n self, frame_id_list: typing.List[int], *_, **__\n ) -> typing.List[VideoFrame]:\n\n out = list()\n operator = self.video.get_operator()\n for each_id in frame_id_list:\n frame = operator.get_frame_by_id(each_id)\n out.append(frame)\n return out\n\n def pick_and_get(self, *args, **kwargs) -> typing.List[VideoFrame]:\n picked = self.pick(*args, **kwargs)\n return self.get_frames(picked, *args, **kwargs)\n\n def get_length(self):\n return self.end - self.start + 1\n\n def is_stable(\n self, threshold: float = None, psnr_threshold: float = None, **_\n ) -> bool:\n\n if not threshold:\n threshold = constants.DEFAULT_THRESHOLD\n\n res = np.mean(self.ssim) > threshold\n if res and psnr_threshold:\n res = np.mean(self.psnr) > psnr_threshold\n\n return res\n\n def is_loop(self, threshold: float = None, **_) -> bool:\n if not threshold:\n threshold = constants.DEFAULT_THRESHOLD\n operator = self.video.get_operator()\n start_frame = operator.get_frame_by_id(self.start)\n end_frame = operator.get_frame_by_id(self.end)\n return toolbox.compare_ssim(start_frame.data, end_frame.data) > threshold\n\n def 
diff(\n self,\n another: \"VideoCutRange\",\n pre_hooks: typing.List[BaseHook],\n *args,\n **kwargs,\n ) -> typing.List[float]:\n self_picked = self.pick_and_get(*args, **kwargs)\n another_picked = another.pick_and_get(*args, **kwargs)\n return toolbox.multi_compare_ssim(self_picked, another_picked, pre_hooks)\n\n def __str__(self):\n return f\"<VideoCutRange [{self.start}({self.start_time})-{self.end}({self.end_time})] ssim={self.ssim}>\"\n\n __repr__ = __str__"
},
{
"identifier": "VideoCutResult",
"path": "nexaflow/cutter/cut_result.py",
"snippet": "class VideoCutResult(object):\n\n def __init__(\n self,\n video: VideoObject,\n range_list: typing.List[VideoCutRange],\n cut_kwargs: typing.Dict = None,\n ):\n self.video = video\n self.range_list = range_list\n self.cut_kwargs = cut_kwargs or {}\n\n def get_target_range_by_id(self, frame_id: int) -> VideoCutRange:\n for each in self.range_list:\n if each.contain(frame_id):\n return each\n raise RuntimeError(f\"frame {frame_id} not found in video\")\n\n @staticmethod\n def _length_filter(\n range_list: typing.List[VideoCutRange], limit: int\n ) -> typing.List[VideoCutRange]:\n after = list()\n for each in range_list:\n if each.get_length() >= limit:\n after.append(each)\n return after\n\n def get_unstable_range(\n self, limit: int = None, range_threshold: float = None, **kwargs\n ) -> typing.List[VideoCutRange]:\n\n change_range_list = sorted(\n [i for i in self.range_list if not i.is_stable(**kwargs)],\n key=lambda x: x.start,\n )\n\n if len(change_range_list) <= 1:\n return change_range_list\n\n i = 0\n merged_change_range_list = list()\n while i < len(change_range_list) - 1:\n cur = change_range_list[i]\n while cur.can_merge(change_range_list[i + 1], **kwargs):\n i += 1\n cur = cur.merge(change_range_list[i], **kwargs)\n if i + 1 >= len(change_range_list):\n break\n merged_change_range_list.append(cur)\n i += 1\n if change_range_list[-1].start > merged_change_range_list[-1].end:\n merged_change_range_list.append(change_range_list[-1])\n\n if limit:\n merged_change_range_list = self._length_filter(\n merged_change_range_list, limit\n )\n\n if range_threshold:\n merged_change_range_list = [\n i for i in merged_change_range_list if not i.is_loop(range_threshold)\n ]\n # logger.debug(\n # f\"unstable range of [{self.video.path}]: {merged_change_range_list}\"\n # )\n return merged_change_range_list\n\n def get_range(\n self, limit: int = None, unstable_limit: int = None, **kwargs\n ) -> typing.Tuple[typing.List[VideoCutRange], typing.List[VideoCutRange]]:\n\n unstable_range_list = self.get_unstable_range(unstable_limit, **kwargs)\n\n video_start_frame_id = 1\n video_start_timestamp = 0.0\n\n video_end_frame_id = self.range_list[-1].end\n video_end_timestamp = self.range_list[-1].end_time\n\n _default = {\n \"ssim\": [1.0],\n \"mse\": [0.0],\n \"psnr\": [0.0],\n }\n\n if len(unstable_range_list) == 0:\n # logger.warning(\n # \"no unstable stage detected, seems nothing happened in your video\"\n # )\n logger.warning(\"你的视频看上去是静止的 ...\")\n return (\n # stable\n [\n VideoCutRange(\n video=self.video,\n start=video_start_frame_id,\n end=video_end_frame_id,\n start_time=video_start_timestamp,\n end_time=video_end_timestamp,\n **_default,\n )\n ],\n # unstable\n [],\n )\n\n first_stable_range_end_id = unstable_range_list[0].start - 1\n end_stable_range_start_id = unstable_range_list[-1].end + 1\n\n # IMPORTANT: len(ssim_list) + 1 == video_end_frame_id\n range_list: typing.List[VideoCutRange] = list()\n # stable start\n if first_stable_range_end_id >= 1:\n # logger.debug(f\"stable start\")\n logger.info(\"稳定阶段开始 ...\")\n range_list.append(\n VideoCutRange(\n video=self.video,\n start=video_start_frame_id,\n end=first_stable_range_end_id,\n start_time=video_start_timestamp,\n end_time=self.get_target_range_by_id(\n first_stable_range_end_id\n ).end_time,\n **_default,\n )\n )\n # unstable start\n else:\n # logger.debug(\"unstable start\")\n logger.info(\"不稳定阶段开始 ...\")\n\n # stable end\n if end_stable_range_start_id <= video_end_frame_id:\n # logger.debug(\"stable end\")\n 
logger.info(\"稳定阶段结束 ...\")\n range_list.append(\n VideoCutRange(\n video=self.video,\n start=end_stable_range_start_id,\n end=video_end_frame_id,\n start_time=self.get_target_range_by_id(\n end_stable_range_start_id\n ).end_time,\n end_time=video_end_timestamp,\n **_default,\n )\n )\n # unstable end\n else:\n # logger.debug(\"unstable end\")\n logger.info(\"不稳定阶段结束 ...\")\n\n for i in range(len(unstable_range_list) - 1):\n range_start_id = unstable_range_list[i].end + 1\n range_end_id = unstable_range_list[i + 1].start - 1\n\n if range_start_id > range_end_id:\n range_start_id, range_end_id = range_end_id, range_start_id\n\n range_list.append(\n VideoCutRange(\n video=self.video,\n start=range_start_id,\n end=range_end_id,\n start_time=self.get_target_range_by_id(range_start_id).start_time,\n end_time=self.get_target_range_by_id(range_end_id).start_time,\n **_default,\n )\n )\n\n if limit:\n range_list = self._length_filter(range_list, limit)\n # logger.debug(f\"stable range of [{self.video.path}]: {range_list}\")\n stable_range_list = sorted(range_list, key=lambda x: x.start)\n return stable_range_list, unstable_range_list\n\n def get_stable_range(\n self, limit: int = None, **kwargs\n ) -> typing.List[VideoCutRange]:\n\n return self.get_range(limit, **kwargs)[0]\n\n def get_range_dynamic(\n self,\n stable_num_limit: typing.List[int],\n threshold: float,\n step: float = 0.005,\n max_retry: int = 10,\n **kwargs,\n ) -> typing.Tuple[typing.List[VideoCutRange], typing.List[VideoCutRange]]:\n\n assert max_retry != 0, f\"fail to get range dynamically: {stable_num_limit}\"\n assert len(stable_num_limit) == 2, \"num_limit should be something like [1, 3]\"\n assert 0.0 < threshold < 1.0, \"threshold out of range\"\n\n stable, unstable = self.get_range(threshold=threshold, **kwargs)\n cur_num = len(stable)\n logger.debug(f\"current stable range is {cur_num}\")\n if stable_num_limit[0] <= cur_num <= stable_num_limit[1]:\n logger.debug(f\"range num is fine\")\n return stable, unstable\n\n if cur_num < stable_num_limit[0]:\n logger.debug(\"too fewer stages\")\n threshold += step\n\n elif cur_num > stable_num_limit[1]:\n logger.debug(\"too many stages\")\n threshold -= step\n\n return self.get_range_dynamic(\n stable_num_limit, threshold=threshold, max_retry=max_retry - 1, **kwargs\n )\n\n def thumbnail(\n self,\n target_range: VideoCutRange,\n to_dir: str = None,\n compress_rate: float = None,\n is_vertical: bool = None,\n *_,\n **__,\n ) -> np.ndarray:\n\n if not compress_rate:\n compress_rate = 0.1\n\n if is_vertical:\n stack_func = np.vstack\n\n def get_split_line(f):\n return np.zeros((5, f.shape[1]))\n\n else:\n stack_func = np.hstack\n\n def get_split_line(f):\n return np.zeros((f.shape[0], 5))\n\n frame_list = list()\n with toolbox.video_capture(self.video.path) as cap:\n toolbox.video_jump(cap, target_range.start)\n ret, frame = cap.read()\n count = 1\n length = target_range.get_length()\n while ret and count <= length:\n frame = toolbox.compress_frame(frame, compress_rate)\n frame_list.append(frame)\n frame_list.append(get_split_line(frame))\n ret, frame = cap.read()\n count += 1\n merged = stack_func(frame_list)\n\n if to_dir:\n target_path = os.path.join(\n to_dir, f\"thumbnail_{target_range.start}-{target_range.end}.png\"\n )\n cv2.imwrite(target_path, merged)\n logger.debug(f\"save thumbnail to {target_path}\")\n return merged\n\n def pick_and_save(\n self,\n range_list: typing.List[VideoCutRange],\n frame_count: int,\n to_dir: str = None,\n prune: float = None,\n meaningful_name: bool 
= None,\n # in kwargs\n # compress_rate: float = None,\n # target_size: typing.Tuple[int, int] = None,\n # to_grey: bool = None,\n *args,\n **kwargs,\n ) -> str:\n\n stage_list = list()\n for index, each_range in enumerate(range_list):\n picked = each_range.pick(frame_count, *args, **kwargs)\n picked_frames = each_range.get_frames(picked)\n logger.info(f\"pick {picked} in range {each_range}\")\n stage_list.append((str(index), picked_frames))\n\n if prune:\n stage_list = self._prune(prune, stage_list)\n\n if not to_dir:\n to_dir = toolbox.get_timestamp_str()\n # logger.debug(f\"try to make dirs: {to_dir}\")\n os.makedirs(to_dir, exist_ok=True)\n\n for each_stage_id, each_frame_list in stage_list:\n each_stage_dir = os.path.join(to_dir, str(each_stage_id))\n\n if os.path.isdir(each_stage_dir):\n logger.warning(f\"sub dir [{each_stage_dir}] already existed\")\n logger.warning(\n \"NOTICE: make sure your data will not be polluted by accident\"\n )\n os.makedirs(each_stage_dir, exist_ok=True)\n\n for each_frame_object in each_frame_list:\n if meaningful_name:\n image_name = (\n f\"{os.path.basename(os.path.splitext(self.video.path)[0])}\"\n f\"_\"\n f\"{each_frame_object.frame_id}\"\n f\"_\"\n f\"{each_frame_object.timestamp}\"\n f\".png\"\n )\n else:\n image_name = f\"{uuid.uuid4()}.png\"\n\n each_frame_path = os.path.join(each_stage_dir, image_name)\n compressed = toolbox.compress_frame(each_frame_object.data, **kwargs)\n cv2.imwrite(each_frame_path, compressed)\n # logger.debug(\n # f\"frame [{each_frame_object.frame_id}] saved to {each_frame_path}\"\n # )\n\n return to_dir\n\n @staticmethod\n def _prune(\n threshold: float,\n stages: typing.List[typing.Tuple[str, typing.List[VideoFrame]]],\n ) -> typing.List[typing.Tuple[str, typing.List[VideoFrame]]]:\n logger.debug(\n f\"start pruning ranges, origin length is {len(stages)}, threshold is {threshold}\"\n )\n after = list()\n for i in range(len(stages)):\n index, frames = stages[i]\n for j in range(i + 1, len(stages)):\n next_index, next_frames = stages[j]\n ssim_list = toolbox.multi_compare_ssim(frames, next_frames)\n min_ssim = min(ssim_list)\n logger.debug(f\"compare {index} with {next_index}: {ssim_list}\")\n if min_ssim > threshold:\n logger.debug(f\"stage {index} has been pruned\")\n break\n else:\n after.append(stages[i])\n return after\n\n def dumps(self) -> str:\n\n def _handler(obj: object):\n if isinstance(obj, np.ndarray):\n return \"<np.ndarray object>\"\n return obj.__dict__\n\n return json.dumps(self, sort_keys=True, default=_handler)\n\n def dump(self, json_path: str, **kwargs):\n logger.debug(f\"dump result to {json_path}\")\n assert not os.path.exists(json_path), f\"{json_path} already existed\"\n with open(json_path, \"w+\", **kwargs) as f:\n f.write(self.dumps())\n\n @classmethod\n def loads(cls, content: str) -> \"VideoCutResult\":\n json_dict: dict = json.loads(content)\n return cls(\n VideoObject(**json_dict[\"video\"]),\n [VideoCutRange(**each) for each in json_dict[\"range_list\"]],\n )\n\n @classmethod\n def load(cls, json_path: str, **kwargs) -> \"VideoCutResult\":\n logger.debug(f\"load result from {json_path}\")\n with open(json_path, **kwargs) as f:\n return cls.loads(f.read())\n\n def diff(\n self,\n another: \"VideoCutResult\",\n auto_merge: bool = None,\n pre_hooks: typing.List[BaseHook] = None,\n output_path: str = None,\n *args,\n **kwargs,\n ) -> \"VideoCutResultDiff\":\n\n self_stable, _ = self.get_range(*args, **kwargs)\n another_stable, _ = another.get_range(*args, **kwargs)\n 
self.pick_and_save(self_stable, 3, to_dir=output_path)\n another.pick_and_save(another_stable, 3, to_dir=output_path)\n\n result = VideoCutResultDiff(self_stable, another_stable)\n result.apply_diff(pre_hooks)\n\n if auto_merge:\n after = dict()\n for self_stage_name, each_result in result.data.items():\n max_one = sorted(each_result.items(), key=lambda x: max(x[1]))[-1]\n max_one = (max_one[0], max(max_one[1]))\n after[self_stage_name] = max_one\n result.data = after\n return result\n\n @staticmethod\n def range_diff(\n range_list_1: typing.List[VideoCutRange],\n range_list_2: typing.List[VideoCutRange],\n *args,\n **kwargs,\n ) -> typing.Dict[int, typing.Dict[int, typing.List[float]]]:\n\n self_stable_range_count = len(range_list_1)\n another_stable_range_count = len(range_list_2)\n if self_stable_range_count != another_stable_range_count:\n logger.warning(\n f\"stage counts not equal: {self_stable_range_count} & {another_stable_range_count}\"\n )\n\n data = dict()\n for self_id, each_self_range in enumerate(range_list_1):\n temp = dict()\n for another_id, another_self_range in enumerate(range_list_2):\n temp[another_id] = each_self_range.diff(\n another_self_range, *args, **kwargs\n )\n data[self_id] = temp\n return data"
},
{
"identifier": "VideoObject",
"path": "nexaflow/video.py",
"snippet": "class VideoObject(object):\n\n def __init__(\n self,\n path: typing.Union[str, os.PathLike],\n fps: int = None,\n ):\n \"\"\"\n 初始化,检查文件路径是否有效,执行其他一些初始化操作\n \"\"\"\n assert os.path.isfile(path), f\"video {path} not existed\"\n self.path: str = str(path)\n self.grey_data: typing.Optional[typing.Tuple[\"VideoFrame\"]] = tuple() # 灰度帧\n self.hued_data: typing.Optional[typing.Tuple[\"ColorFrame\"]] = tuple() # 彩色帧\n\n if fps:\n video_path = os.path.join(tempfile.mkdtemp(), f\"tmp_{fps}.mp4\")\n logger.debug(f\"convert video, and bind path to {video_path}\")\n logger.info(f\"转换视频: {video_path}\")\n toolbox.fps_convert(\n fps, self.path, video_path, imageio_ffmpeg.get_ffmpeg_exe()\n )\n self.path = video_path\n\n with toolbox.video_capture(self.path) as cap:\n self.frame_count = toolbox.get_frame_count(cap)\n self.frame_size = toolbox.get_frame_size(cap)\n\n logger.info(f\"视频已生成,视频帧长度: {self.frame_count} 分辨率: {self.frame_size}\")\n\n def __str__(self):\n return f\"<VideoObject path={self.path}>\"\n\n __repr__ = __str__\n\n def sync_timestamp(self, frame_data: tuple[VideoFrame]) -> None:\n assert frame_data, \"load_frames() first\"\n vid = mpy.VideoFileClip(self.path)\n\n vid_count = vid.reader.nframes\n pbar = toolbox.show_progress(vid_count, 153, \"Synzer\")\n for frame_id, (timestamp, _) in enumerate(vid.iter_frames(with_times=True)):\n if frame_id >= len(frame_data):\n break\n # frame_id_real = frame_id + 1\n if not frame_data[frame_id].timestamp:\n # logger.debug(f\"fix frame {frame_id_real}'s timestamp: {timestamp}\")\n frame_data[frame_id].timestamp = timestamp\n pbar.update(1)\n pbar.close()\n\n def sync_backstage(self, frame_data: tuple[ColorFrame]) -> None:\n assert frame_data, \"load_frames() first\"\n vid = mpy.VideoFileClip(self.path)\n\n for frame_id, (timestamp, _) in enumerate(vid.iter_frames(with_times=True)):\n if frame_id >= len(frame_data):\n break\n # frame_id_real = frame_id + 1\n if not frame_data[frame_id].timestamp:\n # logger.debug(f\"fix frame {frame_id_real}'s timestamp: {timestamp}\")\n frame_data[frame_id].timestamp = timestamp\n\n def clean_frames(self):\n \"\"\"\n 清除所有帧数据\n \"\"\"\n self.grey_data = tuple()\n self.hued_data = tuple()\n\n @staticmethod\n def frame_details(frame_type):\n each_cost = frame_type[0].data.nbytes / (1024 ** 2)\n total_cost = each_cost * len(frame_type)\n frame_size = frame_type[0].data.shape[::-1]\n return f\"{frame_type[0].__class__.__name__}: [{each_cost:.2f} MB] [{total_cost:.2f} MB] {frame_size}\"\n\n def load_frames(self, color: bool = False):\n \"\"\"\n 从文件中加载所有帧到内存\n \"\"\"\n logger.info(f\"加载视频帧到内存: {os.path.basename(self.path)}\")\n\n def load_stream(frames: type[VideoFrame]):\n pbar = toolbox.show_progress(self.frame_count, 180, \"Loader\")\n data: list[VideoFrame] = []\n with toolbox.video_capture(self.path) as cap:\n for success, frame in iter(lambda: cap.read(), (False, None)):\n if success:\n data.append(frames.initial(cap, frame))\n pbar.update(1)\n pbar.close()\n return data\n\n def back_ground(frames: type[ColorFrame]):\n data: list[ColorFrame] = []\n with toolbox.video_capture(self.path) as cap:\n for success, frame in iter(lambda: cap.read(), (False, None)):\n if success:\n data.append(frames.initial(cap, frame))\n return data\n\n def load_stream_sync(brand):\n self.sync_timestamp(tuple(frame_data := load_stream(brand)))\n return frame_data\n\n def back_ground_sync(brand):\n self.sync_backstage(tuple(frame_data := back_ground(brand)))\n return frame_data\n\n start_time, task, hued = time.time(), None, None\n 
if color:\n task = ThreadPoolExecutor()\n hued = task.submit(back_ground_sync, ColorFrame)\n\n grey = load_stream_sync(VideoFrame)\n self.grey_data = tuple(grey)\n logger.info(f\"灰度帧已加载: {self.frame_details(self.grey_data)}\")\n logger.info(f\"视频加载耗时: {time.time() - start_time:.2f} 秒\")\n return task, hued\n\n def _read_from_file(self) -> typing.Generator[\"VideoFrame\", None, None]:\n \"\"\"\n 从文件中读取帧\n \"\"\"\n with toolbox.video_capture(self.path) as cap:\n success, frame = cap.read()\n while success:\n yield VideoFrame.initial(cap, frame)\n success, frame = cap.read()\n\n def _read_from_mem(self) -> typing.Generator[\"VideoFrame\", None, None]:\n \"\"\"\n 从内存中读取帧\n \"\"\"\n for each_frame in self.grey_data:\n yield each_frame\n\n def _read(self) -> typing.Generator[\"VideoFrame\", None, None]:\n \"\"\"\n 选择从文件还是从内存中读取帧\n \"\"\"\n if self.grey_data:\n yield from self._read_from_mem()\n else:\n yield from self._read_from_file()\n\n def get_iterator(self) -> typing.Generator[\"VideoFrame\", None, None]:\n \"\"\"\n 获取帧的迭代器\n \"\"\"\n return self._read()\n\n def get_operator(self) -> _BaseFrameOperator:\n \"\"\"\n 根据是否已经加载帧,返回相应的FrameOperator(`MemFrameOperator`或`FileFrameOperator`)\n \"\"\"\n if self.grey_data:\n return MemFrameOperator(self)\n return FileFrameOperator(self)\n\n def __iter__(self):\n \"\"\"\n 返回一个用于迭代帧的迭代器\n \"\"\"\n return self.get_iterator()"
},
{
"identifier": "VideoFrame",
"path": "nexaflow/video.py",
"snippet": "class VideoFrame(Frame):\n\n def __init__(self, frame_id: int, timestamp: float, data: np.ndarray):\n super().__init__(frame_id, timestamp, data)\n\n def __str__(self):\n return f\"<VideoFrame id={self.frame_id} timestamp={self.timestamp}>\"\n\n @staticmethod\n def initial(cap: cv2.VideoCapture, frame: np.ndarray) -> \"VideoFrame\":\n frame_id = toolbox.get_current_frame_id(cap)\n timestamp = toolbox.get_current_frame_time(cap)\n new_frame = toolbox.compress_frame(frame, 0.5, (350, 700), False)\n return VideoFrame(frame_id, timestamp, new_frame)\n\n def copy(self) -> \"VideoFrame\":\n return VideoFrame(self.frame_id, self.timestamp, self.data[:])\n\n def contain_image(\n self, *, image_path: str = None, image_object: np.ndarray = None, **kwargs\n ) -> typing.Dict[str, typing.Any]:\n \"\"\"\n 检查给定图像(通过路径或numpy对象)是否存在于当前帧中,并返回匹配的字典\n \"\"\"\n assert image_path or (\n image_object is not None\n ), \"should fill image_path or image_object\"\n\n if image_path:\n logger.debug(f\"found image path, use it first: {image_path}\")\n return toolbox.match_template_with_path(image_path, self.data, **kwargs)\n image_object = toolbox.turn_grey(image_object)\n return toolbox.match_template_with_object(image_object, self.data, **kwargs)"
},
{
"identifier": "BaseHook",
"path": "nexaflow/hook.py",
"snippet": "class BaseHook(object):\n\n def __init__(self, *_, **__):\n # logger.debug(f\"start initialing: {self.__class__.__name__} ...\")\n logger.info(f\"加载视频帧处理单元: Frame Processor {self.__class__.__name__} ...\")\n self.result = dict()\n\n def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:\n # info = f\"execute hook: {self.__class__.__name__}\"\n\n frame_id = frame.frame_id\n if frame_id != -1:\n # logger.debug(f\"{info}, frame id: {frame_id}\")\n pass\n return frame"
},
{
"identifier": "GreyHook",
"path": "nexaflow/hook.py",
"snippet": "class GreyHook(BaseHook):\n\n def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:\n super().do(frame, *_, **__)\n frame.data = toolbox.turn_grey(frame.data)\n return frame"
},
{
"identifier": "CompressHook",
"path": "nexaflow/hook.py",
"snippet": "class CompressHook(BaseHook):\n\n def __init__(\n self,\n compress_rate: float = None,\n target_size: typing.Tuple[int, int] = None,\n *_,\n **__,\n ):\n super().__init__(*_, **__)\n self.compress_rate = compress_rate\n self.target_size = target_size\n # logger.debug(f\"compress rate: {compress_rate}\")\n # logger.debug(f\"target size: {target_size}\")\n\n def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:\n super().do(frame, *_, **__)\n frame.data = toolbox.compress_frame(\n frame.data, compress_rate=self.compress_rate, target_size=self.target_size\n )\n return frame"
}
] | import os
import time
import typing
import numpy as np
from loguru import logger
from typing import List, Tuple
from concurrent.futures import ThreadPoolExecutor
from nexaflow import toolbox
from nexaflow.cutter.cut_range import VideoCutRange
from nexaflow.cutter.cut_result import VideoCutResult
from nexaflow.video import VideoObject, VideoFrame
from nexaflow.hook import BaseHook, GreyHook, CompressHook | 8,679 |
class Window(object):
def __init__(self, video: "VideoObject", *args):
self.video = video
        assert len(args) == 7, "7 parameters are required"
(self.step, self.block, self.window_size, self.window_coefficient,
self.start, self.video_length, self.frame_total) = args
self.end = self.start + self.window_size * self.step
def load_data(self) -> typing.List[VideoFrame]:
cur = self.start
result = []
video_operator = self.video.get_operator()
while cur <= self.end:
frame = video_operator.get_frame_by_id(cur)
result.append(frame)
cur += self.step
if len(result) < 2:
last = video_operator.get_frame_by_id(self.end)
result.append(last)
return result
def shift(self) -> bool:
self.start += self.step
self.end += self.step
if self.start >= self.video_length:
return False
if self.end >= self.video_length:
self.end = self.video_length
return True
def float_merge(self, float_list: typing.List[float]) -> float:
length = len(float_list)
result = 0.0
denominator = 0.0
for i, each in enumerate(float_list):
weight = pow(length - i, self.window_coefficient)
denominator += weight
result += each * weight
final = result / denominator
return final
class VideoCutter(object):
def __init__(
self,
step: int = None,
compress_rate: float = None,
target_size: typing.Tuple[int, int] = None,
):
self.step = step or 1
if (not compress_rate) and (not target_size):
# logger.debug(
# f"no compress rate or target size received. set compress rate to 0.2"
# )
compress_rate = 0.2
self._hook_list: typing.List[BaseHook] = list()
compress_hook = CompressHook(
overwrite=True, compress_rate=compress_rate, target_size=target_size
)
|
class Window(object):
def __init__(self, video: "VideoObject", *args):
self.video = video
        assert len(args) == 7, "7 parameters are required"
(self.step, self.block, self.window_size, self.window_coefficient,
self.start, self.video_length, self.frame_total) = args
self.end = self.start + self.window_size * self.step
def load_data(self) -> typing.List[VideoFrame]:
cur = self.start
result = []
video_operator = self.video.get_operator()
while cur <= self.end:
frame = video_operator.get_frame_by_id(cur)
result.append(frame)
cur += self.step
if len(result) < 2:
last = video_operator.get_frame_by_id(self.end)
result.append(last)
return result
def shift(self) -> bool:
self.start += self.step
self.end += self.step
if self.start >= self.video_length:
return False
if self.end >= self.video_length:
self.end = self.video_length
return True
def float_merge(self, float_list: typing.List[float]) -> float:
length = len(float_list)
result = 0.0
denominator = 0.0
for i, each in enumerate(float_list):
weight = pow(length - i, self.window_coefficient)
denominator += weight
result += each * weight
final = result / denominator
return final
class VideoCutter(object):
def __init__(
self,
step: int = None,
compress_rate: float = None,
target_size: typing.Tuple[int, int] = None,
):
self.step = step or 1
if (not compress_rate) and (not target_size):
# logger.debug(
# f"no compress rate or target size received. set compress rate to 0.2"
# )
compress_rate = 0.2
self._hook_list: typing.List[BaseHook] = list()
compress_hook = CompressHook(
overwrite=True, compress_rate=compress_rate, target_size=target_size
) | grey_hook = GreyHook(overwrite=True) | 6 | 2023-11-13 05:27:34+00:00 | 12k |
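Before the next record, one small illustrative aside (not part of the dataset): the Window.float_merge method above weights per-frame similarity scores by (length - i) ** window_coefficient, so earlier frames in the sliding window dominate the merged value. A minimal standalone sketch of just that weighting:

def float_merge_demo(float_list, window_coefficient):
    # Mirrors Window.float_merge from the record: earlier entries get the
    # largest weight (length - i) ** window_coefficient, so the merged score
    # leans toward the front of the sliding window.
    length = len(float_list)
    result = 0.0
    denominator = 0.0
    for i, each in enumerate(float_list):
        weight = pow(length - i, window_coefficient)
        denominator += weight
        result += each * weight
    return result / denominator

scores = [0.98, 0.95, 0.60]
print(float_merge_demo(scores, 1))  # ~0.91, mild bias toward the first scores
print(float_merge_demo(scores, 3))  # ~0.96, stronger bias with a larger coefficient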
OpenBMB/XAgent | XAgentServer/application/cruds/interaction.py | [
{
"identifier": "InteractionDBInterface",
"path": "XAgentServer/database/interface/interaction.py",
"snippet": "class InteractionDBInterface(metaclass=abc.ABCMeta):\n \"\"\"Interaction DB Interface\n \"\"\"\n\n @classmethod\n def search_many_interaction(cls, db: Session) -> list[InteractionBase]:\n \"\"\"search many interactions\n\n Args:\n db (Session): db session\n\n Returns:\n list[InteractionBase]: interaction list\n \"\"\"\n interactions = db.query(Interaction).all()\n return [InteractionBase.from_db(interaction) for interaction in interactions]\n\n @classmethod\n def get_interaction(cls,\n db: Session,\n interaction_id: str) -> InteractionBase | None:\n \"\"\"get interaction by interaction_id\n\n Args:\n db (Session): db session\n interaction_id (str): interaction id\n\n Returns:\n InteractionBase | None: _description_\n \"\"\"\n interaction = db.query(Interaction).filter(\n Interaction.interaction_id == interaction_id,\n Interaction.is_deleted.is_not(True)).first()\n return InteractionBase.from_db(interaction) if interaction else None\n\n @classmethod\n def get_ready_interaction(cls,\n db: Session,\n user_id: str) -> InteractionBase | None:\n \"\"\"get interaction by user_id\n\n Args:\n db (Session): db session\n user_id (str): user id\n\n Returns:\n InteractionBase | None: _description_\n \"\"\"\n interaction = db.query(Interaction).filter(\n Interaction.user_id == user_id,\n Interaction.status == 'ready').first()\n return InteractionBase.from_db(interaction) if interaction else None\n\n @classmethod\n def create_interaction(cls,\n db: Session,\n base: InteractionBase) -> InteractionBase:\n \"\"\"\n create interaction\n\n Args:\n db (Session): db session\n base (InteractionBase): interaction base\n\n Returns:\n None\n \"\"\"\n db.add(Interaction(**base.to_dict()))\n db.commit()\n return None\n\n @classmethod\n def add_parameter(cls,\n db: Session,\n parameter: InteractionParameter):\n \"\"\"\n add parameter for interaction\n \"\"\"\n db.add(Parameter(**parameter.to_dict()))\n db.commit()\n\n return None\n\n @classmethod\n def search_interaction_by_user_id(cls,\n db: Session,\n user_id: str,\n page_size: int = 20,\n page_num: int = 1) -> list[dict]:\n \"\"\"\n search interaction by user id\n\n Args:\n db (Session): db session\n user_id (str): user id\n page_size (int, optional): page size. Defaults to 20.\n page_num (int, optional): page num. Defaults to 1.\n\n Returns:\n list[dict]: interaction list\n \"\"\"\n total = db.query(func.count(Interaction.id)).filter(\n Interaction.user_id == user_id, Interaction.is_deleted.is_(False)).scalar()\n\n interaction_list = db.query(Interaction).filter(\n Interaction.user_id == user_id,\n Interaction.is_deleted.is_(False),\n Interaction.status.in_([StatusEnum.FINISHED])).limit(\n page_size).offset((page_num - 1) * page_size).all()\n data = []\n for interaction in interaction_list:\n d_ = InteractionBase.from_db(interaction).to_dict(\n exclude=[\"recorder_root_dir\", \"is_deleted\"])\n parameter = cls.get_parameter(\n db=db, interaction_id=d_[\"interaction_id\"])\n d_[\"parameters\"] = [parameter[0]]\n data.append(d_)\n return {\n \"total\": total,\n \"rows\": data\n }\n\n @classmethod\n def search_many_shared(cls,\n db: Session,\n page_size: int = 20,\n page_index: int = 1) -> list[dict]:\n \"\"\"\n search many shared interactions from community\n\n Args:\n db (Session): db session\n page_size (int, optional): page size. Defaults to 20.\n page_index (int, optional): page index. 
Defaults to 1.\n\n Returns:\n list[dict]: interaction list\n \"\"\"\n total = db.query(func.count(SharedInteraction.id)).filter(\n SharedInteraction.is_deleted.is_(False),\n SharedInteraction.is_audit.is_(True)).scalar()\n interaction_list = db.query(SharedInteraction).filter(\n SharedInteraction.is_deleted.is_(False),\n SharedInteraction.is_audit.is_(True)).order_by(\n SharedInteraction.star.desc()).limit(page_size).offset(\n (page_index - 1) * page_size).all()\n data = []\n for interaction in interaction_list:\n d_ = SharedInteractionBase.from_db(interaction).to_dict(\n exclude=[\"record_dir\", \"is_deleted\"])\n parameter = cls.get_parameter(\n db=db, interaction_id=d_[\"interaction_id\"])\n d_[\"parameters\"] = parameter\n data.append(d_)\n return {\n \"total\": total,\n \"rows\": data\n }\n\n @classmethod\n def get_shared_interaction(cls,\n db: Session,\n interaction_id: str) -> SharedInteractionBase | None:\n \"\"\"\n get shared interaction by interaction id\n\n Args:\n db (Session): db session\n interaction_id (str): interaction id\n\n Returns:\n\n SharedInteractionBase | None: shared interaction\n \"\"\"\n interaction = db.query(SharedInteraction).filter(\n SharedInteraction.interaction_id == interaction_id, SharedInteraction.is_deleted.is_(False)).first()\n return SharedInteractionBase.from_db(interaction) if interaction else None\n\n @classmethod\n def is_exist(cls,\n db: Session,\n interaction_id: str) -> bool:\n \"\"\"\n check interaction is exist or not\n\n Args:\n db (Session): db session\n interaction_id (str): interaction id\n\n Returns:\n bool: True or False\n \"\"\"\n interaction = db.query(Interaction).filter(\n Interaction.interaction_id == interaction_id,\n Interaction.is_deleted.is_(False)).first()\n return interaction is not None\n\n @classmethod\n def update_interaction(cls, db: Session, base_data: dict):\n \"\"\"\n update interaction\n\n Args:\n db (Session): db session\n base_data (dict): interaction data\n\n Returns:\n None\n \"\"\"\n if \"interaction_id\" not in base_data:\n raise XAgentError(\"interaction_id is required\")\n interaction = db.query(Interaction).filter(\n Interaction.interaction_id == base_data[\"interaction_id\"]).first()\n if interaction is None:\n raise XAgentError(\"interaction is not exist\")\n for k, v in base_data.items():\n if k == \"interaction_id\":\n continue\n setattr(interaction, k, v)\n interaction.update_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n db.commit()\n\n @classmethod\n def update_interaction_status(cls,\n db: Session,\n interaction_id: str,\n status: str,\n message: str,\n current_step: int):\n \"\"\"\n update interaction status\n\n Args:\n db (Session): db session\n interaction_id (str): interaction id\n status (str): status\n message (str): message\n current_step (int): current step\n\n Returns:\n None\n \"\"\"\n db_interaction = db.query(Interaction).filter(\n Interaction.interaction_id == interaction_id).first()\n if db_interaction is None:\n raise XAgentError(\"interaction is not exist\")\n\n db_interaction.status = status\n db_interaction.message = message\n db_interaction.current_step = current_step\n db_interaction.update_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n db.commit()\n\n @classmethod\n def update_interaction_parameter(cls,\n db: Session,\n interaction_id: str,\n parameter: InteractionParameter):\n \"\"\"\n update interaction parameter\n\n Args:\n db (Session): db session\n interaction_id (str): interaction id\n parameter (InteractionParameter): parameter\n\n Returns:\n None\n 
\"\"\"\n db_parameter = db.query(Parameter).filter(\n Parameter.interaction_id == interaction_id,\n Parameter.parameter_id == parameter.parameter_id).first()\n\n if db_parameter is None:\n db.add(Parameter(**parameter.to_dict()))\n db.commit()\n\n @classmethod\n def is_running(cls, db: Session, user_id: str):\n \"\"\"\n check user is only running one interaction\n\n Args:\n db (Session): db session\n user_id (str): user id\n\n Returns: \n bool: True or False\n \"\"\"\n interaction = db.query(Interaction).filter(\n Interaction.user_id == user_id,\n Interaction.status.in_((\"running\", \"waiting\"))).first()\n return interaction is not None\n\n @classmethod\n def get_parameter(cls, db: Session, interaction_id: str) -> list:\n \"\"\"\n get interaction running parameter\n\n Args:\n db (Session): db session\n interaction_id (str): interaction id\n\n Returns:\n list: parameter list\n \"\"\"\n raws = db.query(Raw).filter(\n Raw.interaction_id == interaction_id,\n Raw.is_human.is_(True),\n Raw.human_data.is_not(None)).order_by(Raw.step.asc()).all()\n return [raw.human_data for raw in raws]\n\n @classmethod\n def delete_interaction(cls, db: Session, interaction_id: str):\n \"\"\"\n delete interaction\n\n Args:\n db (Session): db session\n interaction_id (str): interaction id\n\n Returns:\n None\n\n Raises:\n XAgentError: interaction is not exist\n \"\"\"\n interaction = db.query(Interaction).filter(\n Interaction.interaction_id == interaction_id).first()\n if interaction is None:\n raise XAgentError(\"interaction is not exist\")\n interaction.is_deleted = True\n db.commit()\n\n @classmethod\n def add_share(cls, db: Session, shared: SharedInteractionBase):\n \"\"\"add share interaction\n\n Args:\n db (Session): db session\n shared (SharedInteractionBase): shared interaction from community\n \"\"\"\n db.add(SharedInteraction(**shared.to_dict()))\n\n @classmethod\n def insert_raw(cls, db: Session, process: XAgentRaw):\n \"\"\"\n insert an interaction process for recording\n\n Args:\n db (Session): db session\n process (XAgentRaw): interaction process\n\n Returns:\n None\n\n Raises:\n XAgentError: interaction is not exist\n \"\"\"\n interaction = db.query(Interaction).filter(\n process.interaction_id == process.interaction_id).first()\n exist_process = db.query(Raw).filter(\n Raw.interaction_id == process.interaction_id, Raw.is_deleted.is_(False)).order_by(Raw.step.desc()).first()\n if interaction is None:\n raise XAgentError(\"interaction is not exist\")\n\n if exist_process is not None:\n process.step = exist_process.step + 1\n else:\n process.step = 0\n\n db.add(Raw(**process.to_dict()))\n db.commit()\n\n @classmethod\n def search_many_raws(cls, db: Session, interaction_id: str):\n \"\"\"search many raws\n\n Args:\n db (Session): db session\n interaction_id (str): interaction id\n\n Returns:\n list[XAgentRaw]: interaction process list\n \"\"\"\n processes = db.query(Raw).filter(\n Raw.interaction_id == interaction_id, Raw.is_deleted.is_(False)).order_by(Raw.step.asc()).all()\n return processes\n\n @classmethod\n def get_raw(cls, db: Session, interaction_id: str, node_id: str):\n \"\"\"\n get raw by interaction id and node id\n \"\"\"\n process = db.query(Raw).filter(\n Raw.interaction_id == interaction_id, Raw.node_id == node_id, Raw.is_deleted.is_(False)).first()\n return process\n\n @classmethod\n def get_next_send(cls, db: Session, interaction_id: str):\n \"\"\"\n get next send process\n\n Args:\n db (Session): db session\n interaction_id (str): interaction id\n\n Returns:\n XAgentRaw: 
interaction process\n \"\"\"\n processes = db.query(Raw).filter(Raw.interaction_id == interaction_id,\n Raw.is_send.is_(False),\n Raw.is_deleted.is_(False)).order_by(Raw.step.desc()).all()\n return processes\n\n @classmethod\n def update_send_flag(cls, db: Session, interaction_id: str, node_id: str):\n \"\"\"\n update send flag, if send success, update flag\n if send flag is True, it means that the process has been sent\n and no longer needs to be sent\n\n Args:\n db (Session): db session\n interaction_id (str): interaction id\n node_id (str): node id\n\n Returns:\n None\n\n Raises:\n XAgentError: process is not exist\n \"\"\"\n process = db.query(Raw).filter(\n Raw.interaction_id == interaction_id, Raw.node_id == node_id).first()\n if process is None:\n raise XAgentError(\"process is not exist\")\n process.is_send = True\n process.update_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n db.commit()\n\n @classmethod\n def update_receive_flag(cls, db: Session, interaction_id: str, node_id: str):\n \"\"\"\n update receive flag, if receive success, update flag\n if this flag is True, it means that the process has been received from human\n\n Args:\n db (Session): db session\n interaction_id (str): interaction id\n node_id (str): node id\n\n Returns:\n None\n\n Raises:\n XAgentError: process is not exist\n \"\"\"\n process = db.query(Raw).filter(\n Raw.interaction_id == interaction_id, Raw.node_id == node_id).first()\n if process is None:\n raise XAgentError(\"process is not exist\")\n process.is_receive = True\n process.update_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n db.commit()\n\n @classmethod\n def update_human_data(cls, db: Session, interaction_id: str, node_id: str, human_data: dict):\n \"\"\"\n update human data\n\n Args:\n db (Session): db session\n interaction_id (str): interaction id\n node_id (str): node id\n human_data (dict): human data\n\n Returns:\n None\n\n Raises:\n XAgentError: process is not exist\n \"\"\"\n process = db.query(Raw).filter(\n Raw.interaction_id == interaction_id, Raw.node_id == node_id, Raw.is_deleted.is_(False)).first()\n if process is None:\n raise XAgentError(\"process is not exist\")\n process.is_receive = True\n process.is_human = True\n process.human_data = human_data\n db.commit()\n\n @classmethod\n def insert_error(cls, db: Session, interaction_id: str, message: str):\n \"\"\"\n if interaction is failed, insert error message\n this message will be displayed in the interaction list\n\n Args:\n db (Session): db session\n interaction_id (str): interaction id\n message (str): error message\n\n Returns:\n None\n\n Raises:\n None\n\n \"\"\"\n process = Raw(\n node_id=uuid.uuid4().hex,\n interaction_id=interaction_id,\n current=\"\",\n step=0,\n data=message,\n file_list=[],\n status=StatusEnum.FAILED,\n create_time=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n update_time=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n )\n db.add(process)\n db.commit()\n\n @classmethod\n def get_finish_status(cls, db: Session, interaction_id: str):\n \"\"\"\n get interaction finish status\n\n Args:\n db (Session): db session\n interaction_id (str): interaction id\n\n Returns:\n Boolean: True or False\n \"\"\"\n process = db.query(Raw).filter(\n Raw.interaction_id == interaction_id,\n Raw.is_deleted.is_(False),\n Raw.status == \"finished\").first()\n return process is not None"
},
{
"identifier": "XAgentDBError",
"path": "XAgentServer/exts/exception_ext.py",
"snippet": "class XAgentDBError(XAgentError):\n \"\"\"Exception raised because of DB error\n\n Attributes:\n message -- explanation of the error\n \"\"\"\n\n def __init__(self, message=\"XAgent DB Error!\"):\n self.message = message\n super().__init__(self.message)"
},
{
"identifier": "InteractionBase",
"path": "XAgentServer/models/interaction.py",
"snippet": "class InteractionBase(metaclass=abc.ABCMeta):\n def __init__(self,\n interaction_id: str,\n user_id: str,\n create_time: str,\n description: str,\n agent: str = \"\",\n mode: str = \"\",\n file_list: list = [],\n recorder_root_dir: str = \"\",\n status: str = \"\",\n message: str = \"\",\n current_step: str = \"\",\n update_time: str = \"\",\n is_deleted: bool = False,\n call_method: str = \"web\",\n ):\n self.interaction_id = interaction_id\n self.user_id = user_id\n self.create_time = create_time\n self.description = description\n self.agent = agent\n self.mode = mode\n self.file_list = file_list\n self.recorder_root_dir = recorder_root_dir\n self.status = status\n self.message = message\n self.current_step = current_step\n self.update_time = update_time\n self.is_deleted = is_deleted\n self.call_method = call_method\n\n def to_dict(self, include=None, exclude=None):\n data = {\n \"interaction_id\": self.interaction_id,\n \"user_id\": self.user_id,\n \"create_time\": self.create_time,\n \"description\": self.description,\n \"agent\": self.agent,\n \"mode\": self.mode,\n \"file_list\": self.file_list,\n \"recorder_root_dir\": self.recorder_root_dir,\n \"status\": self.status,\n \"message\": self.message,\n \"current_step\": self.current_step,\n \"update_time\": self.update_time,\n \"is_deleted\": self.is_deleted,\n \"call_method\": self.call_method,\n }\n if include:\n data = {k: v for k, v in data.items() if k in include}\n if exclude:\n data = {k: v for k, v in data.items() if k not in exclude}\n return data\n \n def to_json(self):\n return json.dumps(self.to_dict(), indent=2, ensure_ascii=False)\n \n @classmethod\n def from_json(cls, json_data):\n return cls(**json_data)\n \n @classmethod\n def from_db(cls, interaction):\n return cls(interaction.interaction_id,\n interaction.user_id,\n interaction.create_time,\n interaction.description,\n interaction.agent,\n interaction.mode,\n interaction.file_list,\n interaction.recorder_root_dir,\n interaction.status,\n interaction.message,\n interaction.current_step,\n interaction.update_time,\n interaction.is_deleted,\n interaction.call_method,\n )"
},
{
"identifier": "Raw",
"path": "XAgentServer/database/models.py",
"snippet": "class Raw(Base):\n \"\"\"Raw Data\"\"\"\n __tablename__ = \"raw\"\n # id/id\n id = Column(Integer, primary_key=True, index=True)\n # node_id\n node_id = Column(String(255))\n # 交互id/interaction_id\n interaction_id = Column(String(255))\n # 当前节点/current\n current = Column(String(128))\n # step/step\n step = Column(Integer, default=0)\n # 数据/agent data\n data = Column(JSON)\n # workspace文件列表/workspace file list\n file_list = Column(JSON)\n # 状态/status\n status = Column(String(20))\n # 是否中断/interrupt or not\n do_interrupt = Column(Boolean, default=False)\n # 已等待时间/wait seconds\n wait_seconds = Column(Integer, default=0)\n # 是否需要人工干预/ask for human help or not\n ask_for_human_help = Column(Boolean, default=False)\n # 创建时间/create time\n create_time = Column(String(255))\n # 更新时间/update time\n update_time = Column(String(255))\n # 是否删除/is deleted or not\n is_deleted = Column(Boolean, default=False)\n # 是否人工已经输入/has human input or not\n is_human = Column(Boolean, default=False)\n # 人工输入数据/human data\n human_data = Column(JSON)\n # 人工文件列表/agent file list\n human_file_list = Column(JSON)\n # 是否推送前端/has send to frontend or not\n is_send = Column(Boolean, default=False)\n # 是否接收前端消息/has receive message from frontend or not\n is_receive = Column(Boolean, default=False)\n # 是否包含png/has png or not\n include_pictures = Column(Boolean, default=False)"
},
{
"identifier": "InteractionParameter",
"path": "XAgentServer/models/parameter.py",
"snippet": "class InteractionParameter(metaclass=abc.ABCMeta):\n \"\"\"\n 交互参数\n \"\"\"\n\n def __init__(self,\n interaction_id: str,\n parameter_id: str,\n args: Union[str, dict, None] = None\n ):\n self.interaction_id = interaction_id\n self.args = args\n self.parameter_id = parameter_id\n\n def to_dict(self):\n return {\n \"interaction_id\": self.interaction_id,\n \"parameter_id\": self.parameter_id,\n \"args\": self.args,\n }\n\n def to_json(self):\n return json.dumps(self.to_dict(), indent=2, ensure_ascii=False)\n\n @classmethod\n def from_json(cls, json_data):\n return cls(**json_data)\n \n @classmethod\n def from_db(cls, interaction):\n return cls(interaction.interaction_id,\n interaction.parameter_id,\n interaction.args\n )"
},
{
"identifier": "XAgentRaw",
"path": "XAgentServer/models/raw.py",
"snippet": "class XAgentRaw(metaclass=abc.ABCMeta):\n \"\"\"XAgent Raw Object\"\"\"\n\n def __init__(self, node_id: str,\n interaction_id: str,\n current: str,\n step: int,\n data: dict,\n file_list: list,\n status: str,\n do_interrupt: bool,\n wait_seconds: int,\n ask_for_human_help: bool,\n create_time: str,\n update_time: str,\n is_deleted: bool,\n is_human: bool,\n human_data: dict,\n human_file_list: list,\n is_send: bool,\n is_receive: bool,\n include_pictures: bool = False,):\n self.node_id = node_id\n self.interaction_id = interaction_id\n self.current = current\n self.step = step\n self.data = data\n self.file_list = file_list\n self.status = status\n self.do_interrupt = do_interrupt\n self.wait_seconds = wait_seconds\n self.ask_for_human_help = ask_for_human_help\n self.create_time = create_time\n self.update_time = update_time\n self.is_deleted = is_deleted\n self.is_human = is_human\n self.human_data = human_data\n self.human_file_list = human_file_list\n self.is_send = is_send\n self.is_receive = is_receive\n self.include_pictures = include_pictures\n\n def to_dict(self):\n \"\"\"XAgent Raw Object to dict\"\"\"\n return {\n \"node_id\": self.node_id,\n \"interaction_id\": self.interaction_id,\n \"current\": self.current,\n \"step\": self.step,\n \"data\": self.data,\n \"file_list\": self.file_list,\n \"status\": self.status,\n \"do_interrupt\": self.do_interrupt,\n \"wait_seconds\": self.wait_seconds,\n \"ask_for_human_help\": self.ask_for_human_help,\n \"create_time\": self.create_time,\n \"update_time\": self.update_time,\n \"is_deleted\": self.is_deleted,\n \"is_human\": self.is_human,\n \"human_data\": self.human_data,\n \"human_file_list\": self.human_file_list,\n \"is_send\": self.is_send,\n \"is_receive\": self.is_receive,\n \"include_pictures\": self.include_pictures\n }\n\n def to_json(self):\n \"\"\"XAgent Raw Object to json\"\"\"\n return json.dumps(self.to_dict(), indent=2, ensure_ascii=False)\n\n @classmethod\n def from_json(cls, json_data):\n \"\"\"XAgent Raw Object from json\"\"\"\n return cls(**json_data)\n\n def update(self, update_data: dict):\n \"\"\"XAgent Raw Object update\"\"\"\n for k, v in update_data.items():\n setattr(self, k, v)\n return self\n\n @classmethod\n def from_db(cls, db_data):\n \"\"\"XAgent Raw Object from db\"\"\"\n return cls(\n node_id=db_data.node_id,\n interaction_id=db_data.interaction_id,\n current=db_data.current,\n step=db_data.step,\n data=db_data.data,\n file_list=db_data.file_list,\n status=db_data.status,\n do_interrupt=db_data.do_interrupt,\n wait_seconds=db_data.wait_seconds,\n ask_for_human_help=db_data.ask_for_human_help,\n create_time=db_data.create_time,\n update_time=db_data.update_time,\n is_deleted=db_data.is_deleted,\n is_human=db_data.is_human,\n human_data=db_data.human_data,\n human_file_list=db_data.human_file_list,\n is_send=db_data.is_send,\n is_receive=db_data.is_receive,\n include_pictures=db_data.include_pictures\n )"
}
] | import abc
import uuid
from datetime import datetime
from typing import List
from sqlalchemy.orm import Session
from XAgentServer.database.interface.interaction import InteractionDBInterface
from XAgentServer.exts.exception_ext import XAgentDBError
from XAgentServer.models.interaction import InteractionBase
from XAgentServer.database.models import Raw
from XAgentServer.models.parameter import InteractionParameter
from XAgentServer.models.raw import XAgentRaw | 8,430 | @classmethod
def delete_interaction(cls, db: Session, interaction_id: str):
"""
delete interaction
Args:
db: db
interaction_id: interaction id
Raises:
XAgentDBError: XAgent DB Error
"""
try:
InteractionDBInterface.delete_interaction(
db=db, interaction_id=interaction_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def get_shared_interaction(cls,
db: Session,
interaction_id: str) -> InteractionBase | None:
"""
get shared interaction
Args:
db: db
interaction_id: interaction id
Returns:
interaction InteractionBase, if not found, return None
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.get_shared_interaction(
db=db,
interaction_id=interaction_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def search_many_shared(cls,
db: Session,
page_size: int = 20,
page_index: int = 1) -> list[dict]:
"""
search many shared
Args:
db: db
page_size: page size
page_index: page index
Returns:
interaction list [dict]
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.search_many_shared(db=db,
page_size=page_size,
page_index=page_index)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def insert_raw(cls, db: Session, process: XAgentRaw):
"""
insert raw
Args:
db: db
process: process
Raises:
XAgentDBError: XAgent DB Error
"""
try:
InteractionDBInterface.insert_raw(db=db, process=process)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def search_many_raws(cls, db: Session, interaction_id: str) -> List[XAgentRaw] | None:
"""
search many raws
Args:
db: db
interaction_id: interaction id
Returns:
raw list [XAgentRaw]
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return [XAgentRaw.from_db(raw) for raw in
InteractionDBInterface.search_many_raws(db=db, interaction_id=interaction_id)]
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def get_raw(cls, db: Session, interaction_id: str, node_id: str) -> XAgentRaw | None:
"""
get raw
Args:
db: db
interaction_id: interaction id
node_id: node id
Returns:
raw XAgentRaw, if not found, return None
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.get_raw(db=db,
interaction_id=interaction_id,
node_id=node_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
| """XAgentServer application cruds interaction module."""
class InteractionCRUD(metaclass=abc.ABCMeta):
"""
interaction crud
"""
@classmethod
def search_many_interaction(cls, db: Session) -> list:
"""
        search many interactions
"""
try:
return InteractionDBInterface.search_many_interaction(db=db)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
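    # Most helpers in this class follow the same pattern: delegate the query to
    # InteractionDBInterface and re-raise any failure as XAgentDBError, so callers
    # only need to handle a single exception type.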
@classmethod
def get_interaction(cls, db: Session, interaction_id: str) -> InteractionBase | None:
"""
get interaction
Args:
db: db
interaction_id: interaction id
Returns:
interaction InteractionBase
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.get_interaction(db=db, interaction_id=interaction_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def create_interaction(cls, db: Session, base: InteractionBase):
"""
create interaction
Args:
db: db
base: base
Raises:
XAgentDBError: XAgent DB Error
"""
try:
InteractionDBInterface.create_interaction(db=db, base=base)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def get_ready_interaction(cls, db: Session, user_id: str):
"""
        get ready interaction
Args:
db: db
user_id: user_id
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.get_ready_interaction(db=db, user_id=user_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def add_parameter(cls, db: Session, parameter: InteractionParameter = None):
"""
add parameter
Args:
db: db
parameter: parameter
Raises:
XAgentDBError: XAgent DB Error
"""
try:
InteractionDBInterface.add_parameter(db=db, parameter=parameter)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def get_parameter(cls, db: Session, interaction_id: str) -> list:
"""
get parameter
Args:
db: db
interaction_id: interaction id
Returns:
parameter list [InteractionParameter]
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.get_parameter(db=db, interaction_id=interaction_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def get_init_parameter(cls, db: Session, interaction_id: str) -> InteractionParameter:
"""
get init parameter
Args:
db: db
interaction_id: interaction id
Returns:
parameter InteractionParameter
Raises:
XAgentDBError: XAgent DB Error
"""
try:
parameters = InteractionDBInterface.get_parameter(db=db, interaction_id=interaction_id)
init_parameter = parameters[0]
parameter = InteractionParameter.from_json({"args": init_parameter, "interaction_id": interaction_id, "parameter_id": None})
return parameter
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def search_interaction_by_user_id(cls,
db: Session,
user_id: str,
page_size: int = 10,
page_num: int = 1) -> list[dict]:
"""
        search interactions by user id
Args:
db: db
user_id: user id
page_size: page size
page_num: page num
Returns:
interaction list [dict]
Raises:
XAgentDBError: XAgent DB Error
"""
return InteractionDBInterface.search_interaction_by_user_id(db=db,
user_id=user_id,
page_size=page_size,
page_num=page_num)
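    # Note: search_interaction_by_user_id above is the one helper that does not wrap
    # its call in try/except, so database errors propagate to the caller unchanged.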
@classmethod
def is_exist(cls, db: Session, interaction_id: str) -> bool:
"""
        check whether the interaction exists
Args:
db: db
interaction_id: interaction id
Returns:
            True if the interaction exists, else False
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.is_exist(db=db, interaction_id=interaction_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def update_interaction(cls, db: Session, base_data: dict):
"""
update interaction
Args:
db: db
base_data: base data
Raises:
XAgentDBError: XAgent DB Error
"""
try:
InteractionDBInterface.update_interaction(db=db, base_data=base_data)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def update_interaction_status(cls,
db: Session,
interaction_id: str,
status: str,
message: str,
current_step: int):
"""
update interaction status
Args:
db: db
interaction_id: interaction id
status: status
message: message
current_step: current step
Raises:
XAgentDBError: XAgent DB Error
"""
try:
InteractionDBInterface.update_interaction_status(
db=db,
interaction_id=interaction_id,
status=status,
message=message,
current_step=current_step)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def update_interaction_parameter(cls,
db: Session,
interaction_id: str,
parameter: InteractionParameter):
"""
update interaction parameter
Args:
db: db
interaction_id: interaction id
parameter: parameter
Raises:
XAgentDBError: XAgent DB Error
"""
try:
InteractionDBInterface.update_interaction_parameter(
db=db,
interaction_id=interaction_id,
parameter=parameter)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def is_running(cls, db: Session, user_id: str):
"""
is running
Args:
db: db
user_id: user id
Returns:
True if running, else False
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.is_running(db=db, user_id=user_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def delete_interaction(cls, db: Session, interaction_id: str):
"""
delete interaction
Args:
db: db
interaction_id: interaction id
Raises:
XAgentDBError: XAgent DB Error
"""
try:
InteractionDBInterface.delete_interaction(
db=db, interaction_id=interaction_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def get_shared_interaction(cls,
db: Session,
interaction_id: str) -> InteractionBase | None:
"""
get shared interaction
Args:
db: db
interaction_id: interaction id
Returns:
interaction InteractionBase, if not found, return None
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.get_shared_interaction(
db=db,
interaction_id=interaction_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def search_many_shared(cls,
db: Session,
page_size: int = 20,
page_index: int = 1) -> list[dict]:
"""
search many shared
Args:
db: db
page_size: page size
page_index: page index
Returns:
interaction list [dict]
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.search_many_shared(db=db,
page_size=page_size,
page_index=page_index)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def insert_raw(cls, db: Session, process: XAgentRaw):
"""
insert raw
Args:
db: db
process: process
Raises:
XAgentDBError: XAgent DB Error
"""
try:
InteractionDBInterface.insert_raw(db=db, process=process)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def search_many_raws(cls, db: Session, interaction_id: str) -> List[XAgentRaw] | None:
"""
search many raws
Args:
db: db
interaction_id: interaction id
Returns:
raw list [XAgentRaw]
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return [XAgentRaw.from_db(raw) for raw in
InteractionDBInterface.search_many_raws(db=db, interaction_id=interaction_id)]
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def get_raw(cls, db: Session, interaction_id: str, node_id: str) -> XAgentRaw | None:
"""
get raw
Args:
db: db
interaction_id: interaction id
node_id: node id
Returns:
raw XAgentRaw, if not found, return None
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.get_raw(db=db,
interaction_id=interaction_id,
node_id=node_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod | def get_next_send(cls, db: Session, interaction_id: str) -> List[Raw] | None: | 3 | 2023-10-16 03:44:57+00:00 | 12k |
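For orientation, the record above is a next-line-prediction example whose target line opens a get_next_send wrapper. A minimal sketch of how that method could continue, assuming it follows the same delegate-and-wrap pattern as the other InteractionCRUD helpers and that the DB interface exposes a matching get_next_send query (illustrative only, not the verbatim repository code):

    @classmethod
    def get_next_send(cls, db: Session, interaction_id: str) -> List[Raw] | None:
        """
        get the processes of an interaction that have not been sent to the frontend yet

        Args:
            db: db
            interaction_id: interaction id

        Returns:
            raw list [Raw]

        Raises:
            XAgentDBError: XAgent DB Error
        """
        try:
            # assumption: InteractionDBInterface provides a matching get_next_send helper
            return InteractionDBInterface.get_next_send(db=db, interaction_id=interaction_id)
        except Exception as e:
            raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e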
PKU-YuanGroup/Video-LLaVA | llava/model/multimodal_encoder/languagebind/video/modeling_video.py | [
{
"identifier": "LanguageBindVideoConfig",
"path": "llava/model/multimodal_encoder/languagebind/video/configuration_video.py",
"snippet": "class LanguageBindVideoConfig(PretrainedConfig):\n r\"\"\"\n [`CLIPConfig`] is the configuration class to store the configuration of a [`CLIPModel`]. It is used to instantiate\n a CLIP model according to the specified arguments, defining the text model and vision model configs. Instantiating\n a configuration with the defaults will yield a similar configuration to that of the CLIP\n [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n Args:\n text_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`CLIPTextConfig`].\n vision_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`CLIPVisionConfig`].\n projection_dim (`int`, *optional*, defaults to 512):\n Dimentionality of text and vision projection layers.\n logit_scale_init_value (`float`, *optional*, defaults to 2.6592):\n The inital value of the *logit_scale* paramter. Default is used as per the original CLIP implementation.\n kwargs (*optional*):\n Dictionary of keyword arguments.\n\n Example:\n\n ```python\n >>> from transformers import CLIPConfig, CLIPModel\n\n >>> # Initializing a CLIPConfig with openai/clip-vit-base-patch32 style configuration\n >>> configuration = CLIPConfig()\n\n >>> # Initializing a CLIPModel (with random weights) from the openai/clip-vit-base-patch32 style configuration\n >>> model = CLIPModel(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n\n >>> # We can also initialize a CLIPConfig from a CLIPTextConfig and a CLIPVisionConfig\n >>> from transformers import CLIPTextConfig, CLIPVisionConfig\n\n >>> # Initializing a CLIPText and CLIPVision configuration\n >>> config_text = CLIPTextConfig()\n >>> config_vision = CLIPVisionConfig()\n\n >>> config = CLIPConfig.from_text_vision_configs(config_text, config_vision)\n ```\"\"\"\n\n model_type = \"LanguageBindVideo\"\n is_composition = True\n\n def __init__(\n self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs\n ):\n # If `_config_dict` exist, we use them for the backward compatibility.\n # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot\n # of confusion!).\n text_config_dict = kwargs.pop(\"text_config_dict\", None)\n vision_config_dict = kwargs.pop(\"vision_config_dict\", None)\n\n super().__init__(**kwargs)\n\n # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in\n # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. 
The values should be same in most\n # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.\n if text_config_dict is not None:\n if text_config is None:\n text_config = {}\n\n # This is the complete result when using `text_config_dict`.\n _text_config_dict = CLIPTextConfig(**text_config_dict).to_dict()\n\n # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.\n for key, value in _text_config_dict.items():\n if key in text_config and value != text_config[key] and key not in [\"transformers_version\"]:\n # If specified in `text_config_dict`\n if key in text_config_dict:\n message = (\n f\"`{key}` is found in both `text_config_dict` and `text_config` but with different values. \"\n f'The value `text_config_dict[\"{key}\"]` will be used instead.'\n )\n # If inferred from default argument values (just to be super careful)\n else:\n message = (\n f\"`text_config_dict` is provided which will be used to initialize `CLIPTextConfig`. The \"\n f'value `text_config[\"{key}\"]` will be overriden.'\n )\n logger.warning(message)\n\n # Update all values in `text_config` with the ones in `_text_config_dict`.\n text_config.update(_text_config_dict)\n\n if vision_config_dict is not None:\n if vision_config is None:\n vision_config = {}\n\n # This is the complete result when using `vision_config_dict`.\n _vision_config_dict = CLIPVisionConfig(**vision_config_dict).to_dict()\n # convert keys to string instead of integer\n if \"id2label\" in _vision_config_dict:\n _vision_config_dict[\"id2label\"] = {\n str(key): value for key, value in _vision_config_dict[\"id2label\"].items()\n }\n\n # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.\n for key, value in _vision_config_dict.items():\n if key in vision_config and value != vision_config[key] and key not in [\"transformers_version\"]:\n # If specified in `vision_config_dict`\n if key in vision_config_dict:\n message = (\n f\"`{key}` is found in both `vision_config_dict` and `vision_config` but with different \"\n f'values. The value `vision_config_dict[\"{key}\"]` will be used instead.'\n )\n # If inferred from default argument values (just to be super careful)\n else:\n message = (\n f\"`vision_config_dict` is provided which will be used to initialize `CLIPVisionConfig`. \"\n f'The value `vision_config[\"{key}\"]` will be overriden.'\n )\n logger.warning(message)\n\n # Update all values in `vision_config` with the ones in `_vision_config_dict`.\n vision_config.update(_vision_config_dict)\n\n if text_config is None:\n text_config = {}\n logger.info(\"`text_config` is `None`. Initializing the `CLIPTextConfig` with default values.\")\n\n if vision_config is None:\n vision_config = {}\n logger.info(\"`vision_config` is `None`. 
initializing the `CLIPVisionConfig` with default values.\")\n\n self.text_config = CLIPTextConfig(**text_config)\n self.vision_config = CLIPVisionConfig(**vision_config)\n\n self.projection_dim = projection_dim\n self.logit_scale_init_value = logit_scale_init_value\n self.initializer_factor = 1.0\n\n @classmethod\n def from_text_vision_configs(cls, text_config: CLIPTextConfig, vision_config: CLIPVisionConfig, **kwargs):\n r\"\"\"\n Instantiate a [`CLIPConfig`] (or a derived class) from clip text model configuration and clip vision model\n configuration.\n\n Returns:\n [`CLIPConfig`]: An instance of a configuration object\n \"\"\"\n\n return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)\n\n def to_dict(self):\n \"\"\"\n Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].\n\n Returns:\n `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n output[\"text_config\"] = self.text_config.to_dict()\n output[\"vision_config\"] = self.vision_config.to_dict()\n output[\"model_type\"] = self.__class__.model_type\n return output"
},
{
"identifier": "CLIPVisionConfig",
"path": "llava/model/multimodal_encoder/languagebind/video/configuration_video.py",
"snippet": "class CLIPVisionConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`CLIPVisionModel`]. It is used to instantiate a\n CLIP vision encoder according to the specified arguments, defining the model architecture. Instantiating a\n configuration with the defaults will yield a similar configuration to that of the vision encoder of the CLIP\n [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n Args:\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n intermediate_size (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n image_size (`int`, *optional*, defaults to 224):\n The size (resolution) of each image.\n patch_size (`int`, *optional*, defaults to 32):\n The size (resolution) of each patch.\n hidden_act (`str` or `function`, *optional*, defaults to `\"quick_gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` ``\"quick_gelu\"` are supported.\n layer_norm_eps (`float`, *optional*, defaults to 1e-5):\n The epsilon used by the layer normalization layers.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n initializer_factor (`float`, *optional*, defaults to 1):\n A factor for initializing all weight matrices (should be kept to 1, used internally for initialization\n testing).\n\n Example:\n\n ```python\n >>> from transformers import CLIPVisionConfig, CLIPVisionModel\n\n >>> # Initializing a CLIPVisionConfig with openai/clip-vit-base-patch32 style configuration\n >>> configuration = CLIPVisionConfig()\n\n >>> # Initializing a CLIPVisionModel (with random weights) from the openai/clip-vit-base-patch32 style configuration\n >>> model = CLIPVisionModel(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n ```\"\"\"\n\n model_type = \"clip_vision_model\"\n\n def __init__(\n self,\n hidden_size=768,\n intermediate_size=3072,\n projection_dim=512,\n num_hidden_layers=12,\n num_attention_heads=12,\n num_channels=3,\n image_size=224,\n patch_size=32,\n hidden_act=\"quick_gelu\",\n layer_norm_eps=1e-5,\n attention_dropout=0.0,\n initializer_range=0.02,\n initializer_factor=1.0,\n\n add_time_attn=False, ################################\n num_frames=1, ################################\n force_patch_dropout=0.0, ################################\n lora_r=2, ################################\n lora_alpha=16, ################################\n lora_dropout=0.0, ################################\n num_mel_bins=0.0, ################################\n target_length=0.0, ################################\n video_decode_backend='decord', #########################\n **kwargs,\n 
):\n super().__init__(**kwargs)\n\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.projection_dim = projection_dim\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.num_channels = num_channels\n self.patch_size = patch_size\n self.image_size = image_size\n self.initializer_range = initializer_range\n self.initializer_factor = initializer_factor\n self.attention_dropout = attention_dropout\n self.layer_norm_eps = layer_norm_eps\n self.hidden_act = hidden_act\n\n self.add_time_attn = add_time_attn ################\n self.num_frames = num_frames ################\n self.force_patch_dropout = force_patch_dropout ################\n self.lora_r = lora_r ################\n self.lora_alpha = lora_alpha ################\n self.lora_dropout = lora_dropout ################\n self.num_mel_bins = num_mel_bins ################\n self.target_length = target_length ################\n self.video_decode_backend = video_decode_backend ################\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> \"PretrainedConfig\":\n cls._set_token_in_kwargs(kwargs)\n\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n\n # get the vision config dict if we are loading from CLIPConfig\n if config_dict.get(\"model_type\") == \"clip\":\n config_dict = config_dict[\"vision_config\"]\n\n if \"model_type\" in config_dict and hasattr(cls, \"model_type\") and config_dict[\"model_type\"] != cls.model_type:\n logger.warning(\n f\"You are using a model of type {config_dict['model_type']} to instantiate a model of type \"\n f\"{cls.model_type}. This is not supported for all configurations of models and can yield errors.\"\n )\n\n return cls.from_dict(config_dict, **kwargs)"
},
{
"identifier": "CLIPTextConfig",
"path": "llava/model/multimodal_encoder/languagebind/video/configuration_video.py",
"snippet": "class CLIPTextConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`CLIPTextModel`]. It is used to instantiate a CLIP\n text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration\n with the defaults will yield a similar configuration to that of the text encoder of the CLIP\n [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n Args:\n vocab_size (`int`, *optional*, defaults to 49408):\n Vocabulary size of the CLIP text model. Defines the number of different tokens that can be represented by\n the `inputs_ids` passed when calling [`CLIPModel`].\n hidden_size (`int`, *optional*, defaults to 512):\n Dimensionality of the encoder layers and the pooler layer.\n intermediate_size (`int`, *optional*, defaults to 2048):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 8):\n Number of attention heads for each attention layer in the Transformer encoder.\n max_position_embeddings (`int`, *optional*, defaults to 77):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n hidden_act (`str` or `function`, *optional*, defaults to `\"quick_gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. 
If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` `\"quick_gelu\"` are supported.\n layer_norm_eps (`float`, *optional*, defaults to 1e-5):\n The epsilon used by the layer normalization layers.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n initializer_factor (`float`, *optional*, defaults to 1):\n A factor for initializing all weight matrices (should be kept to 1, used internally for initialization\n testing).\n\n Example:\n\n ```python\n >>> from transformers import CLIPTextConfig, CLIPTextModel\n\n >>> # Initializing a CLIPTextConfig with openai/clip-vit-base-patch32 style configuration\n >>> configuration = CLIPTextConfig()\n\n >>> # Initializing a CLIPTextModel (with random weights) from the openai/clip-vit-base-patch32 style configuration\n >>> model = CLIPTextModel(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n ```\"\"\"\n model_type = \"clip_text_model\"\n\n def __init__(\n self,\n vocab_size=49408,\n hidden_size=512,\n intermediate_size=2048,\n projection_dim=512,\n num_hidden_layers=12,\n num_attention_heads=8,\n max_position_embeddings=77,\n hidden_act=\"quick_gelu\",\n layer_norm_eps=1e-5,\n attention_dropout=0.0,\n initializer_range=0.02,\n initializer_factor=1.0,\n # This differs from `CLIPTokenizer`'s default and from openai/clip\n # See https://github.com/huggingface/transformers/pull/24773#issuecomment-1632287538\n pad_token_id=1,\n bos_token_id=49406,\n eos_token_id=49407,\n **kwargs,\n ):\n super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)\n\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.projection_dim = projection_dim\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.max_position_embeddings = max_position_embeddings\n self.layer_norm_eps = layer_norm_eps\n self.hidden_act = hidden_act\n self.initializer_range = initializer_range\n self.initializer_factor = initializer_factor\n self.attention_dropout = attention_dropout\n self.add_time_attn = False ######################################\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> \"PretrainedConfig\":\n cls._set_token_in_kwargs(kwargs)\n\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n\n # get the text config dict if we are loading from CLIPConfig\n if config_dict.get(\"model_type\") == \"clip\":\n config_dict = config_dict[\"text_config\"]\n\n if \"model_type\" in config_dict and hasattr(cls, \"model_type\") and config_dict[\"model_type\"] != cls.model_type:\n logger.warning(\n f\"You are using a model of type {config_dict['model_type']} to instantiate a model of type \"\n f\"{cls.model_type}. This is not supported for all configurations of models and can yield errors.\"\n )\n\n return cls.from_dict(config_dict, **kwargs)"
}
] | import math
import torch
from typing import Optional, Tuple, Union
from einops import rearrange
from peft import LoraConfig, get_peft_model
from torch import nn
from torch.nn import functional as F
from transformers import PreTrainedModel, add_start_docstrings
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from transformers.models.clip.modeling_clip import CLIPMLP, CLIPAttention, CLIPTextEmbeddings, CLIPVisionEmbeddings, \
CLIPVisionModelWithProjection, CLIPTextModelWithProjection, _expand_mask, CLIPOutput, clip_loss
from transformers.utils import add_start_docstrings_to_model_forward, replace_return_docstrings
from .configuration_video import LanguageBindVideoConfig, CLIPVisionConfig, CLIPTextConfig | 7,852 | Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`CLIPEncoderLayer`].
Args:
        config: LanguageBindVideoConfig
"""
def __init__(self, config: LanguageBindVideoConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
inputs_embeds,
attention_mask: Optional[torch.Tensor] = None,
causal_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutput]:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
attention_mask,
causal_attention_mask,
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
class CLIPTextTransformer(nn.Module):
|
class PatchDropout(nn.Module):
"""
https://arxiv.org/abs/2212.00794
"""
def __init__(self, prob, exclude_first_token=True):
super().__init__()
assert 0 <= prob < 1.
self.prob = prob
self.exclude_first_token = exclude_first_token # exclude CLS token
def forward(self, x, B, T):
if not self.training or self.prob == 0.:
return x
if self.exclude_first_token:
cls_tokens, x = x[:, :1], x[:, 1:]
else:
cls_tokens = torch.jit.annotate(torch.Tensor, x[:, :1])
batch = x.size()[0]
num_tokens = x.size()[1]
batch_indices = torch.arange(batch)
batch_indices = batch_indices[..., None]
keep_prob = 1 - self.prob
num_patches_keep = max(1, int(num_tokens * keep_prob))
if T == 1:
rand = torch.randn(batch, num_tokens)
patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices
else:
rand = torch.randn(B, num_tokens)
patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices
patch_indices_keep = patch_indices_keep.unsqueeze(1).repeat(1, T, 1)
patch_indices_keep = rearrange(patch_indices_keep, 'b t n -> (b t) n')
x = x[batch_indices, patch_indices_keep]
if self.exclude_first_token:
x = torch.cat((cls_tokens, x), dim=1)
return x
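# Note: PatchDropout above implements the patch-token dropout of
# https://arxiv.org/abs/2212.00794 -- during training it keeps only a random subset of
# patch tokens (the CLS token is always preserved), and for video input (T > 1) it keeps
# the same patch indices for every frame of a clip so tokens stay aligned across time.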
class CLIPEncoderLayer(nn.Module):
def __init__(self, config: LanguageBindVideoConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = CLIPAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = CLIPMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.add_time_attn = config.add_time_attn
if self.add_time_attn:
self.t = config.num_frames
self.temporal_embedding = nn.Parameter(torch.zeros(1, config.num_frames, config.hidden_size))
nn.init.normal_(self.temporal_embedding, std=config.hidden_size ** -0.5)
self.embed_dim = config.hidden_size
self.temporal_attn = CLIPAttention(config)
self.temporal_layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
# self.temporal_mlp = CLIPMLP(config)
# self.temporal_layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
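    # When add_time_attn is enabled, forward() below first applies self-attention across
    # the num_frames time steps of every spatial token (after adding a learned temporal
    # position embedding), and only then applies the usual spatial attention over the
    # patch tokens of each frame; both passes reuse the same CLIPAttention implementation.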
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
causal_attention_mask: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
if self.add_time_attn:
bt, n, d = hidden_states.shape
t = self.t
# time embed
if t != 1:
n = hidden_states.shape[1]
hidden_states = rearrange(hidden_states, '(b t) n d -> (b n) t d', t=t)
hidden_states = hidden_states + self.temporal_embedding[:, :t, :]
hidden_states = rearrange(hidden_states, '(b n) t d -> (b t) n d', n=n)
# time attn
residual = hidden_states
hidden_states = rearrange(hidden_states, '(b t) n d -> (b n) t d', t=t)
# hidden_states = self.layer_norm1(hidden_states) # share layernorm
hidden_states = self.temporal_layer_norm1(hidden_states)
hidden_states, attn_weights = self.temporal_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
causal_attention_mask=causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = residual + rearrange(hidden_states, '(b n) t d -> (b t) n d', n=n)
# residual = hidden_states
# hidden_states = rearrange(hidden_states, '(b t) n d -> (b n) t d', t=t)
# # hidden_states = self.layer_norm2(hidden_states) # share layernorm
# hidden_states = self.temporal_layer_norm2(hidden_states)
# hidden_states = self.temporal_mlp(hidden_states)
# hidden_states = residual + rearrange(hidden_states, '(b n) t d -> (b t) n d', n=n)
# spatial attn
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
causal_attention_mask=causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
class CLIPPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LanguageBindVideoConfig
base_model_prefix = "clip"
supports_gradient_checkpointing = True
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor
if isinstance(module, CLIPTextEmbeddings):
module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
elif isinstance(module, CLIPVisionEmbeddings):
factor = self.config.initializer_factor
nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
elif isinstance(module, CLIPAttention):
factor = self.config.initializer_factor
in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
out_proj_std = (module.embed_dim**-0.5) * factor
nn.init.normal_(module.q_proj.weight, std=in_proj_std)
nn.init.normal_(module.k_proj.weight, std=in_proj_std)
nn.init.normal_(module.v_proj.weight, std=in_proj_std)
nn.init.normal_(module.out_proj.weight, std=out_proj_std)
elif isinstance(module, CLIPMLP):
factor = self.config.initializer_factor
in_proj_std = (
(module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
)
fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
nn.init.normal_(module.fc1.weight, std=fc_std)
nn.init.normal_(module.fc2.weight, std=in_proj_std)
elif isinstance(module, LanguageBindVideo):
nn.init.normal_(
module.text_projection.weight,
std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
)
nn.init.normal_(
module.visual_projection.weight,
std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
)
elif isinstance(module, CLIPVisionModelWithProjection):
nn.init.normal_(
module.visual_projection.weight,
std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
)
elif isinstance(module, CLIPTextModelWithProjection):
nn.init.normal_(
module.text_projection.weight,
std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
)
if isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, CLIPEncoder):
module.gradient_checkpointing = value
CLIP_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
CLIP_TEXT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
CLIP_VISION_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
CLIP_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class CLIPEncoder(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`CLIPEncoderLayer`].
Args:
        config: LanguageBindVideoConfig
"""
def __init__(self, config: LanguageBindVideoConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
inputs_embeds,
attention_mask: Optional[torch.Tensor] = None,
causal_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutput]:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
attention_mask,
causal_attention_mask,
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
class CLIPTextTransformer(nn.Module): | def __init__(self, config: CLIPTextConfig): | 2 | 2023-10-23 05:43:54+00:00 | 12k |
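A side note on the temporal attention in the record above: the einops patterns '(b t) n d -> (b n) t d' and back simply swap which axis is folded into the batch, so one plain CLIPAttention layer can attend over the time axis and another over the spatial axis. A tiny self-contained sketch of that reshaping (the shape values below are illustrative assumptions, not taken from the record):

    import torch
    from einops import rearrange

    b, t, n, d = 2, 8, 257, 1024      # clips, frames, patch tokens (incl. CLS), width
    x = torch.randn(b * t, n, d)      # frames folded into the batch axis, as in the model

    # attend over time: each spatial token becomes a "batch" entry holding t frames
    x_time = rearrange(x, '(b t) n d -> (b n) t d', t=t)

    # fold back so spatial attention again sees the n tokens of each frame
    x_back = rearrange(x_time, '(b n) t d -> (b t) n d', n=n)

    assert torch.equal(x, x_back)     # the round trip is lossless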
deepseek-ai/DreamCraft3D | extern/ldm_zero123/models/diffusion/classifier.py | [
{
"identifier": "EncoderUNetModel",
"path": "extern/ldm_zero123/modules/diffusionmodules/openaimodel.py",
"snippet": "class EncoderUNetModel(nn.Module):\n \"\"\"\n The half UNet model with attention and timestep embedding.\n For usage, see UNet.\n \"\"\"\n\n def __init__(\n self,\n image_size,\n in_channels,\n model_channels,\n out_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=0,\n channel_mult=(1, 2, 4, 8),\n conv_resample=True,\n dims=2,\n use_checkpoint=False,\n use_fp16=False,\n num_heads=1,\n num_head_channels=-1,\n num_heads_upsample=-1,\n use_scale_shift_norm=False,\n resblock_updown=False,\n use_new_attention_order=False,\n pool=\"adaptive\",\n *args,\n **kwargs,\n ):\n super().__init__()\n\n if num_heads_upsample == -1:\n num_heads_upsample = num_heads\n\n self.in_channels = in_channels\n self.model_channels = model_channels\n self.out_channels = out_channels\n self.num_res_blocks = num_res_blocks\n self.attention_resolutions = attention_resolutions\n self.dropout = dropout\n self.channel_mult = channel_mult\n self.conv_resample = conv_resample\n self.use_checkpoint = use_checkpoint\n self.dtype = th.float16 if use_fp16 else th.float32\n self.num_heads = num_heads\n self.num_head_channels = num_head_channels\n self.num_heads_upsample = num_heads_upsample\n\n time_embed_dim = model_channels * 4\n self.time_embed = nn.Sequential(\n linear(model_channels, time_embed_dim),\n nn.SiLU(),\n linear(time_embed_dim, time_embed_dim),\n )\n\n self.input_blocks = nn.ModuleList(\n [\n TimestepEmbedSequential(\n conv_nd(dims, in_channels, model_channels, 3, padding=1)\n )\n ]\n )\n self._feature_size = model_channels\n input_block_chans = [model_channels]\n ch = model_channels\n ds = 1\n for level, mult in enumerate(channel_mult):\n for _ in range(num_res_blocks):\n layers = [\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=mult * model_channels,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = mult * model_channels\n if ds in attention_resolutions:\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=num_head_channels,\n use_new_attention_order=use_new_attention_order,\n )\n )\n self.input_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n input_block_chans.append(ch)\n if level != len(channel_mult) - 1:\n out_ch = ch\n self.input_blocks.append(\n TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n down=True,\n )\n if resblock_updown\n else Downsample(\n ch, conv_resample, dims=dims, out_channels=out_ch\n )\n )\n )\n ch = out_ch\n input_block_chans.append(ch)\n ds *= 2\n self._feature_size += ch\n\n self.middle_block = TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=num_head_channels,\n use_new_attention_order=use_new_attention_order,\n ),\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n )\n self._feature_size += ch\n self.pool = pool\n if pool == \"adaptive\":\n self.out = nn.Sequential(\n normalization(ch),\n nn.SiLU(),\n nn.AdaptiveAvgPool2d((1, 1)),\n zero_module(conv_nd(dims, ch, out_channels, 1)),\n nn.Flatten(),\n )\n elif pool == \"attention\":\n assert num_head_channels 
!= -1\n self.out = nn.Sequential(\n normalization(ch),\n nn.SiLU(),\n AttentionPool2d(\n (image_size // ds), ch, num_head_channels, out_channels\n ),\n )\n elif pool == \"spatial\":\n self.out = nn.Sequential(\n nn.Linear(self._feature_size, 2048),\n nn.ReLU(),\n nn.Linear(2048, self.out_channels),\n )\n elif pool == \"spatial_v2\":\n self.out = nn.Sequential(\n nn.Linear(self._feature_size, 2048),\n normalization(2048),\n nn.SiLU(),\n nn.Linear(2048, self.out_channels),\n )\n else:\n raise NotImplementedError(f\"Unexpected {pool} pooling\")\n\n def convert_to_fp16(self):\n \"\"\"\n Convert the torso of the model to float16.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f16)\n self.middle_block.apply(convert_module_to_f16)\n\n def convert_to_fp32(self):\n \"\"\"\n Convert the torso of the model to float32.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f32)\n self.middle_block.apply(convert_module_to_f32)\n\n def forward(self, x, timesteps):\n \"\"\"\n Apply the model to an input batch.\n :param x: an [N x C x ...] Tensor of inputs.\n :param timesteps: a 1-D batch of timesteps.\n :return: an [N x K] Tensor of outputs.\n \"\"\"\n emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))\n\n results = []\n h = x.type(self.dtype)\n for module in self.input_blocks:\n h = module(h, emb)\n if self.pool.startswith(\"spatial\"):\n results.append(h.type(x.dtype).mean(dim=(2, 3)))\n h = self.middle_block(h, emb)\n if self.pool.startswith(\"spatial\"):\n results.append(h.type(x.dtype).mean(dim=(2, 3)))\n h = th.cat(results, axis=-1)\n return self.out(h)\n else:\n h = h.type(x.dtype)\n return self.out(h)"
},
{
"identifier": "UNetModel",
"path": "extern/ldm_zero123/modules/diffusionmodules/openaimodel.py",
"snippet": "class UNetModel(nn.Module):\n \"\"\"\n The full UNet model with attention and timestep embedding.\n :param in_channels: channels in the input Tensor.\n :param model_channels: base channel count for the model.\n :param out_channels: channels in the output Tensor.\n :param num_res_blocks: number of residual blocks per downsample.\n :param attention_resolutions: a collection of downsample rates at which\n attention will take place. May be a set, list, or tuple.\n For example, if this contains 4, then at 4x downsampling, attention\n will be used.\n :param dropout: the dropout probability.\n :param channel_mult: channel multiplier for each level of the UNet.\n :param conv_resample: if True, use learned convolutions for upsampling and\n downsampling.\n :param dims: determines if the signal is 1D, 2D, or 3D.\n :param num_classes: if specified (as an int), then this model will be\n class-conditional with `num_classes` classes.\n :param use_checkpoint: use gradient checkpointing to reduce memory usage.\n :param num_heads: the number of attention heads in each attention layer.\n :param num_heads_channels: if specified, ignore num_heads and instead use\n a fixed channel width per attention head.\n :param num_heads_upsample: works with num_heads to set a different number\n of heads for upsampling. Deprecated.\n :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.\n :param resblock_updown: use residual blocks for up/downsampling.\n :param use_new_attention_order: use a different attention pattern for potentially\n increased efficiency.\n \"\"\"\n\n def __init__(\n self,\n image_size,\n in_channels,\n model_channels,\n out_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=0,\n channel_mult=(1, 2, 4, 8),\n conv_resample=True,\n dims=2,\n num_classes=None,\n use_checkpoint=False,\n use_fp16=False,\n num_heads=-1,\n num_head_channels=-1,\n num_heads_upsample=-1,\n use_scale_shift_norm=False,\n resblock_updown=False,\n use_new_attention_order=False,\n use_spatial_transformer=False, # custom transformer support\n transformer_depth=1, # custom transformer support\n context_dim=None, # custom transformer support\n n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model\n legacy=True,\n disable_self_attentions=None,\n num_attention_blocks=None,\n ):\n super().__init__()\n if use_spatial_transformer:\n assert (\n context_dim is not None\n ), \"Fool!! You forgot to include the dimension of your cross-attention conditioning...\"\n\n if context_dim is not None:\n assert (\n use_spatial_transformer\n ), \"Fool!! 
You forgot to use the spatial transformer for your cross-attention conditioning...\"\n from omegaconf.listconfig import ListConfig\n\n if type(context_dim) == ListConfig:\n context_dim = list(context_dim)\n\n if num_heads_upsample == -1:\n num_heads_upsample = num_heads\n\n if num_heads == -1:\n assert (\n num_head_channels != -1\n ), \"Either num_heads or num_head_channels has to be set\"\n\n if num_head_channels == -1:\n assert (\n num_heads != -1\n ), \"Either num_heads or num_head_channels has to be set\"\n\n self.image_size = image_size\n self.in_channels = in_channels\n self.model_channels = model_channels\n self.out_channels = out_channels\n if isinstance(num_res_blocks, int):\n self.num_res_blocks = len(channel_mult) * [num_res_blocks]\n else:\n if len(num_res_blocks) != len(channel_mult):\n raise ValueError(\n \"provide num_res_blocks either as an int (globally constant) or \"\n \"as a list/tuple (per-level) with the same length as channel_mult\"\n )\n self.num_res_blocks = num_res_blocks\n # self.num_res_blocks = num_res_blocks\n if disable_self_attentions is not None:\n # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not\n assert len(disable_self_attentions) == len(channel_mult)\n if num_attention_blocks is not None:\n assert len(num_attention_blocks) == len(self.num_res_blocks)\n assert all(\n map(\n lambda i: self.num_res_blocks[i] >= num_attention_blocks[i],\n range(len(num_attention_blocks)),\n )\n )\n print(\n f\"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. \"\n f\"This option has LESS priority than attention_resolutions {attention_resolutions}, \"\n f\"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, \"\n f\"attention will still not be set.\"\n ) # todo: convert to warning\n\n self.attention_resolutions = attention_resolutions\n self.dropout = dropout\n self.channel_mult = channel_mult\n self.conv_resample = conv_resample\n self.num_classes = num_classes\n self.use_checkpoint = use_checkpoint\n self.dtype = th.float16 if use_fp16 else th.float32\n self.num_heads = num_heads\n self.num_head_channels = num_head_channels\n self.num_heads_upsample = num_heads_upsample\n self.predict_codebook_ids = n_embed is not None\n\n time_embed_dim = model_channels * 4\n self.time_embed = nn.Sequential(\n linear(model_channels, time_embed_dim),\n nn.SiLU(),\n linear(time_embed_dim, time_embed_dim),\n )\n\n if self.num_classes is not None:\n self.label_emb = nn.Embedding(num_classes, time_embed_dim)\n\n self.input_blocks = nn.ModuleList(\n [\n TimestepEmbedSequential(\n conv_nd(dims, in_channels, model_channels, 3, padding=1)\n )\n ]\n )\n self._feature_size = model_channels\n input_block_chans = [model_channels]\n ch = model_channels\n ds = 1\n for level, mult in enumerate(channel_mult):\n for nr in range(self.num_res_blocks[level]):\n layers = [\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=mult * model_channels,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = mult * model_channels\n if ds in attention_resolutions:\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n # num_heads = 1\n dim_head = (\n ch // num_heads\n if use_spatial_transformer\n else num_head_channels\n )\n if exists(disable_self_attentions):\n disabled_sa = disable_self_attentions[level]\n else:\n disabled_sa = False\n\n if (\n 
not exists(num_attention_blocks)\n or nr < num_attention_blocks[level]\n ):\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n )\n if not use_spatial_transformer\n else SpatialTransformer(\n ch,\n num_heads,\n dim_head,\n depth=transformer_depth,\n context_dim=context_dim,\n disable_self_attn=disabled_sa,\n )\n )\n self.input_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n input_block_chans.append(ch)\n if level != len(channel_mult) - 1:\n out_ch = ch\n self.input_blocks.append(\n TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n down=True,\n )\n if resblock_updown\n else Downsample(\n ch, conv_resample, dims=dims, out_channels=out_ch\n )\n )\n )\n ch = out_ch\n input_block_chans.append(ch)\n ds *= 2\n self._feature_size += ch\n\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n # num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n self.middle_block = TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n )\n if not use_spatial_transformer\n else SpatialTransformer( # always uses a self-attn\n ch,\n num_heads,\n dim_head,\n depth=transformer_depth,\n context_dim=context_dim,\n ),\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n )\n self._feature_size += ch\n\n self.output_blocks = nn.ModuleList([])\n for level, mult in list(enumerate(channel_mult))[::-1]:\n for i in range(self.num_res_blocks[level] + 1):\n ich = input_block_chans.pop()\n layers = [\n ResBlock(\n ch + ich,\n time_embed_dim,\n dropout,\n out_channels=model_channels * mult,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = model_channels * mult\n if ds in attention_resolutions:\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n # num_heads = 1\n dim_head = (\n ch // num_heads\n if use_spatial_transformer\n else num_head_channels\n )\n if exists(disable_self_attentions):\n disabled_sa = disable_self_attentions[level]\n else:\n disabled_sa = False\n\n if (\n not exists(num_attention_blocks)\n or i < num_attention_blocks[level]\n ):\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads_upsample,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n )\n if not use_spatial_transformer\n else SpatialTransformer(\n ch,\n num_heads,\n dim_head,\n depth=transformer_depth,\n context_dim=context_dim,\n disable_self_attn=disabled_sa,\n )\n )\n if level and i == self.num_res_blocks[level]:\n out_ch = ch\n layers.append(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n up=True,\n )\n if resblock_updown\n else 
Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)\n )\n ds //= 2\n self.output_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n\n self.out = nn.Sequential(\n normalization(ch),\n nn.SiLU(),\n zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),\n )\n if self.predict_codebook_ids:\n self.id_predictor = nn.Sequential(\n normalization(ch),\n conv_nd(dims, model_channels, n_embed, 1),\n # nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits\n )\n\n def convert_to_fp16(self):\n \"\"\"\n Convert the torso of the model to float16.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f16)\n self.middle_block.apply(convert_module_to_f16)\n self.output_blocks.apply(convert_module_to_f16)\n\n def convert_to_fp32(self):\n \"\"\"\n Convert the torso of the model to float32.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f32)\n self.middle_block.apply(convert_module_to_f32)\n self.output_blocks.apply(convert_module_to_f32)\n\n def forward(self, x, timesteps=None, context=None, y=None, **kwargs):\n \"\"\"\n Apply the model to an input batch.\n :param x: an [N x C x ...] Tensor of inputs.\n :param timesteps: a 1-D batch of timesteps.\n :param context: conditioning plugged in via crossattn\n :param y: an [N] Tensor of labels, if class-conditional.\n :return: an [N x C x ...] Tensor of outputs.\n \"\"\"\n assert (y is not None) == (\n self.num_classes is not None\n ), \"must specify y if and only if the model is class-conditional\"\n hs = []\n t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)\n emb = self.time_embed(t_emb)\n\n if self.num_classes is not None:\n assert y.shape == (x.shape[0],)\n emb = emb + self.label_emb(y)\n\n h = x.type(self.dtype)\n for module in self.input_blocks:\n h = module(h, emb, context)\n hs.append(h)\n h = self.middle_block(h, emb, context)\n for module in self.output_blocks:\n h = th.cat([h, hs.pop()], dim=1)\n h = module(h, emb, context)\n h = h.type(x.dtype)\n if self.predict_codebook_ids:\n return self.id_predictor(h)\n else:\n return self.out(h)"
},
{
"identifier": "default",
"path": "extern/ldm_zero123/util.py",
"snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d"
},
{
"identifier": "instantiate_from_config",
"path": "extern/ldm_zero123/util.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))"
},
{
"identifier": "ismap",
"path": "extern/ldm_zero123/util.py",
"snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)"
},
{
"identifier": "log_txt_as_img",
"path": "extern/ldm_zero123/util.py",
"snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype(\"data/DejaVuSans.ttf\", size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(\n xc[bi][start : start + nc] for start in range(0, len(xc[bi]), nc)\n )\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts"
}
] | import os
import pytorch_lightning as pl
import torch
from copy import deepcopy
from glob import glob
from einops import rearrange
from natsort import natsorted
from omegaconf import OmegaConf
from torch.nn import functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from extern.ldm_zero123.modules.diffusionmodules.openaimodel import (
EncoderUNetModel,
UNetModel,
)
from extern.ldm_zero123.util import (
default,
instantiate_from_config,
ismap,
log_txt_as_img,
) | 7,969 | def compute_top_k(self, logits, labels, k, reduction="mean"):
_, top_ks = torch.topk(logits, k, dim=1)
if reduction == "mean":
return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item()
elif reduction == "none":
return (top_ks == labels[:, None]).float().sum(dim=-1)
def on_train_epoch_start(self):
# save some memory
self.diffusion_model.model.to("cpu")
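# write_logs records the mean loss and top-1/top-5 accuracies under a train/ or val/ prefix, plus the current learning rate.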
@torch.no_grad()
def write_logs(self, loss, logits, targets):
log_prefix = "train" if self.training else "val"
log = {}
log[f"{log_prefix}/loss"] = loss.mean()
log[f"{log_prefix}/acc@1"] = self.compute_top_k(
logits, targets, k=1, reduction="mean"
)
log[f"{log_prefix}/acc@5"] = self.compute_top_k(
logits, targets, k=5, reduction="mean"
)
self.log_dict(
log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True
)
self.log("loss", log[f"{log_prefix}/loss"], prog_bar=True, logger=False)
self.log(
"global_step", self.global_step, logger=False, on_epoch=False, prog_bar=True
)
lr = self.optimizers().param_groups[0]["lr"]
self.log("lr_abs", lr, on_step=True, logger=True, on_epoch=False, prog_bar=True)
def shared_step(self, batch, t=None):
x, *_ = self.diffusion_model.get_input(
batch, k=self.diffusion_model.first_stage_key
)
targets = self.get_conditioning(batch)
if targets.dim() == 4:
targets = targets.argmax(dim=1)
if t is None:
t = torch.randint(
0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device
).long()
else:
t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long()
x_noisy = self.get_x_noisy(x, t)
logits = self(x_noisy, t)
loss = F.cross_entropy(logits, targets, reduction="none")
self.write_logs(loss.detach(), logits.detach(), targets.detach())
loss = loss.mean()
return loss, logits, x_noisy, targets
def training_step(self, batch, batch_idx):
loss, *_ = self.shared_step(batch)
return loss
def reset_noise_accs(self):
self.noisy_acc = {
t: {"acc@1": [], "acc@5": []}
for t in range(
0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t
)
}
def on_validation_start(self):
self.reset_noise_accs()
@torch.no_grad()
def validation_step(self, batch, batch_idx):
loss, *_ = self.shared_step(batch)
for t in self.noisy_acc:
_, logits, _, targets = self.shared_step(batch, t)
self.noisy_acc[t]["acc@1"].append(
self.compute_top_k(logits, targets, k=1, reduction="mean")
)
self.noisy_acc[t]["acc@5"].append(
self.compute_top_k(logits, targets, k=5, reduction="mean")
)
return loss
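# configure_optimizers: AdamW over the classifier parameters only; optionally wraps it in a per-step LambdaLR schedule built from scheduler_config.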
def configure_optimizers(self):
optimizer = AdamW(
self.model.parameters(),
lr=self.learning_rate,
weight_decay=self.weight_decay,
)
if self.use_scheduler:
scheduler = instantiate_from_config(self.scheduler_config)
print("Setting up LambdaLR scheduler...")
scheduler = [
{
"scheduler": LambdaLR(optimizer, lr_lambda=scheduler.schedule),
"interval": "step",
"frequency": 1,
}
]
return [optimizer], scheduler
return optimizer
@torch.no_grad()
def log_images(self, batch, N=8, *args, **kwargs):
log = dict()
x = self.get_input(batch, self.diffusion_model.first_stage_key)
log["inputs"] = x
y = self.get_conditioning(batch)
if self.label_key == "class_label":
y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
log["labels"] = y
|
__models__ = {"class_label": EncoderUNetModel, "segmentation": UNetModel}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
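# NoisyLatentImageClassifier trains a classifier on top of a frozen latent diffusion model:
# it noises the first-stage latent to a sampled timestep and predicts the conditioning target
# (a class label or a downsampled segmentation map) from the noisy latent.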
class NoisyLatentImageClassifier(pl.LightningModule):
def __init__(
self,
diffusion_path,
num_classes,
ckpt_path=None,
pool="attention",
label_key=None,
diffusion_ckpt_path=None,
scheduler_config=None,
weight_decay=1.0e-2,
log_steps=10,
monitor="val/loss",
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.num_classes = num_classes
# get latest config of diffusion model
diffusion_config = natsorted(
glob(os.path.join(diffusion_path, "configs", "*-project.yaml"))
)[-1]
self.diffusion_config = OmegaConf.load(diffusion_config).model
self.diffusion_config.params.ckpt_path = diffusion_ckpt_path
self.load_diffusion()
self.monitor = monitor
self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1
self.log_time_interval = self.diffusion_model.num_timesteps // log_steps
self.log_steps = log_steps
self.label_key = (
label_key
if not hasattr(self.diffusion_model, "cond_stage_key")
else self.diffusion_model.cond_stage_key
)
assert (
self.label_key is not None
), "label_key neither in diffusion model nor in model.params"
if self.label_key not in __models__:
raise NotImplementedError()
self.load_classifier(ckpt_path, pool)
self.scheduler_config = scheduler_config
self.use_scheduler = self.scheduler_config is not None
self.weight_decay = weight_decay
def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
sd = torch.load(path, map_location="cpu")
if "state_dict" in list(sd.keys()):
sd = sd["state_dict"]
keys = list(sd.keys())
for k in keys:
for ik in ignore_keys:
if k.startswith(ik):
print("Deleting key {} from state_dict.".format(k))
del sd[k]
missing, unexpected = (
self.load_state_dict(sd, strict=False)
if not only_model
else self.model.load_state_dict(sd, strict=False)
)
print(
f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys"
)
if len(missing) > 0:
print(f"Missing Keys: {missing}")
if len(unexpected) > 0:
print(f"Unexpected Keys: {unexpected}")
def load_diffusion(self):
model = instantiate_from_config(self.diffusion_config)
self.diffusion_model = model.eval()
self.diffusion_model.train = disabled_train
for param in self.diffusion_model.parameters():
param.requires_grad = False
def load_classifier(self, ckpt_path, pool):
model_config = deepcopy(self.diffusion_config.params.unet_config.params)
model_config.in_channels = (
self.diffusion_config.params.unet_config.params.out_channels
)
model_config.out_channels = self.num_classes
if self.label_key == "class_label":
model_config.pool = pool
self.model = __models__[self.label_key](**model_config)
if ckpt_path is not None:
print(
"#####################################################################"
)
print(f'load from ckpt "{ckpt_path}"')
print(
"#####################################################################"
)
self.init_from_ckpt(ckpt_path)
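# get_x_noisy diffuses the clean latent x to timestep t via q_sample, optionally using a continuous noise level.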
@torch.no_grad()
def get_x_noisy(self, x, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x))
continuous_sqrt_alpha_cumprod = None
if self.diffusion_model.use_continuous_noise:
continuous_sqrt_alpha_cumprod = (
self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1)
)
# todo: make sure t+1 is correct here
return self.diffusion_model.q_sample(
x_start=x,
t=t,
noise=noise,
continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod,
)
def forward(self, x_noisy, t, *args, **kwargs):
return self.model(x_noisy, t)
@torch.no_grad()
def get_input(self, batch, k):
x = batch[k]
if len(x.shape) == 3:
x = x[..., None]
x = rearrange(x, "b h w c -> b c h w")
x = x.to(memory_format=torch.contiguous_format).float()
return x
@torch.no_grad()
def get_conditioning(self, batch, k=None):
if k is None:
k = self.label_key
assert k is not None, "Needs to provide label key"
targets = batch[k].to(self.device)
if self.label_key == "segmentation":
targets = rearrange(targets, "b h w c -> b c h w")
for down in range(self.numd):
h, w = targets.shape[-2:]
targets = F.interpolate(targets, size=(h // 2, w // 2), mode="nearest")
# targets = rearrange(targets,'b c h w -> b h w c')
return targets
def compute_top_k(self, logits, labels, k, reduction="mean"):
_, top_ks = torch.topk(logits, k, dim=1)
if reduction == "mean":
return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item()
elif reduction == "none":
return (top_ks == labels[:, None]).float().sum(dim=-1)
def on_train_epoch_start(self):
# save some memory
self.diffusion_model.model.to("cpu")
@torch.no_grad()
def write_logs(self, loss, logits, targets):
log_prefix = "train" if self.training else "val"
log = {}
log[f"{log_prefix}/loss"] = loss.mean()
log[f"{log_prefix}/acc@1"] = self.compute_top_k(
logits, targets, k=1, reduction="mean"
)
log[f"{log_prefix}/acc@5"] = self.compute_top_k(
logits, targets, k=5, reduction="mean"
)
self.log_dict(
log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True
)
self.log("loss", log[f"{log_prefix}/loss"], prog_bar=True, logger=False)
self.log(
"global_step", self.global_step, logger=False, on_epoch=False, prog_bar=True
)
lr = self.optimizers().param_groups[0]["lr"]
self.log("lr_abs", lr, on_step=True, logger=True, on_epoch=False, prog_bar=True)
def shared_step(self, batch, t=None):
x, *_ = self.diffusion_model.get_input(
batch, k=self.diffusion_model.first_stage_key
)
targets = self.get_conditioning(batch)
if targets.dim() == 4:
targets = targets.argmax(dim=1)
if t is None:
t = torch.randint(
0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device
).long()
else:
t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long()
x_noisy = self.get_x_noisy(x, t)
logits = self(x_noisy, t)
loss = F.cross_entropy(logits, targets, reduction="none")
self.write_logs(loss.detach(), logits.detach(), targets.detach())
loss = loss.mean()
return loss, logits, x_noisy, targets
def training_step(self, batch, batch_idx):
loss, *_ = self.shared_step(batch)
return loss
def reset_noise_accs(self):
self.noisy_acc = {
t: {"acc@1": [], "acc@5": []}
for t in range(
0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t
)
}
def on_validation_start(self):
self.reset_noise_accs()
@torch.no_grad()
def validation_step(self, batch, batch_idx):
loss, *_ = self.shared_step(batch)
for t in self.noisy_acc:
_, logits, _, targets = self.shared_step(batch, t)
self.noisy_acc[t]["acc@1"].append(
self.compute_top_k(logits, targets, k=1, reduction="mean")
)
self.noisy_acc[t]["acc@5"].append(
self.compute_top_k(logits, targets, k=5, reduction="mean")
)
return loss
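# configure_optimizers: AdamW over the classifier parameters only; optionally wraps it in a per-step LambdaLR schedule built from scheduler_config.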
def configure_optimizers(self):
optimizer = AdamW(
self.model.parameters(),
lr=self.learning_rate,
weight_decay=self.weight_decay,
)
if self.use_scheduler:
scheduler = instantiate_from_config(self.scheduler_config)
print("Setting up LambdaLR scheduler...")
scheduler = [
{
"scheduler": LambdaLR(optimizer, lr_lambda=scheduler.schedule),
"interval": "step",
"frequency": 1,
}
]
return [optimizer], scheduler
return optimizer
@torch.no_grad()
def log_images(self, batch, N=8, *args, **kwargs):
log = dict()
x = self.get_input(batch, self.diffusion_model.first_stage_key)
log["inputs"] = x
y = self.get_conditioning(batch)
if self.label_key == "class_label":
y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
log["labels"] = y
| if ismap(y): | 4 | 2023-10-23 07:40:20+00:00 | 12k |
YORG-AI/Open-Assistant | package/src/yorgassistant/core/assistant/async_threads.py | [
{
"identifier": "Assistants",
"path": "package/src/yorgassistant/core/assistant/assistant.py",
"snippet": "class Assistants():\n def __init__(self, config,yaml_path:Optional[str] = None):\n self.config = config\n YamlPathConfig.assistants_yaml_path = yaml_path if yaml_path else 'assistants.yaml'\n \n def set_assistants_yaml_path(yaml_path: str):\n # 检查 yaml_path 是否为绝对路径\n if not os.path.isabs(yaml_path):\n # 获取调用此方法的栈帧\n stack = inspect.stack()\n caller_frame = stack[1]\n # 获取调用者的文件路径\n caller_path = caller_frame.filename\n # 获取调用者的目录路径\n caller_dir = os.path.dirname(caller_path)\n # 构建 yaml 文件的绝对路径\n full_yaml_path = os.path.join(caller_dir, yaml_path)\n else:\n full_yaml_path = yaml_path\n\n # 获取 yaml 文件所在的目录\n yaml_dir = os.path.dirname(full_yaml_path)\n # 如果目录不存在,则创建它\n os.makedirs(yaml_dir, exist_ok=True)\n # 设置 assistants_yaml_path\n YamlPathConfig.assistants_yaml_path = full_yaml_path\n\n def save_to_yaml(self):\n # 构建 assistants.yaml 文件的绝对路径\n assistants_yaml_path = YamlPathConfig.assistants_yaml_path\n # 检查文件是否存在,如果不存在,则创建一个空的yaml文件\n if not os.path.exists(assistants_yaml_path):\n with open(assistants_yaml_path, 'w') as file:\n file.write('') # 创建一个空文件\n # 使用绝对路径打开 assistants.yaml 文件\n with open(assistants_yaml_path, 'r') as file:\n data = yaml.safe_load(file) or []\n # 查找具有相同 id 的 assistant\n for i, d in enumerate(data):\n if d['id'] == self.config.id:\n # 如果找到了,就更新它\n data[i] = self.config.__dict__\n break\n else:\n # 如果没有找到,就添加新的 assistant 到列表中\n data.append(self.config.__dict__)\n # 写回 YAML 文件\n with open(assistants_yaml_path, 'w') as file:\n yaml.dump(data, file)\n\n @property\n def id(self):\n return self.config.id\n\n @property\n def name(self):\n return self.config.name\n\n @name.setter\n def name(self, value):\n self.config.name = value\n self.save_to_yaml() # 更新 YAML 文件\n\n @property\n def instructions(self):\n return self.config.instructions\n\n @instructions.setter\n def instructions(self, value):\n self.config.instructions = value\n\n @property\n def description(self):\n return self.config.description\n\n @description.setter\n def description(self, value):\n self.config.description = value\n\n @property\n def tools(self):\n return self.config.tools\n\n @tools.setter\n def tools(self, value):\n self.config.tools = value\n self.save_to_yaml() # 更新 YAML 文件\n\n @property\n def model(self):\n return self.config.model\n\n @model.setter\n def model(self, value):\n self.config.model = value\n self.save_to_yaml() # 更新 YAML 文件\n\n def get_tools_type_list(self):\n return [tool['type'] for tool in self.config.tools]\n\n @staticmethod\n def create(name: str = None, instructions: str = None, tools: list[dict] = [{'type':''}], model: str = 'gpt-4', description: str = None, file_ids: list = None) -> 'Assistants':\n # 创建配置和 Assistants 对象\n config = AssistantConfig(\n id=str(uuid.uuid4()),\n created_at=int(time.time()),\n name=name,\n description=description,\n instructions=instructions,\n tools=tools,\n model=model,\n file_ids=file_ids if file_ids is not None else [],\n )\n assistant = Assistants(config,YamlPathConfig.assistants_yaml_path)\n assistant.save_to_yaml() # 保存到 YAML 文件\n return assistant\n \n @staticmethod\n def get_all_assistants() -> List[Dict[str, Any]]:\n \"\"\"\n 读取 YAML 文件并返回所有 assistants 的信息列表。\n \"\"\"\n # 确保 YAML 文件路径已经被设置\n if YamlPathConfig.assistants_yaml_path:\n if not os.path.isfile(YamlPathConfig.assistants_yaml_path):\n # 如果文件路径存在但文件不存在,则创建一个空文件\n with open(YamlPathConfig.assistants_yaml_path, 'w') as file:\n yaml.dump([], file)\n else:\n raise FileNotFoundError(\"The threads YAML file path is not set.\")\n\n # 读取 YAML 文件\n with 
open(YamlPathConfig.assistants_yaml_path, 'r') as file:\n assistants_data = yaml.safe_load(file) or []\n # 使用 from_dict 方法将每个字典转换为 AssistantConfig 实例\n assistants_list = []\n for item in assistants_data:\n config = AssistantConfig(**item)\n assistants_list.append(config)\n return assistants_list\n @classmethod\n def from_id(cls, id: str) -> 'Assistants':\n # 使用传入的 yaml_path 参数打开 YAML 文件\n with open(YamlPathConfig.assistants_yaml_path, 'r') as file:\n data = yaml.safe_load(file) or []\n # 查找具有相同 id 的配置\n for d in data:\n if d['id'] == id:\n # 如果找到了,就用这个配置创建一个新的 Assistants 对象\n config = AssistantConfig(**d)\n return cls(config, YamlPathConfig.assistants_yaml_path) # 使用传入的 yaml_path 创建 Assistants 实例\n # 如果没有找到,就抛出一个异常\n raise ValueError(f'No assistant with id {id} found in YAML file.')\n \n @classmethod\n def delete_by_id(cls, id: str):\n\n # 使用绝对路径打开 assistants.yaml 文件\n with open(YamlPathConfig.assistants_yaml_path, 'r') as file:\n data = yaml.safe_load(file) or []\n\n # 查找具有相同 id 的 assistant\n for i, d in enumerate(data):\n if d['id'] == id:\n # 如果找到了,就删除它\n del data[i]\n break\n else:\n # 如果没有找到,就抛出一个异常\n raise ValueError(f'No assistant with id {id} found in YAML file.')\n\n # 写回 YAML 文件\n with open(YamlPathConfig.assistants_yaml_path, 'w') as file:\n yaml.dump(data, file)"
},
{
"identifier": "OpenAINode",
"path": "package/src/yorgassistant/core/nodes/openai/openai.py",
"snippet": "class OpenAINode(BaseNode):\n config: NodeConfig = NodeConfig(**openai_node_config)\n\n history: list[dict[str, any]]\n functions: list[dict[str, any]]\n\n cur_role: Optional[str]\n cur_content: Optional[str]\n\n def __init__(self):\n super().__init__()\n\n self.history = []\n self.functions = []\n\n self.cur_role = None\n self.cur_content = None\n\n def complete(self, input: CompleteInput):\n \"\"\"\n Complete with only current history. No extra messages.\n \"\"\"\n return self._make_completion([], input)\n\n # TODO: generalize these chat functions\n def chat(self, input: ChatInput):\n \"\"\"\n Chat with OpenAI's model with simple text.\n \"\"\"\n return self._make_completion(\n [\n Message(\n role=\"user\",\n content=input.message_text,\n )\n ],\n input,\n )\n\n def chat_with_prompt_template(self, input: ChatWithPromptTemplateInput):\n \"\"\"\n Chat with OpenAI's model with a specific prompt template.\n \"\"\"\n return self._make_completion(\n [\n Message(\n role=\"user\",\n content=input.prompt_template.format(**input.params),\n )\n ],\n input,\n )\n\n def chat_with_message(self, input: ChatWithMessageInput):\n \"\"\"\n Chat with OpenAI's model with a specific message dict.\n \"\"\"\n return self._make_completion([input.message], input)\n\n def chat_with_messages(self, input: ChatWithMessagesInput):\n \"\"\"\n Chat with OpenAI's model with a specific message dict.\n \"\"\"\n return self._make_completion(input.messages, input)\n\n def use_old_openai_with_prompt(self, input: OldCompleteInput):\n return self._make_old_completion(input.prompt, input)\n\n def _make_old_completion(\n self, prompt: str, input: OldCompleteConfig\n ) -> OpenAIOldResp:\n \"\"\"\n Make a completion with the given messages.\n \"\"\"\n\n kwargs = {\"model\": input.model, \"max_tokens\": 1096}\n\n kwargs[\"prompt\"] = prompt\n # set streaming if needed\n if input.use_streaming:\n kwargs[\"stream\"] = True\n\n # TODO: add exception handling\n try:\n client = OpenAI(api_key=os.getenv(\"OPENAI_CHAT_API_KEY\"))\n response = client.completions.create(**kwargs)\n except Exception as e:\n logging.warn(f\"openai_node._make_completion: error occurred: {e}\")\n return OpenAIOldResp(\n text=f\"Error occurred: {e}\",\n finish_reason=\"error\",\n )\n\n if input.use_streaming:\n # TODO 目前不支持流式处理\n resp = OpenAIOldResp(text=\"\", finish_reason=\"\")\n for completion in response:\n resp.text += completion[\"choices\"][0][\"text\"]\n if choice.finish_reason:\n resp.finish_reason = completion[\"choices\"][0][\"finish_reason\"]\n break\n return resp\n\n resp = OpenAIOldResp(**response.choices[0].model_dump())\n return resp\n\n def _make_completion(\n self, messages: list[Message], input: ChatConfig\n ) -> OpenAIResp | OpenAIStreamingResp:\n \"\"\"\n Make a completion with the given messages.\n \"\"\"\n\n kwargs = {\n \"model\": input.model,\n }\n\n cur_messages = []\n\n # if history is empty, add a default system message\n if len(self.history) == 0:\n cur_messages.append(\n Message(\n role=\"system\",\n content=\"You are a helpful AI assistant. 
You should answer the user's questions and help them with their tasks.\",\n ).dict(exclude_none=True)\n )\n else:\n cur_messages += self.history\n\n # append history if needed\n if input.append_history:\n for message in messages:\n self.add_single_message(message)\n\n # add all input messages to argument `messages`\n for message in messages:\n cur_messages.append(message.dict(exclude_none=True))\n\n kwargs[\"messages\"] = tt.trim(cur_messages, input.model, max_tokens=9999)\n\n # add function definitions if exists\n if len(self.functions) > 0:\n kwargs[\"functions\"] = self.functions\n kwargs[\"function_call\"] = \"auto\"\n\n # set streaming if needed\n if input.use_streaming:\n kwargs[\"stream\"] = True\n\n # TODO: add exception handling\n try:\n client = OpenAI(api_key=os.getenv(\"OPENAI_CHAT_API_KEY\"))\n response = client.chat.completions.create(**kwargs)\n except Exception as e:\n logging.warn(f\"openai_node._make_completion: error occurred: {e}\")\n return OpenAIResp(\n message=Message(\n role=\"system\",\n content=f\"Error occurred: {e}\",\n ),\n finish_reason=\"error\",\n )\n\n if input.use_streaming:\n resp = OpenAIStreamingResp(**response.choices[0].dict())\n if input.append_history:\n self.history.append(resp.delta.dict(exclude_none=True))\n return resp\n\n resp = OpenAIResp(**response.choices[0].dict())\n if input.append_history:\n self.history.append(resp.message.dict(exclude_none=True))\n return resp\n\n def add_function(self, func_def: FunctionDefinition):\n self.functions.append(\n func_def.dict()\n ) # redefined dict() doesn't have exclude_none arg\n\n def add_single_message(self, msg: Message):\n if self.cur_role is not None and self.cur_content is not None:\n self.history.append(\n Message(\n role=self.cur_role,\n content=self.cur_content,\n ).dict(exclude_none=True)\n )\n self.cur_role = None\n self.cur_content = None\n\n self.history.append(msg.dict(exclude_none=True))\n\n def add_system_message(self, content: str):\n self.add_single_message(\n Message(\n role=\"system\",\n content=content,\n )\n )\n\n def add_role(self, role: str):\n if self.cur_role is not None and self.cur_content is not None:\n self.add_single_message(\n Message(\n role=self.cur_role,\n content=self.cur_content,\n )\n )\n\n self.cur_role = role\n\n def add_content(self, content: str):\n if self.cur_content is not None:\n self.cur_content += content\n else:\n self.cur_content = content"
},
{
"identifier": "AsyncOpenAINode",
"path": "package/src/yorgassistant/core/nodes/openai/openai.py",
"snippet": "class AsyncOpenAINode(BaseNode):\n config: NodeConfig = NodeConfig(**openai_node_config)\n\n history: list[dict[str, any]]\n functions: list[dict[str, any]]\n\n cur_role: Optional[str]\n cur_content: Optional[str]\n\n def __init__(self):\n super().__init__()\n\n self.history = []\n self.functions = []\n\n self.cur_role = None\n self.cur_content = None\n\n openai.api_key = os.getenv(\"OPENAI_CHAT_API_KEY\")\n openai.api_base = os.getenv(\"OPENAI_CHAT_API_BASE\")\n\n async def complete(self, input: CompleteInput):\n \"\"\"\n Complete with only current history. No extra messages.\n \"\"\"\n return await self._make_completion([], input)\n\n # TODO: generalize these chat functions\n async def chat(self, input: ChatInput):\n \"\"\"\n Chat with OpenAI's model with simple text.\n \"\"\"\n return await self._make_completion(\n [\n Message(\n role=\"user\",\n content=input.message_text,\n )\n ],\n input,\n )\n\n async def chat_with_prompt_template(self, input: ChatWithPromptTemplateInput):\n \"\"\"\n Chat with OpenAI's model with a specific prompt template.\n \"\"\"\n return await self._make_completion(\n [\n Message(\n role=\"user\",\n content=input.prompt_template.format(**input.params),\n )\n ],\n input,\n )\n\n async def chat_with_message(self, input: ChatWithMessageInput):\n \"\"\"\n Chat with OpenAI's model with a specific message dict.\n \"\"\"\n return await self._make_completion([input.message], input)\n\n async def chat_with_messages(self, input: ChatWithMessagesInput):\n \"\"\"\n Chat with OpenAI's model with a specific message dict.\n \"\"\"\n return await self._make_completion(input.messages, input)\n\n async def use_old_openai_with_prompt(self, input: OldCompleteInput):\n return await self._make_old_completion(input.prompt, input)\n\n async def _make_old_completion(\n self, prompt: str, input: OldCompleteConfig\n ) -> OpenAIOldResp:\n \"\"\"\n Make a completion with the given messages.\n \"\"\"\n\n kwargs = {\"model\": input.model, \"max_tokens\": 1096}\n\n kwargs[\"prompt\"] = prompt\n # set streaming if needed\n if input.use_streaming:\n kwargs[\"stream\"] = True\n\n # TODO: add exception handling\n try:\n client = OpenAI(api_key=os.getenv(\"OPENAI_CHAT_API_KEY\"))\n response = client.completions.create(**kwargs)\n except Exception as e:\n logging.warn(f\"openai_node._make_completion: error occurred: {e}\")\n return OpenAIOldResp(\n text=f\"Error occurred: {e}\",\n finish_reason=\"error\",\n )\n\n if input.use_streaming:\n # TODO 目前不支持流式处理\n resp = OpenAIOldResp(text=\"\", finish_reason=\"\")\n for completion in response:\n resp.text += completion[\"choices\"][0][\"text\"]\n if choice.finish_reason:\n resp.finish_reason = completion[\"choices\"][0][\"finish_reason\"]\n break\n return resp\n\n resp = OpenAIOldResp(**response.choices[0].model_dump())\n return resp\n\n async def _make_completion(\n self, messages: list[Message], input: ChatConfig\n ) -> OpenAIResp | OpenAIStreamingResp:\n \"\"\"\n Make a completion with the given messages.\n \"\"\"\n\n kwargs = {\n \"model\": input.model,\n }\n\n cur_messages = []\n\n # if history is empty, add a default system message\n if len(self.history) == 0:\n cur_messages.append(\n Message(\n role=\"system\",\n content=\"You are a helpful AI assistant. 
You should answer the user's questions and help them with their tasks.\",\n ).dict(exclude_none=True)\n )\n else:\n cur_messages += self.history\n\n # append history if needed\n if input.append_history:\n for message in messages:\n self.add_single_message(message)\n\n # add all input messages to argument `messages`\n for message in messages:\n cur_messages.append(message.dict(exclude_none=True))\n\n kwargs[\"messages\"] = tt.trim(cur_messages, input.model, max_tokens=9999)\n\n # add function definitions if exists\n if len(self.functions) > 0:\n kwargs[\"functions\"] = self.functions\n kwargs[\"function_call\"] = \"auto\"\n\n # set streaming if needed\n if input.use_streaming:\n kwargs[\"stream\"] = True\n\n # TODO: add exception handling\n try:\n client = AsyncOpenAI(api_key=os.getenv(\"OPENAI_CHAT_API_KEY\"))\n response = await client.chat.completions.create(**kwargs)\n except Exception as e:\n return OpenAIResp(\n message=Message(\n role=\"system\",\n content=f\"Error occurred: {e}\",\n ),\n finish_reason=\"error\",\n )\n\n if input.use_streaming:\n resp = OpenAIStreamingResp(**response.choices[0].dict())\n if input.append_history:\n self.history.append(resp.delta.dict(exclude_none=True))\n return resp\n\n resp = OpenAIResp(**response.choices[0].dict())\n if input.append_history:\n self.history.append(resp.message.dict(exclude_none=True))\n return resp\n\n def add_function(self, func_def: FunctionDefinition):\n self.functions.append(\n func_def.dict()\n ) # redefined dict() doesn't have exclude_none arg\n\n def add_single_message(self, msg: Message):\n if self.cur_role is not None and self.cur_content is not None:\n self.history.append(\n Message(\n role=self.cur_role,\n content=self.cur_content,\n ).dict(exclude_none=True)\n )\n self.cur_role = None\n self.cur_content = None\n\n self.history.append(msg.dict(exclude_none=True))\n\n def add_system_message(self, content: str):\n self.add_single_message(\n Message(\n role=\"system\",\n content=content,\n )\n )\n\n def add_role(self, role: str):\n if self.cur_role is not None and self.cur_content is not None:\n self.add_single_message(\n Message(\n role=self.cur_role,\n content=self.cur_content,\n )\n )\n\n self.cur_role = role\n\n def add_content(self, content: str):\n if self.cur_content is not None:\n self.cur_content += content\n else:\n self.cur_content = content"
},
{
"identifier": "Tools",
"path": "package/src/yorgassistant/core/assistant/tools/tools.py",
"snippet": "class Tools:\n tools: dict[str, Tool]\n\n def __init__(self):\n self.tools = {}\n # 获取调用此方法的栈帧\n stack = inspect.stack()\n caller_frame = stack[1]\n # 获取调用者的文件路径\n caller_path = caller_frame.filename\n # 获取调用者的目录路径\n caller_dir = os.path.dirname(caller_path)\n # 构建 openai.yaml 文件的绝对路径\n yaml_file_path = os.path.join(caller_dir, YamlPathConfig.tools_yaml_path)\n tools_yaml_path = yaml_file_path\n # 读取 tools.yaml 文件,初始化所有 tools\n with open(tools_yaml_path, \"r\") as f:\n config_obj = yaml.safe_load(f)\n for tool_name, tool_config in config_obj[\"tools\"].items():\n self.tools[tool_name] = Tool(config=ToolConfig(**tool_config))\n\n def set_tools_yaml_path(yaml_path:str):\n # 检查 yaml_path 是否为绝对路径\n if not os.path.isabs(yaml_path):\n # 获取调用此方法的栈帧\n stack = inspect.stack()\n caller_frame = stack[1]\n # 获取调用者的文件路径\n caller_path = caller_frame.filename\n # 获取调用者的目录路径\n caller_dir = os.path.dirname(caller_path)\n # 构建 yaml 文件的绝对路径\n full_yaml_path = os.path.join(caller_dir, yaml_path)\n else:\n full_yaml_path = yaml_path\n # 获取 yaml 文件所在的目录\n yaml_dir = os.path.dirname(full_yaml_path)\n # 如果目录不存在,则创建它\n os.makedirs(yaml_dir, exist_ok=True)\n # 设置 yaml_path\n YamlPathConfig.tools_yaml_path = full_yaml_path\n\n def get_tool(self, tool_name: str) -> Tool:\n # 找到对应的工具\n tool = self.tools.get(tool_name)\n if tool is None:\n raise ValueError(f\"No tool named {tool_name} found.\")\n\n return tool\n\n def get_tool_summary(self, tool_name: str) -> str:\n # 在 tools.yaml 文件中找到对应的工具\n tool = self.tools.get(tool_name)\n if tool is None:\n raise ValueError(f\"No tool named {tool_name} found.\")\n\n return tool.config.summary\n\n def get_tools_list_summary(self, tools_list: list[str]) -> dict[str, str]:\n tools_summary = {}\n for tool_name in tools_list:\n summary = self.get_tool_summary(tool_name)\n tools_summary[tool_name] = summary\n return tools_summary"
},
{
"identifier": "Tool",
"path": "package/src/yorgassistant/core/assistant/tools/tools.py",
"snippet": "class Tool:\n config: ToolConfig\n entity: BaseToolEntity\n _tool_type: str # 使用一个内部变量来存储 tool_type 的值\n\n def __init__(self, config: ToolConfig):\n self.config = config\n entity_name = config.entity_name\n\n if entity_name in FUNCTION_TOOL_ENTITIES:\n self.entity = FunctionToolEntity(FUNCTION_TOOL_ENTITIES[entity_name])\n self._tool_type = 'function'\n elif entity_name in STATEFUL_TOOL_ENTITIES:\n self.entity = STATEFUL_TOOL_ENTITIES[entity_name]()\n self._tool_type = 'stateful'\n else:\n raise Exception(f\"Tool entity {entity_name} not found.\")\n\n @property\n def tool_type(self):\n return self._tool_type\n\n @tool_type.setter\n def tool_type(self, value):\n self._tool_type = value\n # TODO: response check and type convert\n def call(self, **kwargs):\n return self.entity.call(**kwargs)\n\n def need_llm_generate_parameters(self) -> bool:\n return self.entity.need_llm_generate_parameters()\n\n def need_llm_generate_response(self) -> bool:\n return self.entity.need_llm_generate_response()\n\n def has_done(self) -> bool:\n return self.entity.current_state() == State.DONE"
}
] | import uuid
import time
import yaml
import os
import re
import logging
import json
import inspect
from typing import Any, List, Optional,Dict
from .assistant import Assistants
from ..nodes.openai.openai import OpenAINode,AsyncOpenAINode
from ..nodes.openai.openai_model import *
from .tools.tools import Tools, Tool
from .config import *
from .prompt.few_shot_cot_tools_choose_prompt import *
from .prompt.parameters_generate_prompt import *
from .prompt.response_generate_prompt import * | 8,047 | config = ThreadsConfig.from_dict(d)
return cls(config, YamlPathConfig.threads_yaml_path) # create the instance with the configured yaml_path
# If none is found, raise an exception
raise ValueError(f'No threads with id {id} found in YAML file.')
@staticmethod
def get_all_threads() -> List[Dict[str, Any]]:
"""
Read the YAML file and return a list with the information of all threads.
"""
# Make sure the YAML file path has been set
if YamlPathConfig.threads_yaml_path:
if not os.path.isfile(YamlPathConfig.threads_yaml_path):
# If the path is set but the file itself does not exist, create an empty file
with open(YamlPathConfig.threads_yaml_path, 'w') as file:
yaml.dump([], file)
else:
raise FileNotFoundError("The threads YAML file path is not set.")
# Read the YAML file
with open(YamlPathConfig.threads_yaml_path, 'r') as file:
data = yaml.safe_load(file) or []
# Convert each dict into a ThreadsConfig instance via from_dict
threads_list = []
for item in data:
config = ThreadsConfig.from_dict(item)
threads_list.append(config)
return threads_list
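# run() drives one assistant turn: choose a tool with the LLM (or fall back to plain chat), optionally let the LLM generate the tool's parameters, execute it, wrap the result, and persist the exchange to the YAML-backed history.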
async def run(self, assistant_id: str, input_text: str, **kwargs):
try:
# Fetch the assistant via the from_id method
assistant = Assistants.from_id(assistant_id)
tools_list = assistant.get_tools_type_list()
# Initialize the Tools object
tools = Tools()
# Get the summaries of the tools
tools_summary = tools.get_tools_list_summary(tools_list)
# If this is the first run, or the current tool has finished executing
if self.current_tool is None or self.current_tool.has_done():
# Use the LLM to choose tools
chosen_tools = await self._choose_tools(tools_summary, input_text)
# TODO: support executing multiple tools
if len(chosen_tools) == 0:
logging.warn("No tool is recommended.")
self.current_tool = None
# No tool is used, chat directly
res_message = await self._chat(input_text, assistant)
else:
tool_name = chosen_tools[0]
# Get the corresponding tool object
self.current_tool = tools.get_tool(tool_name)
# Check whether running the current tool needs LLM-generated parameters
if self.current_tool is not None and self.current_tool.need_llm_generate_parameters():
# Use the LLM to generate the parameters
parameters = self._generate_parameters(self.current_tool, input_text)
else:
parameters = kwargs
parameters['input_text'] = input_text
# Execute the tool
if self.current_tool is not None:
res_message = self.current_tool.call(**parameters)
# Hand the execution result to the LLM for wrapping
if self.current_tool is not None and self.current_tool.need_llm_generate_response():
# Use the LLM to generate the response
res_message = self._generate_response(
self.current_tool, input_text, parameters, res_message, assistant
)
# Update the message history and save it to the YAML file
if isinstance(res_message, dict) and 'assistant' in res_message:
assistant_message_str = res_message['assistant']['message']
if res_message['type'] == 'success':
self._config.message_history.append(
[
{'user':input_text},
{'assistant':assistant_message_str},
]
)
self._config.assistant_id = assistant_id
await self.save_to_yaml()
res_message['content']['tool'] = self.current_tool.config.name
return res_message
else:
assistant_message_str = str(res_message)
self._config.message_history.append(
[
{'user':input_text},
{'assistant':assistant_message_str},
]
)
self._config.assistant_id = assistant_id
await self.save_to_yaml()
return {
'type': 'success',
'content': {'tool': self.current_tool.config.name},
'next_stages_info': {},
'assistant': {'message': assistant_message_str}
}
except Exception as e:
# Return format when an exception occurs
logging.error(f"An error occurred: {e}")
return {
'type': 'error',
'content': {'message': str(e)},
'next_stages_info': {},
'assistant': {'message': ''}
}
async def _chat(
self, prompt: str, assistant: Assistants, system_message: Optional[str] = None
) -> str:
# Create an OpenAINode object
|
def extract_bracket_content(s: str) -> list:
content = re.findall(r"\[(.*?)\]", s)
content = [c.replace("'", "") for c in content]
content = filter(lambda x: x != "", content)
ret = []
for item in content:
if "," in item:
ret.extend(item.split(","))
else:
ret.append(item)
return ret
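# AsyncThreads is the asynchronous thread object: it keeps its ThreadsConfig in a YAML file and orchestrates tool selection and execution for an assistant.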
class AsyncThreads:
current_tool: Tool
chat_node: OpenAINode # thread-global OpenAI node, used only for chat interaction and for analyzing tool execution results (tool selection and parameter generation do not use this node)
def __init__(self, config: ThreadsConfig,threads_yaml_path:Optional[str] = None):
self._config = config
self.current_tool = None
YamlPathConfig.threads_yaml_path = threads_yaml_path if threads_yaml_path else "threads.yaml"
@property
def config(self):
return self._config
@property
def id(self):
return self._config.id
def set_threads_yaml_path(yaml_path:str):
# Check whether yaml_path is an absolute path
if not os.path.isabs(yaml_path):
# Get the stack frame of the caller of this method
stack = inspect.stack()
caller_frame = stack[1]
# Get the caller's file path
caller_path = caller_frame.filename
# Get the caller's directory path
caller_dir = os.path.dirname(caller_path)
# Build the absolute path of the yaml file
full_yaml_path = os.path.join(caller_dir, yaml_path)
else:
full_yaml_path = yaml_path
# Get the directory that contains the yaml file
yaml_dir = os.path.dirname(full_yaml_path)
# Create the directory if it does not exist
os.makedirs(yaml_dir, exist_ok=True)
# Set yaml_path
YamlPathConfig.threads_yaml_path = full_yaml_path
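# save_to_yaml upserts this thread's config into the shared threads YAML file, creating the file if it does not exist yet.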
async def save_to_yaml(self):
# Build the absolute path of the threads.yaml file
threads_yaml_path = YamlPathConfig.threads_yaml_path
# Check whether the file exists; if not, create an empty yaml file
if not os.path.exists(threads_yaml_path):
with open(threads_yaml_path, 'w') as file:
file.write('') # create an empty file
# Open the threads.yaml file using the absolute path
with open(threads_yaml_path, "r") as file:
data = yaml.safe_load(file) or []
# Look for an entry with the same id
for i, d in enumerate(data):
if d["id"] == self.config.id:
# If found, update it
data[i] = self.config.to_dict()
break
else:
# If not found, append the new entry to the list
data.append(self.config.to_dict())
# Write back to the YAML file
with open(threads_yaml_path, "w") as file:
yaml.dump(data, file)
@staticmethod
def create(yaml_file_path:str) -> "AsyncThreads":
# Create the ThreadsConfig object
config = ThreadsConfig(
id=str(uuid.uuid4()),
object="AsyncThreads",
created_at=int(time.time()),
message_history=[],
metadata={},
)
# Create the Threads object
threads = AsyncThreads(config,YamlPathConfig.threads_yaml_path)
# Save to the YAML file
threads.save_to_yaml()
return threads
@classmethod
def from_id(cls, id: str) -> 'AsyncThreads':
# Open the YAML file using the configured yaml_path
with open(YamlPathConfig.threads_yaml_path, 'r') as file:
data = yaml.safe_load(file) or []
# Look for the config with the same id
for d in data:
if d['id'] == id:
# If found, create a new object from this config
config = ThreadsConfig.from_dict(d)
return cls(config, YamlPathConfig.threads_yaml_path) # create the instance with the configured yaml_path
# If none is found, raise an exception
raise ValueError(f'No threads with id {id} found in YAML file.')
@staticmethod
def get_all_threads() -> List[Dict[str, Any]]:
"""
Read the YAML file and return a list with the information of all threads.
"""
# Make sure the YAML file path has been set
if YamlPathConfig.threads_yaml_path:
if not os.path.isfile(YamlPathConfig.threads_yaml_path):
# If the path is set but the file itself does not exist, create an empty file
with open(YamlPathConfig.threads_yaml_path, 'w') as file:
yaml.dump([], file)
else:
raise FileNotFoundError("The threads YAML file path is not set.")
# Read the YAML file
with open(YamlPathConfig.threads_yaml_path, 'r') as file:
data = yaml.safe_load(file) or []
# Convert each dict into a ThreadsConfig instance via from_dict
threads_list = []
for item in data:
config = ThreadsConfig.from_dict(item)
threads_list.append(config)
return threads_list
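# run() drives one assistant turn: tool selection, optional LLM parameter generation, tool execution, response wrapping, and persistence of the message history.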
async def run(self, assistant_id: str, input_text: str, **kwargs):
try:
# Fetch the assistant via the from_id method
assistant = Assistants.from_id(assistant_id)
tools_list = assistant.get_tools_type_list()
# Initialize the Tools object
tools = Tools()
# Get the summaries of the tools
tools_summary = tools.get_tools_list_summary(tools_list)
# If this is the first run, or the current tool has finished executing
if self.current_tool is None or self.current_tool.has_done():
# Use the LLM to choose tools
chosen_tools = await self._choose_tools(tools_summary, input_text)
# TODO: support executing multiple tools
if len(chosen_tools) == 0:
logging.warn("No tool is recommended.")
self.current_tool = None
# No tool is used, chat directly
res_message = await self._chat(input_text, assistant)
else:
tool_name = chosen_tools[0]
# Get the corresponding tool object
self.current_tool = tools.get_tool(tool_name)
# Check whether running the current tool needs LLM-generated parameters
if self.current_tool is not None and self.current_tool.need_llm_generate_parameters():
# Use the LLM to generate the parameters
parameters = self._generate_parameters(self.current_tool, input_text)
else:
parameters = kwargs
parameters['input_text'] = input_text
# Execute the tool
if self.current_tool is not None:
res_message = self.current_tool.call(**parameters)
# Hand the execution result to the LLM for wrapping
if self.current_tool is not None and self.current_tool.need_llm_generate_response():
# Use the LLM to generate the response
res_message = self._generate_response(
self.current_tool, input_text, parameters, res_message, assistant
)
# Update the message history and save it to the YAML file
if isinstance(res_message, dict) and 'assistant' in res_message:
assistant_message_str = res_message['assistant']['message']
if res_message['type'] == 'success':
self._config.message_history.append(
[
{'user':input_text},
{'assistant':assistant_message_str},
]
)
self._config.assistant_id = assistant_id
await self.save_to_yaml()
res_message['content']['tool'] = self.current_tool.config.name
return res_message
else:
assistant_message_str = str(res_message)
self._config.message_history.append(
[
{'user':input_text},
{'assistant':assistant_message_str},
]
)
self._config.assistant_id = assistant_id
await self.save_to_yaml()
return {
'type': 'success',
'content': {'tool': self.current_tool.config.name},
'next_stages_info': {},
'assistant': {'message': assistant_message_str}
}
except Exception as e:
# Return format when an exception occurs
logging.error(f"An error occurred: {e}")
return {
'type': 'error',
'content': {'message': str(e)},
'next_stages_info': {},
'assistant': {'message': ''}
}
async def _chat(
self, prompt: str, assistant: Assistants, system_message: Optional[str] = None
) -> str:
# Create an OpenAINode object | response_node = AsyncOpenAINode() | 2 | 2023-10-24 15:15:48+00:00 | 12k
zju3dv/4K4D | scripts/ray_tracing/shading_ball.py | [
{
"identifier": "dotdict",
"path": "easyvolcap/utils/base_utils.py",
"snippet": "class dotdict(dict, Dict[KT, VT]):\n \"\"\"\n This is the default data passing object used throughout the codebase\n Main function: dot access for dict values & dict like merging and updates\n\n a dictionary that supports dot notation \n as well as dictionary access notation \n usage: d = make_dotdict() or d = make_dotdict{'val1':'first'})\n set attributes: d.val2 = 'second' or d['val2'] = 'second'\n get attributes: d.val2 or d['val2']\n \"\"\"\n\n def update(self, dct: Dict = None, **kwargs):\n dct = copy(dct) # avoid modifying the original dict, use super's copy to avoid recursion\n\n # Handle different arguments\n if dct is None:\n dct = kwargs\n elif isinstance(dct, Mapping):\n dct.update(kwargs)\n else:\n super().update(dct, **kwargs)\n return\n\n # Recursive updates\n for k, v in dct.items():\n if k in self:\n\n # Handle type conversions\n target_type = type(self[k])\n if not isinstance(v, target_type):\n # NOTE: bool('False') will be True\n if target_type == bool and isinstance(v, str):\n dct[k] = v == 'True'\n else:\n dct[k] = target_type(v)\n\n if isinstance(v, dict):\n self[k].update(v) # recursion from here\n else:\n self[k] = v\n else:\n if isinstance(v, dict):\n self[k] = dotdict(v) # recursion?\n else:\n self[k] = v\n return self\n\n def __init__(self, *args, **kwargs):\n self.update(*args, **kwargs)\n\n copy = return_dotdict(dict.copy)\n fromkeys = return_dotdict(dict.fromkeys)\n\n # def __hash__(self):\n # # return hash(''.join([str(self.values().__hash__())]))\n # return super(dotdict, self).__hash__()\n\n # def __init__(self, *args, **kwargs):\n # super(dotdict, self).__init__(*args, **kwargs)\n\n \"\"\"\n Uncomment following lines and \n comment out __getattr__ = dict.__getitem__ to get feature:\n \n returns empty numpy array for undefined keys, so that you can easily copy things around\n TODO: potential caveat, harder to trace where this is set to np.array([], dtype=np.float32)\n \"\"\"\n\n def __getitem__(self, key):\n try:\n return dict.__getitem__(self, key)\n except KeyError as e:\n raise AttributeError(e)\n # MARK: Might encounter exception in newer version of pytorch\n # Traceback (most recent call last):\n # File \"/home/xuzhen/miniconda3/envs/torch/lib/python3.9/multiprocessing/queues.py\", line 245, in _feed\n # obj = _ForkingPickler.dumps(obj)\n # File \"/home/xuzhen/miniconda3/envs/torch/lib/python3.9/multiprocessing/reduction.py\", line 51, in dumps\n # cls(buf, protocol).dump(obj)\n # KeyError: '__getstate__'\n # MARK: Because you allow your __getattr__() implementation to raise the wrong kind of exception.\n # FIXME: not working typing hinting code\n __getattr__: Callable[..., 'torch.Tensor'] = __getitem__ # type: ignore # overidden dict.__getitem__\n __getattribute__: Callable[..., 'torch.Tensor'] # type: ignore\n # __getattr__ = dict.__getitem__\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n # TODO: better ways to programmically define these special variables?\n\n @property\n def meta(self) -> dotdict:\n # Special variable used for storing cpu tensor in batch\n if 'meta' not in self:\n self.meta = dotdict()\n return self.__getitem__('meta')\n\n @meta.setter\n def meta(self, meta):\n self.__setitem__('meta', meta)\n\n @property\n def output(self) -> dotdict: # late annotation needed for this\n # Special entry for storing output tensor in batch\n if 'output' not in self:\n self.output = dotdict()\n return self.__getitem__('output')\n\n @output.setter\n def output(self, output):\n self.__setitem__('output', output)\n\n 
@property\n def persistent(self) -> dotdict: # late annotation needed for this\n # Special entry for storing persistent tensor in batch\n if 'persistent' not in self:\n self.persistent = dotdict()\n return self.__getitem__('persistent')\n\n @persistent.setter\n def persistent(self, persistent):\n self.__setitem__('persistent', persistent)\n\n @property\n def type(self) -> str: # late annotation needed for this\n # Special entry for type based construction system\n return self.__getitem__('type')\n\n @type.setter\n def type(self, type):\n self.__setitem__('type', type)\n\n def to_dict(self):\n out = dict()\n for k, v in self.items():\n if isinstance(v, dotdict):\n v = v.to_dict() # recursion point\n out[k] = v\n return out"
},
{
"identifier": "log",
"path": "easyvolcap/utils/console_utils.py",
"snippet": "class MyYAML(YAML):\nclass without_live:\nclass PathColumn(ProgressColumn):\nclass RateColumn(ProgressColumn):\nclass PrefixColumn(ProgressColumn):\nclass TimeColumn(ProgressColumn):\nclass tqdm_rich(std_tqdm):\nclass Timer:\n def dumps(self, obj: Union[dict, dotdict]):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\ndef stop_live():\ndef start_live():\ndef stop_prog():\ndef start_prog():\ndef stacktrace(extra_lines=0, **kwargs): # be consise\ndef disable_breakpoint():\ndef enable_breakpoint():\ndef disable_console():\ndef enable_console():\ndef disable_progress():\ndef enable_progress():\ndef disable_verbose_log():\ndef enable_verbose_log():\ndef set_trace(*args, **kwargs):\ndef post_mortem(*args, **kwargs):\ndef line(obj):\ndef path(string): # add path markup\ndef red(string: str) -> str: return f'[red bold]{string}[/]'\ndef blue(string: str) -> str: return f'[blue bold]{string}[/]'\ndef cyan(string: str) -> str: return f'[cyan bold]{string}[/]'\ndef green(string: str) -> str: return f'[green bold]{string}[/]'\ndef yellow(string: str) -> str: return f'[yellow bold]{string}[/]'\ndef magenta(string: str) -> str: return f'[magenta bold]{string}[/]'\ndef color(string: str, color: str): return f'[{color} bold]{string}[/]'\ndef red_slim(string: str) -> str: return f'[red]{string}[/]'\ndef blue_slim(string: str) -> str: return f'[blue]{string}[/]'\ndef cyan_slim(string: str) -> str: return f'[cyan]{string}[/]'\ndef green_slim(string: str) -> str: return f'[green]{string}[/]'\ndef yellow_slim(string: str) -> str: return f'[yellow]{string}[/]'\ndef magenta_slim(string: str) -> str: return f'[magenta]{string}[/]'\ndef color_slim(string: str, color: str): return f'[{color}]{string}[/]'\ndef markup_to_ansi(string: str) -> str:\ndef get_log_prefix(back=2,\n module_color=blue,\n func_color=green,\n ):\ndef log(*stuff,\n back=1,\n file: Optional[IO[str]] = None,\n no_prefix=False,\n module_color=blue,\n func_color=green,\n console: Optional[Console] = console,\n **kwargs):\ndef run(cmd,\n quite=False,\n dry_run=False,\n skip_failed=False,\n invokation=os.system, # or subprocess.run\n ):\ndef read(cmd: str, *args, **kwargs):\n def get_output(cmd: str):\ndef run_if_not_exists(cmd, outname, *args, **kwargs):\ndef catch_throw(func: Callable):\n def inner(*args, **kwargs):\ndef print(*stuff,\n sep: str = \" \",\n end: str = \"\\n\",\n file: Optional[IO[str]] = None,\n flush: bool = False,\n console: Optional[Console] = console,\n **kwargs,\n ):\n def __init__(self, **kwargs):\n def render(self, task):\n def __init__(self, unit=\"\", unit_scale=False, unit_divisor=1000, **kwargs):\n def render(self, task):\n def __init__(self, content: str = None, **kwargs):\n def render(self, task):\n def render(self, task):\n def __init__(self, *args, back=2, **kwargs):\n def close(self):\n def clear(self, *_, **__):\n def display(self, refresh=True, *_, **__):\n def reset(self, total=None):\ndef time_function(sync_cuda: bool = True):\n def inner(*args, func: Callable = lambda x: x, **kwargs):\n def wrapper(func: Callable):\n def __init__(self, name='', disabled: bool = False, sync_cuda: bool = True):\n def __enter__(self):\n def start(self):\n def __exit__(self, exc_type=None, exc_value=None, traceback=None):\n def stop(self, print=True, back=2):\n def record(self, event: str = ''):\ndef run_once(f):\n def wrapper(*args, **kwargs):\ndef display_table(states: dotdict,\n styles=default_dotdict(\n NoneType,\n {\n 'eta': 'cyan',\n 'epoch': 'cyan',\n 'img_loss': 'magenta',\n 'psnr': 
'magenta',\n 'loss': 'magenta',\n 'data': 'blue',\n 'batch': 'blue',\n }\n ),\n maxlen=5,\n ):\n def create_table(columns: List[str],\n rows: List[List[str]] = [],\n styles=default_dotdict(NoneType),\n ):\ndef build_parser(d: dict, parser: argparse.ArgumentParser = None):"
},
{
"identifier": "sample_envmap_image",
"path": "easyvolcap/utils/relight_utils.py",
"snippet": "def sample_envmap_image(image: torch.Tensor, ray_d: torch.Tensor):\n sh = ray_d.shape\n if image.ndim == 4:\n image = image[0]\n ray_d = ray_d.view(-1, 3)\n # envmap: H, W, C\n # viewdirs: N, 3\n\n # https://github.com/zju3dv/InvRender/blob/45e6cdc5e3c9f092b5d10e2904bbf3302152bb2f/code/model/sg_render.py\n image = image.permute(2, 0, 1).unsqueeze(0)\n\n theta = torch.arccos(ray_d[:, 2]).reshape(-1) - 1e-6\n phi = torch.atan2(ray_d[:, 1], ray_d[:, 0]).reshape(-1) # 0 - pi\n\n # normalize to [-1, 1]\n query_y = (theta / torch.pi) * 2 - 1\n query_x = - phi / torch.pi\n grid = torch.stack((query_x, query_y)).permute(1, 0).unsqueeze(0).unsqueeze(0)\n\n rgb = F.grid_sample(image, grid, align_corners=False, padding_mode='border')\n rgb = rgb.squeeze().permute(1, 0)\n return rgb.view(sh)"
},
{
"identifier": "read_hdr",
"path": "easyvolcap/utils/relight_utils.py",
"snippet": "def read_hdr(path):\n # TODO: will this support openexr? could not find valid openexr python binding\n # TODO: implement saving in hdr format\n \"\"\"Reads an HDR map from disk.\n\n Args:\n path (str): Path to the .hdr file.\n\n Returns:\n numpy.ndarray: Loaded (float) HDR map with RGB channels in order.\n \"\"\"\n with open(path, 'rb') as h:\n buffer_ = np.fromstring(h.read(), np.uint8)\n bgr = cv2.imdecode(buffer_, cv2.IMREAD_UNCHANGED)\n # bgr = cv2.imread(path, cv2.IMREAD_UNCHANGED)\n rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)\n return rgb.astype(np.float32)"
},
{
"identifier": "Microfacet",
"path": "easyvolcap/utils/relight_utils.py",
"snippet": "class Microfacet:\n \"\"\"As described in:\n Microfacet Models for Refraction through Rough Surfaces [EGSR '07]\n \"\"\"\n\n def __init__(self,\n default_rough=0.1, default_albedo=0.8, f0=0.04,\n lambert_only=False, glossy_only=False):\n self.default_albedo = default_albedo\n self.default_rough = default_rough\n self.f0 = f0\n\n self.lambert_only = lambert_only\n self.glossy_only = glossy_only\n\n def __call__(self,\n pts2l: torch.Tensor,\n pts2c: torch.Tensor,\n normal: torch.Tensor,\n albedo: torch.Tensor = None,\n rough: torch.Tensor = None,\n fresnel: torch.Tensor = None,\n ):\n \"\"\"All in the world coordinates.\n\n Too low roughness is OK in the forward pass, but may be numerically\n unstable in the backward pass\n\n pts2l: NxLx3\n pts2c: Nx3\n normal: Nx3\n albedo: Nx3\n rough: Nx1\n \"\"\"\n if not isinstance(albedo, torch.Tensor): # scalar input\n albedo = torch.full((*pts2c.shape[:-1], 3), albedo if albedo is not None else self.default_albedo, device=normal.device)\n if not isinstance(rough, torch.Tensor): # scalar input\n rough = torch.full((*pts2c.shape[:-1], 1), rough if rough is not None else self.default_rough, device=normal.device)\n\n sh = pts2l.shape\n if len(sh) == 5:\n B, eH, eW, P, C = sh\n pts2l = pts2l.reshape(B, eH * eW, P, 3).permute(0, 2, 1, 3).reshape(B * P, eH * eW, 3)\n pts2c = pts2c.reshape(B * P, 3)\n normal = normal.reshape(B * P, 3)\n albedo = albedo.reshape(B * P, 3)\n rough = rough.reshape(B * P, 1)\n elif len(sh) == 3:\n B, P, C = sh\n pts2l = pts2l.reshape(B * P, 1, 3) # assume only one light\n pts2c = pts2c.reshape(B * P, 3)\n normal = normal.reshape(B * P, 3)\n albedo = albedo.reshape(B * P, 3)\n rough = rough.reshape(B * P, 1)\n elif len(sh) == 2:\n P, C = sh\n pts2l = pts2l.reshape(P, 1, 3) # assume only one light\n pts2c = pts2c.reshape(P, 3)\n normal = normal.reshape(P, 3)\n albedo = albedo.reshape(P, 3)\n rough = rough.reshape(P, 1)\n else:\n pass # whatever, might just error out\n\n # Normalize directions and normals\n pts2l = F.normalize(pts2l, p=2, dim=-1, eps=1e-7)\n pts2c = F.normalize(pts2c, p=2, dim=-1, eps=1e-7)\n normal = F.normalize(normal, p=2, dim=-1, eps=1e-7)\n\n # Glossy\n h = pts2l + pts2c[:, None, :] # NxLx3\n h = F.normalize(h, p=2, dim=-1, eps=1e-7)\n f = self._get_f(pts2l, h) # NxL, fresnel term (Schlick's approx)\n alpha = rough ** 2\n d = self._get_d(h, normal, alpha=alpha) # NxL, normal distribution\n g = self._get_g(pts2c, h, normal, alpha=alpha) # NxL, shadow masking term\n # g = torch.ones_like(g)\n # d = torch.ones_like(d)\n # f = torch.ones_like(f)\n\n l_dot_n = torch.einsum('ijk,ik->ij', pts2l, normal)\n v_dot_n = torch.einsum('ij,ij->i', pts2c, normal)\n denom = 4 * torch.abs(l_dot_n) * torch.abs(v_dot_n)[:, None]\n microfacet = safe_divide(f * g * d, denom) # NxL\n brdf_glossy = microfacet[:, :, None].repeat(1, 1, 3)\n\n # Diffuse\n # http://www.joshbarczak.com/blog/?p=272\n # Mix two shaders\n brdf_lambert = albedo[:, None, :].repeat(1, pts2l.shape[1], 1) / torch.pi\n if self.lambert_only:\n brdf = brdf_lambert # Nx3\n elif self.glossy_only:\n brdf = brdf_glossy\n else:\n brdf = brdf_glossy + brdf_lambert # Nx3 # TODO: energy conservation?\n\n if len(sh) == 5:\n return brdf.reshape(B, P, eH * eW, 3).permute(0, 2, 1, 3).reshape(B, eH, eW, P, 3)\n elif len(sh) == 3:\n return brdf.reshape(B, P, 3)\n elif len(sh) == 2:\n return brdf.reshape(P, 3)\n else:\n return brdf\n\n @staticmethod\n def _get_g(v, m, n, alpha=0.1):\n \"\"\"Geometric function (GGX).\n \"\"\"\n cos_theta_v = torch.einsum('ij,ij->i', n, 
v)\n cos_theta = torch.einsum('ijk,ik->ij', m, v)\n denom = cos_theta_v[:, None]\n div = safe_divide(cos_theta, denom)\n chi = torch.where(div > 0, 1., 0.)\n cos_theta_v_sq = torch.square(cos_theta_v)\n cos_theta_v_sq = torch.clip(cos_theta_v_sq, 0., 1.)\n denom = cos_theta_v_sq\n tan_theta_v_sq = safe_divide(1 - cos_theta_v_sq, denom)\n tan_theta_v_sq = torch.clip(tan_theta_v_sq, 0., 1e10)\n denom = 1 + torch.sqrt(1 + alpha ** 2 * tan_theta_v_sq[:, None])\n g = safe_divide(chi * 2, denom)\n return g # (n_pts, n_lights)\n\n @staticmethod\n def _get_d(m, n, alpha=0.1):\n \"\"\"Microfacet distribution (GGX).\n \"\"\"\n cos_theta_m = torch.einsum('ijk,ik->ij', m, n)\n chi = torch.where(cos_theta_m > 0, 1., 0.)\n cos_theta_m_sq = torch.square(cos_theta_m)\n denom = cos_theta_m_sq\n tan_theta_m_sq = safe_divide(1 - cos_theta_m_sq, denom)\n denom = torch.pi * torch.square(cos_theta_m_sq) * torch.square(\n alpha ** 2 + tan_theta_m_sq)\n d = safe_divide(alpha ** 2 * chi, denom)\n return d # (n_pts, n_lights)\n\n def _get_f(self, l, m):\n \"\"\"Fresnel (Schlick's approximation).\n \"\"\"\n cos_theta = torch.einsum('ijk,ijk->ij', l, m)\n f = self.f0 + (1 - self.f0) * (1 - cos_theta) ** 5\n return f # (n_pts, n_lights)"
},
{
"identifier": "linear2srgb",
"path": "easyvolcap/utils/relight_utils.py",
"snippet": "def linear2srgb(linear: torch.Tensor):\n srgb_linear_thres = 0.0031308\n srgb_linear_coeff = 12.92\n srgb_exponential_coeff = 1.055\n srgb_exponent = 2.4\n\n linear = linear.clip(0., 1.)\n tensor_linear = linear * srgb_linear_coeff\n tensor_nonlinear = srgb_exponential_coeff * (torch.pow(linear + 1e-7, 1 / srgb_exponent)) - (srgb_exponential_coeff - 1)\n\n is_linear = linear <= srgb_linear_thres\n tensor_srgb = torch.where(is_linear, tensor_linear, tensor_nonlinear)\n\n return tensor_srgb"
},
{
"identifier": "gen_light_xyz",
"path": "easyvolcap/utils/relight_utils.py",
"snippet": "def gen_light_xyz(envmap_h, envmap_w, envmap_r=1e2, device='cuda'):\n \"\"\"Additionally returns the associated solid angles, for integration.\n \"\"\"\n # OpenEXR \"latlong\" format\n # lat = pi/2\n # lng = pi\n # +--------------------+\n # | |\n # | |\n # +--------------------+\n # lat = -pi/2\n # lng = -pi\n # theta = 0\n # phi = pi\n # +--------------------+\n # | |\n # | |\n # +--------------------+\n # theta = pi\n # phi = -pi\n # lat_step_size = torch.pi / (envmap_h + 2) # 0.5 in pixel coordinates\n # lng_step_size = 2 * torch.pi / (envmap_w + 2) # 0.5 in pixel coordinates\n lat_half = torch.pi / envmap_h / 2 # 0.5 in pixel coordinates\n lng_half = 2 * torch.pi / envmap_w / 2 # 0.5 in pixel coordinates\n # Try to exclude the problematic polar points (linspace includes endpoints)\n lats = torch.linspace(torch.pi / 2 - lat_half, -torch.pi / 2 + lat_half, envmap_h, device=device)\n lngs = torch.linspace(torch.pi - lng_half, -torch.pi + lng_half, envmap_w, device=device)\n lngs, lats = torch.meshgrid(lngs, lats, indexing='xy')\n\n # To Cartesian\n rlatlngs = torch.dstack((envmap_r * torch.ones_like(lats), lats, lngs))\n rlatlngs = rlatlngs.reshape(-1, 3)\n xyz = sph2cart(rlatlngs)\n xyz = xyz.reshape(envmap_h, envmap_w, 3)\n\n # Calculate the area of each pixel on the unit sphere (useful for\n # integration over the sphere)\n sin_colat = torch.sin(torch.pi / 2 - lats)\n areas = 4 * torch.pi * sin_colat / torch.sum(sin_colat)\n\n assert 0 not in areas, \"There shouldn't be light pixel that doesn't contribute\"\n\n return xyz, areas"
},
{
"identifier": "gen_uniform_light_xyz",
"path": "easyvolcap/utils/relight_utils.py",
"snippet": "def gen_uniform_light_xyz(envmap_h, envmap_w, envmap_r=1e2, device=\"cuda\"):\n # See: https://www.bogotobogo.com/Algorithms/uniform_distribution_sphere.php\n theta_half = 1 / envmap_h / 2\n phi_half = 1 / envmap_w / 2\n theta = torch.acos(2.0 * torch.linspace(0 + theta_half, 1 - theta_half, envmap_h, device=device) - 1.0)\n phi = 2.0 * torch.pi * torch.linspace(0 + phi_half, 1 - phi_half, envmap_w, device=device)\n theta, phi = torch.meshgrid(theta, phi, indexing='ij')\n\n ray_d = spher2cart(theta, phi) # T, 3, z always bigger than zero\n xyz = ray_d * envmap_r\n area = 4 * torch.pi / (envmap_h * envmap_w)\n area = torch.full(xyz.shape[:2], area, device=xyz.device) # should be uniform\n\n return xyz, area"
},
{
"identifier": "normalize",
"path": "easyvolcap/utils/net_utils.py",
"snippet": "@torch.jit.script\ndef normalize(x: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:\n # channel last: normalization\n return x / (torch.norm(x, dim=-1, keepdim=True) + eps)"
},
{
"identifier": "multi_gather",
"path": "easyvolcap/utils/net_utils.py",
"snippet": "def multi_gather(values: torch.Tensor, indices: torch.Tensor, dim=-2):\n # Gather the value at the -2th dim of values, augment index shape on the back\n # Example: values: B, P, 3, index: B, N, -> B, N, 3\n\n # index will first be augmented to match the values' dimentionality at the back\n # take care of batch dimension of, and acts like a linear indexing in the target dimention\n # we assume that the values's second to last dimension is the dimension to be indexed on\n return values.gather(dim, multi_indexing(indices, values.shape, dim))"
},
{
"identifier": "multi_scatter",
"path": "easyvolcap/utils/net_utils.py",
"snippet": "def multi_scatter(target: torch.Tensor, indices: torch.Tensor, values: torch.Tensor, dim=-2):\n # backward of multi_gather\n return target.scatter(dim, multi_indexing(indices, values.shape, dim), values)"
},
{
"identifier": "spher2cart",
"path": "easyvolcap/utils/sh_utils.py",
"snippet": "def spher2cart(theta, phi):\n \"\"\"Convert spherical coordinates into Cartesian coordinates (radius 1).\"\"\"\n r = torch.sin(theta)\n x = r * torch.cos(phi)\n y = r * torch.sin(phi)\n z = torch.cos(theta)\n return torch.stack([x, y, z], dim=-1)"
},
{
"identifier": "spherical_uniform_sampling_upper",
"path": "easyvolcap/utils/sh_utils.py",
"snippet": "def spherical_uniform_sampling_upper(sample_count, device=\"cuda\"):\n # See: https://www.bogotobogo.com/Algorithms/uniform_distribution_sphere.php\n theta = torch.acos(1.0 * torch.rand([sample_count], device=device))\n phi = 2.0 * math.pi * torch.rand([sample_count], device=device)\n return theta, phi"
},
{
"identifier": "spherical_uniform_sampling",
"path": "easyvolcap/utils/sh_utils.py",
"snippet": "def spherical_uniform_sampling(sample_count, device=\"cuda\"):\n # See: https://www.bogotobogo.com/Algorithms/uniform_distribution_sphere.php\n theta = torch.acos(2.0 * torch.rand([sample_count], device=device) - 1.0)\n phi = 2.0 * math.pi * torch.rand([sample_count], device=device)\n return theta, phi"
},
{
"identifier": "save_image",
"path": "easyvolcap/utils/data_utils.py",
"snippet": "def save_image(img_path: str, img: np.ndarray, jpeg_quality=75, png_compression=9, save_dtype=np.uint8):\n if isinstance(img, torch.Tensor): img = img.detach().cpu().numpy()\n if img.ndim == 4: img = np.concatenate(img, axis=0)\n if img.shape[0] < img.shape[-1] and (img.shape[0] == 3 or img.shape[0] == 4): img = np.transpose(img, (1, 2, 0))\n if np.issubdtype(img.dtype, np.integer):\n img = img / np.iinfo(img.dtype).max # to float\n if img.shape[-1] >= 3:\n if not img.flags['WRITEABLE']:\n img = img.copy() # avoid assignment only inputs\n img[..., :3] = img[..., [2, 1, 0]]\n if os.path.dirname(img_path):\n os.makedirs(os.path.dirname(img_path), exist_ok=True)\n if img_path.endswith('.png'):\n max = np.iinfo(save_dtype).max\n img = (img * max).clip(0, max).astype(save_dtype)\n elif img_path.endswith('.jpg'):\n img = img[..., :3] # only color\n img = (img * 255).clip(0, 255).astype(np.uint8)\n elif img_path.endswith('.hdr'):\n img = img[..., :3] # only color\n elif img_path.endswith('.exr'):\n # ... https://github.com/opencv/opencv/issues/21326\n os.environ[\"OPENCV_IO_ENABLE_OPENEXR\"] = \"1\"\n else:\n # should we try to discard alpha channel here?\n # exr could store alpha channel\n pass # no transformation for other unspecified file formats\n return cv2.imwrite(img_path, img, [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality, cv2.IMWRITE_PNG_COMPRESSION, png_compression])"
}
] | import math
import torch
import argparse
import sys
from tqdm import tqdm
from glob import glob
from easyvolcap.utils.base_utils import dotdict
from easyvolcap.utils.console_utils import log, colored
from easyvolcap.utils.relight_utils import sample_envmap_image, read_hdr, Microfacet, linear2srgb, gen_light_xyz, gen_uniform_light_xyz
from easyvolcap.utils.net_utils import normalize, multi_gather, multi_scatter
from easyvolcap.utils.sh_utils import spher2cart, spherical_uniform_sampling_upper, spherical_uniform_sampling
from easyvolcap.utils.data_utils import save_image | 9,276 | zy = torch.stack(torch.meshgrid(torch.arange(H, device=args.device), torch.arange(W, device=args.device), indexing='ij'), dim=-1)
zy = zy - torch.tensor([H / 2, W / 2], device=zy.device)
zy = zy / torch.tensor(min(H, W) / 2, device=zy.device)
zy = zy.flip(dims=[0])
zy = zy.view(-1, 2) # H * W, 2
x = (1 - zy.pow(2).sum(-1)).sqrt()
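# NOTE: pixels outside the unit disk produce NaN here; they are zeroed out below and later rejected by the visibility mask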
x = x.nan_to_num(0)
zyx = torch.cat([zy, x[..., None]], dim=-1)
surf = zyx.flip(dims=[-1]) # H * W, 3
# Construct normal and material for the shading ball
C = torch.tensor([2, 0, 0], device=x.device) # simple perspective projection
norm = normalize(surf) # whatever for invalid regions
view = normalize(surf - C) # camera view direction
# Prepare mask for valid pixels
msk = (norm * view).sum(-1) < 0 # view direction and normal should be opposite
ind = msk.nonzero()
P = ind.shape[0]
surf = multi_gather(surf, ind) # get good pixels to shade on
view = multi_gather(view, ind) # get good pixels to shade on
norm = multi_gather(norm, ind) # get good pixels to shade on
def image_based_lighting(surf: torch.Tensor,
norm: torch.Tensor,
view: torch.Tensor,
probe: dotdict, # lighting
albedo: float, # scalar albedo
roughness: float, # scalar roughness
microfacet: Microfacet, # material
N: int = 1024, # number of samples
H: int = 16,
W: int = 32,
uniform: bool = True, # uniform or stratified sampling
perturb: bool = True,
):
# Generate sample_count uniformly and stratified samples over the sphere
P = surf.shape[0]
if uniform: # uniform random sampling
T = P * N
theta, phi = spherical_uniform_sampling_upper(T, device=surf.device) # T, T,
ray_d = spher2cart(theta, phi) # T, 3, z always bigger than zero
else: # stratified sampling
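# one sample per envmap texel (H * W directions); an optional random rotation (orthogonal Q from a QR decomposition) decorrelates the sample pattern across iterations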
N = H * W
T = P * N
xyz, area = gen_light_xyz(H, W, device=surf.device)
if perturb:
R = torch.rand(3, 3, device=xyz.device)
Q, R = torch.linalg.qr(R) # 3, 3
xyz = xyz @ Q # apply random rotation
xyz, area = xyz.view(-1, 3), area.view(-1, 1)
ray_d = normalize(xyz) # T, 3
# Adding more samples seems to help, but results are still not great for low-roughness surfaces (i.e. this implementation has a hard upper limit for specular surfaces)
# And the influence of visibility is not clear enough; ablation has only been done on one of the characters under one of the novel lighting conditions
# The physical correctness of the distance field soft shadow is questionable
# __import__('ipdb').set_trace()
# torch.testing.assert_allclose(probe.view(-1, 3), sample_envmap_image(probe, ray_d))
xyz = xyz[:, None].expand(N, P, 3).reshape(-1, 3) # T, 3
area = area[:, None].expand(N, P, 1).reshape(-1, 1) # T, 1
ray_d = ray_d[:, None].expand(N, P, 3).reshape(-1, 3) # T, 3
# Preparing shapes
norm = norm[None].expand(N, P, 3).reshape(T, 3) # T, 3
view = view[None].expand(N, P, 3).reshape(T, 3) # T, 3
# Transform ray_d to be pointing upward from normal direction
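# build a per-sample orthonormal frame with cross products: the z axis is the surface normal, so hemisphere directions drawn around +z are rotated from local (tangent) space into world space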
if uniform:
R = torch.zeros([T, 3, 3], device=norm.device)
R[..., 0, 0] = 1.0
R[..., :3, 2] = norm # c2w, z axis is normal direction
R[..., :3, 1] = normalize(torch.cross(R[..., :3, 2], R[..., :3, 0]))
R[..., :3, 0] = normalize(torch.cross(R[..., :3, 1], R[..., :3, 2]))
ray_d = (R @ ray_d[..., None])[..., 0]
# Compute shading
ldot = (ray_d * norm).sum(dim=-1, keepdim=True) # T
light = sample_envmap_image(probe, ray_d)
brdf = microfacet(ray_d, -view, norm, albedo, roughness)
shading = light * ldot * brdf
# Apply area to normalize integration
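# Monte Carlo weighting: uniform hemisphere samples have pdf 1 / (2 * pi), so each of the N samples contributes 2 * pi / N; stratified samples are weighted by the solid angle (area) of their envmap texel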
if uniform:
shading = shading * 2.0 * torch.pi / N
else:
shading = shading * area
shading = shading.view(N, P, -1).sum(dim=-3)
return shading
microfacet = Microfacet(f0=fresnel)
if not args.stratified:
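# uniform-sampling path: evaluate the estimator in chunks of C samples per pixel to bound peak memory, then rescale so the result averages over all N samples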
C = 2048
rgb: torch.Tensor = 0
for i in tqdm(range(0, N, C)):
CC = min(N, i + C) - i # the remaining chunk size (C or smaller)
shading = image_based_lighting(surf, norm, view,
probe, albedo, roughness, microfacet, CC)
rgb = (rgb + shading) # undo normalization and sum
rgb = rgb * CC / N # renormalization
else:
N = math.ceil(N / (args.env_h * args.env_w))
rgb: torch.Tensor = 0
for i in tqdm(range(0, N, 1)):
shading = image_based_lighting(surf, norm, view,
probe, albedo, roughness, microfacet,
H=args.env_h, W=args.env_w, uniform=False, perturb=N > 1)
rgb = (rgb + shading) # undo normalization and sum
rgb = rgb * 1 / N # renormalization
# Save rendered images
img = torch.zeros(H * W, 3, device=rgb.device)
img = multi_scatter(img, ind, rgb).view(H, W, 3)
| # this file is used for testing materials, thus it should support large image files (8k maybe?)
# uniformly sample rays, no need for ray tracing since the ball should not be concerned with multi bounce shading
# current techniques for static objects (articulated objects) include:
# 1. PRT (pre-computed radiance transfer), converts to a bunch of matrix dot product using spherical harmonics
# 2. Split-Sum for prefiltering the environment map and lighting, along with decomposed material node
# 3. Brute-Force shading with random sampling (this is what we're implementing) (used for ray tracing)
# fmt: off
sys.path.append('.')
# fmt: on
parser = argparse.ArgumentParser()
parser.add_argument('--device', type=str, default='cuda')
parser.add_argument('--height', type=int, default=256)
parser.add_argument('--width', type=int, default=256)
parser.add_argument('--sample', type=int, default=10000, help='number of ray samples for the shading results')
parser.add_argument('--fresnel', type=float, default=0.04)
parser.add_argument('--albedo', type=float, default=0.0) # maybe a file for albedo map?
parser.add_argument('--roughness', type=float, default=0.3) # maybe a file for roughness map?
parser.add_argument('--probe', type=str, default='data/lighting/8k/gym_entrance.hdr')
parser.add_argument('--output', type=str, default='data/shading_ball.png')
parser.add_argument('--stratified', action='store_true')
parser.add_argument('--env_h', type=int, default=16)
parser.add_argument('--env_w', type=int, default=32)
args = parser.parse_args()
# Prepare shapes
albedo, roughness, fresnel = args.albedo, args.roughness, args.fresnel
H, W, N = args.height, args.width, args.sample
log(f'Will produce a {colored(f"{H}, {W}", "magenta")} shading ball with {colored(f"{N}", "magenta")} samples for each pixel, albedo: {colored(str(albedo), "magenta")}, roughness: {colored(str(roughness), "magenta")}, fresnel: {colored(str(fresnel), "magenta")}')
# Loading environment map for shading computation
log(f'Loading environment map from {colored(args.probe, "blue")} onto {colored(args.device, "magenta")}')
probe = torch.from_numpy(read_hdr(args.probe)).to(args.device, non_blocking=True)
# Construct the coordinate of the shading ball
zy = torch.stack(torch.meshgrid(torch.arange(H, device=args.device), torch.arange(W, device=args.device), indexing='ij'), dim=-1)
zy = zy - torch.tensor([H / 2, W / 2], device=zy.device)
zy = zy / torch.tensor(min(H, W) / 2, device=zy.device)
zy = zy.flip(dims=[0])
zy = zy.view(-1, 2) # H * W, 2
x = (1 - zy.pow(2).sum(-1)).sqrt()
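# NOTE: pixels outside the unit disk produce NaN here; they are zeroed out below and later rejected by the visibility mask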
x = x.nan_to_num(0)
zyx = torch.cat([zy, x[..., None]], dim=-1)
surf = zyx.flip(dims=[-1]) # H * W, 3
# Construct normal and material for the shading ball
C = torch.tensor([2, 0, 0], device=x.device) # simple perspective projection
norm = normalize(surf) # whatever for invalid regions
view = normalize(surf - C) # camera view direction
# Prepare mask for valid pixels
msk = (norm * view).sum(-1) < 0 # view direction and normal should be opposite
ind = msk.nonzero()
P = ind.shape[0]
surf = multi_gather(surf, ind) # get good pixels to shade on
view = multi_gather(view, ind) # get good pixels to shade on
norm = multi_gather(norm, ind) # get good pixels to shade on
def image_based_lighting(surf: torch.Tensor,
norm: torch.Tensor,
view: torch.Tensor,
probe: dotdict, # lighting
albedo: float, # scalar albedo
roughness: float, # scalar roughness
microfacet: Microfacet, # material
N: int = 1024, # number of samples
H: int = 16,
W: int = 32,
uniform: bool = True, # uniform or stratified sampling
perturb: bool = True,
):
# Generate sample_count uniformly and stratified samples over the sphere
P = surf.shape[0]
if uniform: # uniform random sampling
T = P * N
theta, phi = spherical_uniform_sampling_upper(T, device=surf.device) # T, T,
ray_d = spher2cart(theta, phi) # T, 3, z always bigger than zero
else: # stratified sampling
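# one sample per envmap texel (H * W directions); an optional random rotation (orthogonal Q from a QR decomposition) decorrelates the sample pattern across iterations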
N = H * W
T = P * N
xyz, area = gen_light_xyz(H, W, device=surf.device)
if perturb:
R = torch.rand(3, 3, device=xyz.device)
Q, R = torch.linalg.qr(R) # 3, 3
xyz = xyz @ Q # apply random rotation
xyz, area = xyz.view(-1, 3), area.view(-1, 1)
ray_d = normalize(xyz) # T, 3
# Adding more samples seems to help, but results are still not great for low-roughness surfaces (i.e. this implementation has a hard upper limit for specular surfaces)
# And the influence of visibility is not clear enough; ablation has only been done on one of the characters under one of the novel lighting conditions
# The physical correctness of the distance field soft shadow is questionable
# __import__('ipdb').set_trace()
# torch.testing.assert_allclose(probe.view(-1, 3), sample_envmap_image(probe, ray_d))
xyz = xyz[:, None].expand(N, P, 3).reshape(-1, 3) # T, 3
area = area[:, None].expand(N, P, 1).reshape(-1, 1) # T, 1
ray_d = ray_d[:, None].expand(N, P, 3).reshape(-1, 3) # T, 3
# Preparing shapes
norm = norm[None].expand(N, P, 3).reshape(T, 3) # T, 3
view = view[None].expand(N, P, 3).reshape(T, 3) # T, 3
# Transform ray_d to be pointing upward from normal direction
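# build a per-sample orthonormal frame with cross products: the z axis is the surface normal, so hemisphere directions drawn around +z are rotated from local (tangent) space into world space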
if uniform:
R = torch.zeros([T, 3, 3], device=norm.device)
R[..., 0, 0] = 1.0
R[..., :3, 2] = norm # c2w, z axis is normal direction
R[..., :3, 1] = normalize(torch.cross(R[..., :3, 2], R[..., :3, 0]))
R[..., :3, 0] = normalize(torch.cross(R[..., :3, 1], R[..., :3, 2]))
ray_d = (R @ ray_d[..., None])[..., 0]
# Compute shading
ldot = (ray_d * norm).sum(dim=-1, keepdim=True) # T
light = sample_envmap_image(probe, ray_d)
brdf = microfacet(ray_d, -view, norm, albedo, roughness)
shading = light * ldot * brdf
# Apply area to normalize integration
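# Monte Carlo weighting: uniform hemisphere samples have pdf 1 / (2 * pi), so each of the N samples contributes 2 * pi / N; stratified samples are weighted by the solid angle (area) of their envmap texel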
if uniform:
shading = shading * 2.0 * torch.pi / N
else:
shading = shading * area
shading = shading.view(N, P, -1).sum(dim=-3)
return shading
microfacet = Microfacet(f0=fresnel)
if not args.stratified:
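# uniform-sampling path: evaluate the estimator in chunks of C samples per pixel to bound peak memory, then rescale so the result averages over all N samples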
C = 2048
rgb: torch.Tensor = 0
for i in tqdm(range(0, N, C)):
CC = min(N, i + C) - i # the remaining chunk size (C or smaller)
shading = image_based_lighting(surf, norm, view,
probe, albedo, roughness, microfacet, CC)
rgb = (rgb + shading) # undo normalization and sum
rgb = rgb * CC / N # renormalization
else:
N = math.ceil(N / (args.env_h * args.env_w))
rgb: torch.Tensor = 0
for i in tqdm(range(0, N, 1)):
shading = image_based_lighting(surf, norm, view,
probe, albedo, roughness, microfacet,
H=args.env_h, W=args.env_w, uniform=False, perturb=N > 1)
rgb = (rgb + shading) # undo normalization and sum
rgb = rgb * 1 / N # renormalization
# Save rendered images
img = torch.zeros(H * W, 3, device=rgb.device)
img = multi_scatter(img, ind, rgb).view(H, W, 3) | img = linear2srgb(img) | 5 | 2023-10-17 04:48:46+00:00 | 12k |
codefuse-ai/Test-Agent | chat/server/gradio_web_server_multi.py | [
{
"identifier": "SESSION_EXPIRATION_TIME",
"path": "chat/constants.py",
"snippet": "SESSION_EXPIRATION_TIME = 3600"
},
{
"identifier": "build_side_by_side_ui_anony",
"path": "chat/server/gradio_block_arena_anony.py",
"snippet": "def build_side_by_side_ui_anony(models):\n notice_markdown = \"\"\"\n# ⚔️ Chatbot Arena ⚔️ : Benchmarking LLMs in the Wild\n### Rules\n- Chat with two anonymous models side-by-side and vote for which one is better!\n- You can do multiple turns of conversations before voting.\n- The names of the models will be revealed after your vote. Conversations with identity keywords (e.g., ChatGPT, Bard, Vicuna) or any votes after the names are revealed will not count towards the leaderboard.\n- Click \"Clear history\" to start a new round.\n- | [Blog](https://lmsys.org/blog/2023-05-03-arena/) | [GitHub](https://github.com/lm-sys/FastChat) | [Paper](https://arxiv.org/abs/2306.05685) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/HSWAKCrnFx) |\n\n### Leaderboard\nSee [lmsys/chatbot-arena-leaderboard](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard) or the 4th tab above on this page.\n\n### Terms of use\nBy using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. **The service collects user dialogue data and reserves the right to distribute it under a Creative Commons Attribution (CC-BY) license.** The demo works better on desktop devices with a wide screen.\n\n### Battle\nPlease scroll down and start chatting. The models include both closed-source models (e.g., ChatGPT) and open-source models (e.g., Llama, Vicuna).\n\"\"\"\n\n states = [gr.State() for _ in range(num_sides)]\n model_selectors = [None] * num_sides\n chatbots = [None] * num_sides\n\n gr.Markdown(notice_markdown, elem_id=\"notice_markdown\")\n\n with gr.Box(elem_id=\"share-region-anony\"):\n with gr.Row():\n for i in range(num_sides):\n with gr.Column():\n model_selectors[i] = gr.Markdown(anony_names[i])\n\n with gr.Row():\n for i in range(num_sides):\n label = \"Model A\" if i == 0 else \"Model B\"\n with gr.Column():\n chatbots[i] = gr.Chatbot(\n label=label, elem_id=f\"chatbot\", visible=False, height=550\n )\n\n with gr.Box() as button_row:\n with gr.Row():\n leftvote_btn = gr.Button(value=\"👈 A is better\", interactive=False)\n rightvote_btn = gr.Button(value=\"👉 B is better\", interactive=False)\n tie_btn = gr.Button(value=\"🤝 Tie\", interactive=False)\n bothbad_btn = gr.Button(value=\"👎 Both are bad\", interactive=False)\n\n with gr.Row():\n with gr.Column(scale=20):\n textbox = gr.Textbox(\n show_label=False,\n placeholder=\"Enter text and press ENTER\",\n visible=False,\n container=False,\n )\n with gr.Column(scale=1, min_width=50):\n send_btn = gr.Button(value=\"Send\", visible=False)\n\n with gr.Row() as button_row2:\n regenerate_btn = gr.Button(value=\"🔄 Regenerate\", interactive=False)\n clear_btn = gr.Button(value=\"🗑️ Clear history\", interactive=False)\n share_btn = gr.Button(value=\"📷 Share\")\n\n with gr.Accordion(\"Parameters\", open=False, visible=True) as parameter_row:\n temperature = gr.Slider(\n minimum=0.0,\n maximum=1.0,\n value=0.7,\n step=0.1,\n interactive=True,\n label=\"Temperature\",\n )\n top_p = gr.Slider(\n minimum=0.0,\n maximum=1.0,\n value=1.0,\n step=0.1,\n interactive=True,\n label=\"Top P\",\n )\n max_output_tokens = gr.Slider(\n minimum=16,\n maximum=1024,\n value=512,\n step=64,\n interactive=True,\n label=\"Max output tokens\",\n )\n\n gr.Markdown(acknowledgment_md)\n\n # Register listeners\n btn_list = 
[\n leftvote_btn,\n rightvote_btn,\n tie_btn,\n bothbad_btn,\n regenerate_btn,\n clear_btn,\n ]\n leftvote_btn.click(\n leftvote_last_response,\n states + model_selectors,\n model_selectors + [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],\n )\n rightvote_btn.click(\n rightvote_last_response,\n states + model_selectors,\n model_selectors + [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],\n )\n tie_btn.click(\n tievote_last_response,\n states + model_selectors,\n model_selectors + [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],\n )\n bothbad_btn.click(\n bothbad_vote_last_response,\n states + model_selectors,\n model_selectors + [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],\n )\n regenerate_btn.click(\n regenerate, states, states + chatbots + [textbox] + btn_list\n ).then(\n bot_response_multi,\n states + [temperature, top_p, max_output_tokens],\n states + chatbots + btn_list,\n ).then(\n flash_buttons, [], btn_list\n )\n clear_btn.click(\n clear_history, None, states + chatbots + model_selectors + [textbox] + btn_list\n )\n\n share_js = \"\"\"\nfunction (a, b, c, d) {\n const captureElement = document.querySelector('#share-region-anony');\n html2canvas(captureElement)\n .then(canvas => {\n canvas.style.display = 'none'\n document.body.appendChild(canvas)\n return canvas\n })\n .then(canvas => {\n const image = canvas.toDataURL('image/png')\n const a = document.createElement('a')\n a.setAttribute('download', 'chatbot-arena.png')\n a.setAttribute('href', image)\n a.click()\n canvas.remove()\n });\n return [a, b, c, d];\n}\n\"\"\"\n share_btn.click(share_click, states + model_selectors, [], _js=share_js)\n\n textbox.submit(\n add_text,\n states + model_selectors + [textbox],\n states + chatbots + [textbox] + btn_list,\n ).then(\n bot_response_multi,\n states + [temperature, top_p, max_output_tokens],\n states + chatbots + btn_list,\n ).then(\n flash_buttons, [], btn_list\n )\n\n send_btn.click(\n add_text,\n states + model_selectors + [textbox],\n states + chatbots + [textbox] + btn_list,\n ).then(\n bot_response_multi,\n states + [temperature, top_p, max_output_tokens],\n states + chatbots + btn_list,\n ).then(\n flash_buttons, [], btn_list\n )\n\n return (\n states,\n model_selectors,\n chatbots,\n textbox,\n send_btn,\n button_row,\n button_row2,\n parameter_row,\n )"
},
{
"identifier": "load_demo_side_by_side_anony",
"path": "chat/server/gradio_block_arena_anony.py",
"snippet": "def load_demo_side_by_side_anony(models_, url_params):\n global models\n models = models_\n\n states = (None,) * num_sides\n selector_updates = (\n gr.Markdown.update(visible=True),\n gr.Markdown.update(visible=True),\n )\n\n return (\n states\n + selector_updates\n + (gr.Chatbot.update(visible=True),) * num_sides\n + (\n gr.Textbox.update(visible=True),\n gr.Box.update(visible=True),\n gr.Row.update(visible=True),\n gr.Row.update(visible=True),\n gr.Accordion.update(visible=True),\n )\n )"
},
{
"identifier": "set_global_vars_anony",
"path": "chat/server/gradio_block_arena_anony.py",
"snippet": "def set_global_vars_anony(enable_moderation_):\n global enable_moderation\n enable_moderation = enable_moderation_"
},
{
"identifier": "build_side_by_side_ui_named",
"path": "chat/server/gradio_block_arena_named.py",
"snippet": "def build_side_by_side_ui_named(models):\n notice_markdown = \"\"\"\n# ⚔️ Chatbot Arena ⚔️ : Benchmarking LLMs in the Wild\n### Rules\n- Chat with two models side-by-side and vote for which one is better!\n- You pick the models you want to chat with.\n- You can do multiple turns of conversations before voting.\n- Click \"Clear history\" to start a new round.\n- | [Blog](https://lmsys.org/blog/2023-05-03-arena/) | [GitHub](https://github.com/lm-sys/FastChat) | [Paper](https://arxiv.org/abs/2306.05685) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/HSWAKCrnFx) |\n\n### Terms of use\nBy using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. **The service collects user dialogue data and reserves the right to distribute it under a Creative Commons Attribution (CC-BY) license.** The demo works better on desktop devices with a wide screen.\n\n### Choose two models to chat with (view [leaderboard](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard))\n\"\"\"\n\n states = [gr.State() for _ in range(num_sides)]\n model_selectors = [None] * num_sides\n chatbots = [None] * num_sides\n\n model_description_md = get_model_description_md(models)\n notice = gr.Markdown(\n notice_markdown + model_description_md, elem_id=\"notice_markdown\"\n )\n\n with gr.Box(elem_id=\"share-region-named\"):\n with gr.Row():\n for i in range(num_sides):\n with gr.Column():\n model_selectors[i] = gr.Dropdown(\n choices=models,\n value=models[i] if len(models) > i else \"\",\n interactive=True,\n show_label=False,\n container=False,\n )\n\n with gr.Row():\n for i in range(num_sides):\n label = \"Model A\" if i == 0 else \"Model B\"\n with gr.Column():\n chatbots[i] = gr.Chatbot(\n label=label, elem_id=f\"chatbot\", visible=False, height=550\n )\n\n with gr.Box() as button_row:\n with gr.Row():\n leftvote_btn = gr.Button(value=\"👈 A is better\", interactive=False)\n rightvote_btn = gr.Button(value=\"👉 B is better\", interactive=False)\n tie_btn = gr.Button(value=\"🤝 Tie\", interactive=False)\n bothbad_btn = gr.Button(value=\"👎 Both are bad\", interactive=False)\n\n with gr.Row():\n with gr.Column(scale=20):\n textbox = gr.Textbox(\n show_label=False,\n placeholder=\"Enter text and press ENTER\",\n visible=False,\n container=False,\n )\n with gr.Column(scale=1, min_width=50):\n send_btn = gr.Button(value=\"Send\", visible=False)\n\n with gr.Row() as button_row2:\n regenerate_btn = gr.Button(value=\"🔄 Regenerate\", interactive=False)\n clear_btn = gr.Button(value=\"🗑️ Clear history\", interactive=False)\n share_btn = gr.Button(value=\"📷 Share\")\n\n with gr.Accordion(\"Parameters\", open=False, visible=True) as parameter_row:\n temperature = gr.Slider(\n minimum=0.0,\n maximum=1.0,\n value=0.7,\n step=0.1,\n interactive=True,\n label=\"Temperature\",\n )\n top_p = gr.Slider(\n minimum=0.0,\n maximum=1.0,\n value=1.0,\n step=0.1,\n interactive=True,\n label=\"Top P\",\n )\n max_output_tokens = gr.Slider(\n minimum=16,\n maximum=1024,\n value=512,\n step=64,\n interactive=True,\n label=\"Max output tokens\",\n )\n\n gr.Markdown(acknowledgment_md)\n\n # Register listeners\n btn_list = [\n leftvote_btn,\n rightvote_btn,\n tie_btn,\n bothbad_btn,\n regenerate_btn,\n clear_btn,\n ]\n leftvote_btn.click(\n leftvote_last_response,\n states + 
model_selectors,\n [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],\n )\n rightvote_btn.click(\n rightvote_last_response,\n states + model_selectors,\n [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],\n )\n tie_btn.click(\n tievote_last_response,\n states + model_selectors,\n [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],\n )\n bothbad_btn.click(\n bothbad_vote_last_response,\n states + model_selectors,\n [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],\n )\n regenerate_btn.click(\n regenerate, states, states + chatbots + [textbox] + btn_list\n ).then(\n bot_response_multi,\n states + [temperature, top_p, max_output_tokens],\n states + chatbots + btn_list,\n ).then(\n flash_buttons, [], btn_list\n )\n clear_btn.click(clear_history, None, states + chatbots + [textbox] + btn_list)\n\n share_js = \"\"\"\nfunction (a, b, c, d) {\n const captureElement = document.querySelector('#share-region-named');\n html2canvas(captureElement)\n .then(canvas => {\n canvas.style.display = 'none'\n document.body.appendChild(canvas)\n return canvas\n })\n .then(canvas => {\n const image = canvas.toDataURL('image/png')\n const a = document.createElement('a')\n a.setAttribute('download', 'chatbot-arena.png')\n a.setAttribute('href', image)\n a.click()\n canvas.remove()\n });\n return [a, b, c, d];\n}\n\"\"\"\n share_btn.click(share_click, states + model_selectors, [], _js=share_js)\n\n for i in range(num_sides):\n model_selectors[i].change(\n clear_history, None, states + chatbots + [textbox] + btn_list\n )\n\n textbox.submit(\n add_text,\n states + model_selectors + [textbox],\n states + chatbots + [textbox] + btn_list,\n ).then(\n bot_response_multi,\n states + [temperature, top_p, max_output_tokens],\n states + chatbots + btn_list,\n ).then(\n flash_buttons, [], btn_list\n )\n send_btn.click(\n add_text,\n states + model_selectors + [textbox],\n states + chatbots + [textbox] + btn_list,\n ).then(\n bot_response_multi,\n states + [temperature, top_p, max_output_tokens],\n states + chatbots + btn_list,\n ).then(\n flash_buttons, [], btn_list\n )\n\n return (\n states,\n model_selectors,\n chatbots,\n textbox,\n send_btn,\n button_row,\n button_row2,\n parameter_row,\n )"
},
{
"identifier": "load_demo_side_by_side_named",
"path": "chat/server/gradio_block_arena_named.py",
"snippet": "def load_demo_side_by_side_named(models, url_params):\n states = (None,) * num_sides\n\n model_left = models[0] if len(models) > 0 else \"\"\n if len(models) > 1:\n weights = ([8] * 4 + [4] * 8 + [1] * 32)[: len(models) - 1]\n weights = weights / np.sum(weights)\n model_right = np.random.choice(models[1:], p=weights)\n else:\n model_right = model_left\n\n selector_updates = (\n gr.Dropdown.update(choices=models, value=model_left, visible=True),\n gr.Dropdown.update(choices=models, value=model_right, visible=True),\n )\n\n return (\n states\n + selector_updates\n + (gr.Chatbot.update(visible=True),) * num_sides\n + (\n gr.Textbox.update(visible=True),\n gr.Box.update(visible=True),\n gr.Row.update(visible=True),\n gr.Row.update(visible=True),\n gr.Accordion.update(visible=True),\n )\n )"
},
{
"identifier": "set_global_vars_named",
"path": "chat/server/gradio_block_arena_named.py",
"snippet": "def set_global_vars_named(enable_moderation_):\n global enable_moderation\n enable_moderation = enable_moderation_"
},
{
"identifier": "set_global_vars",
"path": "chat/server/gradio_web_server.py",
"snippet": "class State:\n def __init__(self, model_name):\n def to_gradio_chatbot(self):\n def dict(self):\ndef set_global_vars(controller_url_, enable_moderation_):\ndef get_conv_log_filename():\ndef get_model_list(\n controller_url, register_openai_compatible_models, add_chatgpt, add_claude, add_palm\n):\ndef load_demo_single(models, url_params):\ndef load_demo(url_params, request: gr.Request):\ndef vote_last_response(state, vote_type, model_selector, request: gr.Request):\ndef upvote_last_response(state, model_selector, request: gr.Request):\ndef downvote_last_response(state, model_selector, request: gr.Request):\ndef flag_last_response(state, model_selector, request: gr.Request):\ndef regenerate(state, request: gr.Request):\ndef clear_history(request: gr.Request):\ndef add_text(state, model_selector, text, request: gr.Request):\ndef post_process_code(code):\ndef model_worker_stream_iter(\n conv,\n model_name,\n worker_addr,\n prompt,\n temperature,\n repetition_penalty,\n top_p,\n max_new_tokens,\n):\ndef bot_response(state, temperature, top_p, max_new_tokens, request: gr.Request):\ndef get_model_description_md(models):\ndef build_single_model_ui(models, add_promotion_links=False):\ndef build_demo(models):"
},
{
"identifier": "build_leaderboard_tab",
"path": "chat/server/monitor/monitor.py",
"snippet": "def build_leaderboard_tab(elo_results_file, leaderboard_table_file):\n if elo_results_file is None: # Do live update\n md = \"Loading ...\"\n p1 = p2 = p3 = p4 = None\n else:\n with open(elo_results_file, \"rb\") as fin:\n elo_results = pickle.load(fin)\n\n md = make_leaderboard_md(elo_results)\n p1 = elo_results[\"win_fraction_heatmap\"]\n p2 = elo_results[\"battle_count_heatmap\"]\n p3 = elo_results[\"bootstrap_elo_rating\"]\n p4 = elo_results[\"average_win_rate_bar\"]\n\n md_1 = gr.Markdown(md, elem_id=\"leaderboard_markdown\")\n\n if leaderboard_table_file:\n data = load_leaderboard_table_csv(leaderboard_table_file)\n headers = [\n \"Model\",\n \"Arena Elo rating\",\n \"MT-bench (score)\",\n \"MMLU\",\n \"License\",\n ]\n values = []\n for item in data:\n row = []\n for key in headers:\n value = item[key]\n row.append(value)\n values.append(row)\n values.sort(key=lambda x: -x[1] if not np.isnan(x[1]) else 1e9)\n\n headers[1] = \"⭐ \" + headers[1]\n headers[2] = \"📈 \" + headers[2]\n\n gr.Dataframe(\n headers=headers,\n datatype=[\"markdown\", \"number\", \"number\", \"number\", \"str\"],\n value=values,\n elem_id=\"leaderboard_dataframe\",\n )\n gr.Markdown(\n \"If you want to see more models, please help us [add them](https://github.com/lm-sys/FastChat/blob/main/docs/arena.md#how-to-add-a-new-model).\"\n )\n else:\n pass\n\n gr.Markdown(\n f\"\"\"## More Statistics for Chatbot Arena\\n\nWe added some additional figures to show more statistics. The code for generating them is also included in this [notebook]({notebook_url}).\nPlease note that you may see different orders from different ranking methods. This is expected for models that perform similarly, as demonstrated by the confidence interval in the bootstrap figure. Going forward, we prefer the classical Elo calculation because of its scalability and interpretability. You can find more discussions in this blog [post](https://lmsys.org/blog/2023-05-03-arena/).\n\"\"\"\n )\n\n leader_component_values[:] = [md, p1, p2, p3, p4]\n\n with gr.Row():\n with gr.Column():\n gr.Markdown(\n \"#### Figure 1: Fraction of Model A Wins for All Non-tied A vs. B Battles\"\n )\n plot_1 = gr.Plot(p1, show_label=False)\n with gr.Column():\n gr.Markdown(\n \"#### Figure 2: Battle Count for Each Combination of Models (without Ties)\"\n )\n plot_2 = gr.Plot(p2, show_label=False)\n with gr.Row():\n with gr.Column():\n gr.Markdown(\n \"#### Figure 3: Bootstrap of Elo Estimates (1000 Rounds of Random Sampling)\"\n )\n plot_3 = gr.Plot(p3, show_label=False)\n with gr.Column():\n gr.Markdown(\n \"#### Figure 4: Average Win Rate Against All Other Models (Assuming Uniform Sampling and No Ties)\"\n )\n plot_4 = gr.Plot(p4, show_label=False)\n return [md_1, plot_1, plot_2, plot_3, plot_4]"
},
{
"identifier": "build_logger",
"path": "chat/utils.py",
"snippet": "def build_logger(logger_name, logger_filename):\n def __init__(self, logger, log_level=logging.INFO):\n def __getattr__(self, attr):\n def write(self, buf):\n def flush(self):\ndef disable_torch_init():\ndef get_gpu_memory(max_gpus=None):\ndef violates_moderation(text):\ndef clean_flant5_ckpt(ckpt_path):\ndef pretty_print_semaphore(semaphore):\ndef iter_over_async(\n async_gen: AsyncGenerator, event_loop: AbstractEventLoop\n) -> Generator:\n async def get_next():\ndef detect_language(text: str) -> str:\ndef parse_gradio_auth_creds(filename: str):\ndef is_partial_stop(output: str, stop_str: str):\ndef run_cmd(cmd: str):\ndef is_sentence_complete(output: str):\ndef get_context_length(config):\nclass StreamToLogger(object):\nSEQUENCE_LENGTH_KEYS = [\n \"max_sequence_length\",\n \"seq_length\",\n \"max_position_embeddings\",\n \"max_seq_len\",\n \"model_max_length\",\n]"
}
] | import argparse
import pickle
import time
import gradio as gr
from chat.constants import (
SESSION_EXPIRATION_TIME,
)
from chat.server.gradio_block_arena_anony import (
build_side_by_side_ui_anony,
load_demo_side_by_side_anony,
set_global_vars_anony,
)
from chat.server.gradio_block_arena_named import (
build_side_by_side_ui_named,
load_demo_side_by_side_named,
set_global_vars_named,
)
from chat.server.gradio_web_server import (
set_global_vars,
block_css,
build_single_model_ui,
get_model_list,
load_demo_single,
ip_expiration_dict,
)
from chat.server.monitor.monitor import build_leaderboard_tab
from chat.utils import (
build_logger,
get_window_url_params_js,
parse_gradio_auth_creds,
) | 7,348 | c_list = (
c_states
+ c_model_selectors
+ c_chatbots
+ [
c_textbox,
c_send_btn,
c_button_row,
c_button_row2,
c_parameter_row,
]
)
with gr.Tab("Single Model", id=2):
(
a_state,
a_model_selector,
a_chatbot,
a_textbox,
a_send_btn,
a_button_row,
a_parameter_row,
) = build_single_model_ui(models, add_promotion_links=True)
a_list = [
a_state,
a_model_selector,
a_chatbot,
a_textbox,
a_send_btn,
a_button_row,
a_parameter_row,
]
if elo_results_file:
with gr.Tab("Leaderboard", id=3):
build_leaderboard_tab(elo_results_file, leaderboard_table_file)
url_params = gr.JSON(visible=False)
if args.model_list_mode not in ["once", "reload"]:
raise ValueError(f"Unknown model list mode: {args.model_list_mode}")
demo.load(
load_demo,
[url_params],
[tabs] + a_list + b_list + c_list,
_js=get_window_url_params_js,
)
return demo
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="0.0.0.0")
parser.add_argument("--port", type=int)
parser.add_argument(
"--share",
action="store_true",
help="Whether to generate a public, shareable link.",
)
parser.add_argument(
"--controller-url",
type=str,
default="http://localhost:21001",
help="The address of the controller.",
)
parser.add_argument(
"--concurrency-count",
type=int,
default=10,
help="The concurrency count of the gradio queue.",
)
parser.add_argument(
"--model-list-mode",
type=str,
default="once",
choices=["once", "reload"],
help="Whether to load the model list once or reload the model list every time.",
)
parser.add_argument(
"--moderate", action="store_true", help="Enable content moderation"
)
parser.add_argument(
"--add-chatgpt",
action="store_true",
help="Add OpenAI's ChatGPT models (gpt-3.5-turbo, gpt-4)",
)
parser.add_argument(
"--add-claude",
action="store_true",
help="Add Anthropic's Claude models (claude-2, claude-instant-1)",
)
parser.add_argument(
"--add-palm",
action="store_true",
help="Add Google's PaLM model (PaLM 2 for Chat: chat-bison@001)",
)
parser.add_argument(
"--anony-only-for-proprietary-model",
action="store_true",
help="Only add ChatGPT, Claude, Bard under anony battle tab",
)
parser.add_argument(
"--register-openai-compatible-models",
type=str,
help="Register custom OpenAI API compatible models by loading them from a JSON file",
)
parser.add_argument(
"--gradio-auth-path",
type=str,
help='Set the gradio authentication file path. The file should contain one or more user:password pairs in this format: "u1:p1,u2:p2,u3:p3"',
default=None,
)
parser.add_argument("--elo-results-file", type=str)
parser.add_argument("--leaderboard-table-file", type=str)
args = parser.parse_args()
logger.info(f"args: {args}")
# Set global variables
set_global_vars(args.controller_url, args.moderate)
| """
The gradio demo server with multiple tabs.
It supports chatting with a single model or chatting with two models side-by-side.
"""
logger = build_logger("gradio_web_server_multi", "gradio_web_server_multi.log")
def load_demo(url_params, request: gr.Request):
global models
ip = request.client.host
logger.info(f"load_demo. ip: {ip}. params: {url_params}")
ip_expiration_dict[ip] = time.time() + SESSION_EXPIRATION_TIME
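# map URL query parameters to the tab to open: 0 = battle (anonymous), 1 = side-by-side (named), 2 = single model, 3 = leaderboard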
selected = 0
if "arena" in url_params:
selected = 0
elif "compare" in url_params:
selected = 1
elif "single" in url_params:
selected = 2
elif "leaderboard" in url_params:
selected = 3
if args.model_list_mode == "reload":
if args.anony_only_for_proprietary_model:
models = get_model_list(
args.controller_url,
args.register_openai_compatible_models,
False,
False,
False,
)
else:
models = get_model_list(
args.controller_url,
args.register_openai_compatible_models,
args.add_chatgpt,
args.add_claude,
args.add_palm,
)
single_updates = load_demo_single(models, url_params)
models_anony = list(models)
if args.anony_only_for_proprietary_model:
# Only enable these models in anony battles.
if args.add_chatgpt:
models_anony += ["gpt-4", "gpt-3.5-turbo"]
if args.add_claude:
models_anony += ["claude-2", "claude-instant-1"]
if args.add_palm:
models_anony += ["palm-2"]
side_by_side_anony_updates = load_demo_side_by_side_anony(models_anony, url_params)
side_by_side_named_updates = load_demo_side_by_side_named(models, url_params)
return (
(gr.Tabs.update(selected=selected),)
+ single_updates
+ side_by_side_anony_updates
+ side_by_side_named_updates
)
def build_demo(models, elo_results_file, leaderboard_table_file):
with gr.Blocks(
title="Chat with Open Large Language Models",
theme=gr.themes.Base(),
css=block_css,
) as demo:
with gr.Tabs() as tabs:
with gr.Tab("Chatbot Arena (battle)", id=0):
(
b_states,
b_model_selectors,
b_chatbots,
b_textbox,
b_send_btn,
b_button_row,
b_button_row2,
b_parameter_row,
) = build_side_by_side_ui_anony(models)
b_list = (
b_states
+ b_model_selectors
+ b_chatbots
+ [
b_textbox,
b_send_btn,
b_button_row,
b_button_row2,
b_parameter_row,
]
)
with gr.Tab("Chatbot Arena (side-by-side)", id=1):
(
c_states,
c_model_selectors,
c_chatbots,
c_textbox,
c_send_btn,
c_button_row,
c_button_row2,
c_parameter_row,
) = build_side_by_side_ui_named(models)
c_list = (
c_states
+ c_model_selectors
+ c_chatbots
+ [
c_textbox,
c_send_btn,
c_button_row,
c_button_row2,
c_parameter_row,
]
)
with gr.Tab("Single Model", id=2):
(
a_state,
a_model_selector,
a_chatbot,
a_textbox,
a_send_btn,
a_button_row,
a_parameter_row,
) = build_single_model_ui(models, add_promotion_links=True)
a_list = [
a_state,
a_model_selector,
a_chatbot,
a_textbox,
a_send_btn,
a_button_row,
a_parameter_row,
]
if elo_results_file:
with gr.Tab("Leaderboard", id=3):
build_leaderboard_tab(elo_results_file, leaderboard_table_file)
url_params = gr.JSON(visible=False)
if args.model_list_mode not in ["once", "reload"]:
raise ValueError(f"Unknown model list mode: {args.model_list_mode}")
demo.load(
load_demo,
[url_params],
[tabs] + a_list + b_list + c_list,
_js=get_window_url_params_js,
)
return demo
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="0.0.0.0")
parser.add_argument("--port", type=int)
parser.add_argument(
"--share",
action="store_true",
help="Whether to generate a public, shareable link.",
)
parser.add_argument(
"--controller-url",
type=str,
default="http://localhost:21001",
help="The address of the controller.",
)
parser.add_argument(
"--concurrency-count",
type=int,
default=10,
help="The concurrency count of the gradio queue.",
)
parser.add_argument(
"--model-list-mode",
type=str,
default="once",
choices=["once", "reload"],
help="Whether to load the model list once or reload the model list every time.",
)
parser.add_argument(
"--moderate", action="store_true", help="Enable content moderation"
)
parser.add_argument(
"--add-chatgpt",
action="store_true",
help="Add OpenAI's ChatGPT models (gpt-3.5-turbo, gpt-4)",
)
parser.add_argument(
"--add-claude",
action="store_true",
help="Add Anthropic's Claude models (claude-2, claude-instant-1)",
)
parser.add_argument(
"--add-palm",
action="store_true",
help="Add Google's PaLM model (PaLM 2 for Chat: chat-bison@001)",
)
parser.add_argument(
"--anony-only-for-proprietary-model",
action="store_true",
help="Only add ChatGPT, Claude, Bard under anony battle tab",
)
parser.add_argument(
"--register-openai-compatible-models",
type=str,
help="Register custom OpenAI API compatible models by loading them from a JSON file",
)
parser.add_argument(
"--gradio-auth-path",
type=str,
help='Set the gradio authentication file path. The file should contain one or more user:password pairs in this format: "u1:p1,u2:p2,u3:p3"',
default=None,
)
parser.add_argument("--elo-results-file", type=str)
parser.add_argument("--leaderboard-table-file", type=str)
args = parser.parse_args()
logger.info(f"args: {args}")
# Set global variables
set_global_vars(args.controller_url, args.moderate) | set_global_vars_named(args.moderate) | 6 | 2023-10-20 08:56:20+00:00 | 12k |
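The record above ends right after the CLI arguments are parsed and the global flags are set; the code that actually builds and serves the Gradio app is not part of the excerpt. A minimal sketch of what such a launch step could look like is given below; the empty model list, the queue settings, and the omission of authentication are illustrative assumptions rather than the repository's exact code.

# Hypothetical continuation (assumed, not taken from the record above)
models = []  # placeholder; in practice the list would come from the controller at args.controller_url
demo = build_demo(models, args.elo_results_file, args.leaderboard_table_file)
demo.queue(concurrency_count=args.concurrency_count, api_open=False).launch(
    server_name=args.host, server_port=args.port, share=args.share
)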
thuml/iTransformer | run.py | [
{
"identifier": "Exp_Long_Term_Forecast",
"path": "experiments/exp_long_term_forecasting.py",
"snippet": "class Exp_Long_Term_Forecast(Exp_Basic):\n def __init__(self, args):\n super(Exp_Long_Term_Forecast, self).__init__(args)\n\n def _build_model(self):\n model = self.model_dict[self.args.model].Model(self.args).float()\n\n if self.args.use_multi_gpu and self.args.use_gpu:\n model = nn.DataParallel(model, device_ids=self.args.device_ids)\n return model\n\n def _get_data(self, flag):\n data_set, data_loader = data_provider(self.args, flag)\n return data_set, data_loader\n\n def _select_optimizer(self):\n model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)\n return model_optim\n\n def _select_criterion(self):\n criterion = nn.MSELoss()\n return criterion\n\n def vali(self, vali_data, vali_loader, criterion):\n total_loss = []\n self.model.eval()\n with torch.no_grad():\n for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(vali_loader):\n batch_x = batch_x.float().to(self.device)\n batch_y = batch_y.float()\n\n if 'PEMS' in self.args.data or 'Solar' in self.args.data:\n batch_x_mark = None\n batch_y_mark = None\n else:\n batch_x_mark = batch_x_mark.float().to(self.device)\n batch_y_mark = batch_y_mark.float().to(self.device)\n\n # decoder input\n dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n # encoder - decoder\n if self.args.use_amp:\n with torch.cuda.amp.autocast():\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n else:\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n f_dim = -1 if self.args.features == 'MS' else 0\n outputs = outputs[:, -self.args.pred_len:, f_dim:]\n batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n\n pred = outputs.detach().cpu()\n true = batch_y.detach().cpu()\n\n loss = criterion(pred, true)\n\n total_loss.append(loss)\n total_loss = np.average(total_loss)\n self.model.train()\n return total_loss\n\n def train(self, setting):\n train_data, train_loader = self._get_data(flag='train')\n vali_data, vali_loader = self._get_data(flag='val')\n test_data, test_loader = self._get_data(flag='test')\n\n path = os.path.join(self.args.checkpoints, setting)\n if not os.path.exists(path):\n os.makedirs(path)\n\n time_now = time.time()\n\n train_steps = len(train_loader)\n early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)\n\n model_optim = self._select_optimizer()\n criterion = self._select_criterion()\n\n if self.args.use_amp:\n scaler = torch.cuda.amp.GradScaler()\n\n for epoch in range(self.args.train_epochs):\n iter_count = 0\n train_loss = []\n\n self.model.train()\n epoch_time = time.time()\n for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(train_loader):\n iter_count += 1\n model_optim.zero_grad()\n batch_x = batch_x.float().to(self.device)\n\n batch_y = batch_y.float().to(self.device)\n if 'PEMS' in self.args.data or 'Solar' in self.args.data:\n batch_x_mark = None\n batch_y_mark = None\n else:\n batch_x_mark = batch_x_mark.float().to(self.device)\n batch_y_mark = batch_y_mark.float().to(self.device)\n\n # decoder input\n dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], 
dim=1).float().to(self.device)\n\n # encoder - decoder\n if self.args.use_amp:\n with torch.cuda.amp.autocast():\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n f_dim = -1 if self.args.features == 'MS' else 0\n outputs = outputs[:, -self.args.pred_len:, f_dim:]\n batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n loss = criterion(outputs, batch_y)\n train_loss.append(loss.item())\n else:\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n f_dim = -1 if self.args.features == 'MS' else 0\n outputs = outputs[:, -self.args.pred_len:, f_dim:]\n batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n loss = criterion(outputs, batch_y)\n train_loss.append(loss.item())\n\n if (i + 1) % 100 == 0:\n print(\"\\titers: {0}, epoch: {1} | loss: {2:.7f}\".format(i + 1, epoch + 1, loss.item()))\n speed = (time.time() - time_now) / iter_count\n left_time = speed * ((self.args.train_epochs - epoch) * train_steps - i)\n print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))\n iter_count = 0\n time_now = time.time()\n\n if self.args.use_amp:\n scaler.scale(loss).backward()\n scaler.step(model_optim)\n scaler.update()\n else:\n loss.backward()\n model_optim.step()\n\n print(\"Epoch: {} cost time: {}\".format(epoch + 1, time.time() - epoch_time))\n train_loss = np.average(train_loss)\n vali_loss = self.vali(vali_data, vali_loader, criterion)\n test_loss = self.vali(test_data, test_loader, criterion)\n\n print(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}\".format(\n epoch + 1, train_steps, train_loss, vali_loss, test_loss))\n early_stopping(vali_loss, self.model, path)\n if early_stopping.early_stop:\n print(\"Early stopping\")\n break\n\n adjust_learning_rate(model_optim, epoch + 1, self.args)\n\n # get_cka(self.args, setting, self.model, train_loader, self.device, epoch)\n\n best_model_path = path + '/' + 'checkpoint.pth'\n self.model.load_state_dict(torch.load(best_model_path))\n\n return self.model\n\n def test(self, setting, test=0):\n test_data, test_loader = self._get_data(flag='test')\n if test:\n print('loading model')\n self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth')))\n\n preds = []\n trues = []\n folder_path = './test_results/' + setting + '/'\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n self.model.eval()\n with torch.no_grad():\n for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(test_loader):\n batch_x = batch_x.float().to(self.device)\n batch_y = batch_y.float().to(self.device)\n\n if 'PEMS' in self.args.data or 'Solar' in self.args.data:\n batch_x_mark = None\n batch_y_mark = None\n else:\n batch_x_mark = batch_x_mark.float().to(self.device)\n batch_y_mark = batch_y_mark.float().to(self.device)\n\n # decoder input\n dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n # encoder - decoder\n if self.args.use_amp:\n with torch.cuda.amp.autocast():\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n 
else:\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n f_dim = -1 if self.args.features == 'MS' else 0\n outputs = outputs[:, -self.args.pred_len:, f_dim:]\n batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n outputs = outputs.detach().cpu().numpy()\n batch_y = batch_y.detach().cpu().numpy()\n if test_data.scale and self.args.inverse:\n shape = outputs.shape\n outputs = test_data.inverse_transform(outputs.squeeze(0)).reshape(shape)\n batch_y = test_data.inverse_transform(batch_y.squeeze(0)).reshape(shape)\n\n pred = outputs\n true = batch_y\n\n preds.append(pred)\n trues.append(true)\n if i % 20 == 0:\n input = batch_x.detach().cpu().numpy()\n if test_data.scale and self.args.inverse:\n shape = input.shape\n input = test_data.inverse_transform(input.squeeze(0)).reshape(shape)\n gt = np.concatenate((input[0, :, -1], true[0, :, -1]), axis=0)\n pd = np.concatenate((input[0, :, -1], pred[0, :, -1]), axis=0)\n visual(gt, pd, os.path.join(folder_path, str(i) + '.pdf'))\n\n preds = np.array(preds)\n trues = np.array(trues)\n print('test shape:', preds.shape, trues.shape)\n preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])\n trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])\n print('test shape:', preds.shape, trues.shape)\n\n # result save\n folder_path = './results/' + setting + '/'\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n mae, mse, rmse, mape, mspe = metric(preds, trues)\n print('mse:{}, mae:{}'.format(mse, mae))\n f = open(\"result_long_term_forecast.txt\", 'a')\n f.write(setting + \" \\n\")\n f.write('mse:{}, mae:{}'.format(mse, mae))\n f.write('\\n')\n f.write('\\n')\n f.close()\n\n np.save(folder_path + 'metrics.npy', np.array([mae, mse, rmse, mape, mspe]))\n np.save(folder_path + 'pred.npy', preds)\n np.save(folder_path + 'true.npy', trues)\n\n return\n\n\n def predict(self, setting, load=False):\n pred_data, pred_loader = self._get_data(flag='pred')\n\n if load:\n path = os.path.join(self.args.checkpoints, setting)\n best_model_path = path + '/' + 'checkpoint.pth'\n self.model.load_state_dict(torch.load(best_model_path))\n\n preds = []\n\n self.model.eval()\n with torch.no_grad():\n for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(pred_loader):\n batch_x = batch_x.float().to(self.device)\n batch_y = batch_y.float()\n batch_x_mark = batch_x_mark.float().to(self.device)\n batch_y_mark = batch_y_mark.float().to(self.device)\n\n # decoder input\n dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n # encoder - decoder\n if self.args.use_amp:\n with torch.cuda.amp.autocast():\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n else:\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n outputs = outputs.detach().cpu().numpy()\n if pred_data.scale and self.args.inverse:\n shape = outputs.shape\n outputs = pred_data.inverse_transform(outputs.squeeze(0)).reshape(shape)\n preds.append(outputs)\n\n preds = np.array(preds)\n preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])\n\n # result save\n 
folder_path = './results/' + setting + '/'\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n np.save(folder_path + 'real_prediction.npy', preds)\n\n return"
},
{
"identifier": "Exp_Long_Term_Forecast_Partial",
"path": "experiments/exp_long_term_forecasting_partial.py",
"snippet": "class Exp_Long_Term_Forecast_Partial(Exp_Basic):\n def __init__(self, args):\n super(Exp_Long_Term_Forecast_Partial, self).__init__(args)\n\n def _build_model(self):\n model = self.model_dict[self.args.model].Model(self.args).float()\n\n if self.args.use_multi_gpu and self.args.use_gpu:\n model = nn.DataParallel(model, device_ids=self.args.device_ids)\n return model\n\n def _get_data(self, flag):\n data_set, data_loader = data_provider(self.args, flag)\n return data_set, data_loader\n\n def _select_optimizer(self):\n model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)\n return model_optim\n\n def _select_criterion(self):\n criterion = nn.MSELoss()\n return criterion\n\n def vali(self, vali_data, vali_loader, criterion, partial_train=False):\n total_loss = []\n self.model.eval()\n with torch.no_grad():\n for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(vali_loader):\n batch_x = batch_x.float().to(self.device)\n batch_y = batch_y.float()\n\n if 'PEMS' in self.args.data or 'Solar' in self.args.data:\n batch_x_mark = None\n batch_y_mark = None\n else:\n batch_x_mark = batch_x_mark.float().to(self.device)\n batch_y_mark = batch_y_mark.float().to(self.device)\n\n if partial_train: # we train models with only partial variates from the dataset\n partial_start = self.args.partial_start_index\n partial_end = min(self.args.enc_in + partial_start, batch_x.shape[-1])\n batch_x = batch_x[:, :, partial_start:partial_end]\n batch_y = batch_y[:, :, partial_start:partial_end]\n\n # decoder input\n dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n # encoder - decoder\n if self.args.use_amp:\n with torch.cuda.amp.autocast():\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n else:\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n elif self.args.channel_independence:\n B, Tx, N = batch_x.shape\n _, Ty, _ = dec_inp.shape\n if batch_x_mark == None:\n outputs = self.model(batch_x.permute(0, 2, 1).reshape(B * N, Tx, 1), batch_x_mark, \\\n dec_inp.permute(0, 2, 1).reshape(B * N, Ty, 1), batch_y_mark).reshape(\n B, N, -1).permute(0, 2, 1)\n else:\n outputs = self.model(batch_x.permute(0, 2, 1).reshape(B * N, Tx, 1),\n batch_x_mark.repeat(N, 1, 1), \\\n dec_inp.permute(0, 2, 1).reshape(B * N, Ty, 1),\n batch_y_mark.repeat(N, 1, 1)) \\\n .reshape(B, N, -1).permute(0, 2, 1)\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n f_dim = -1 if self.args.features == 'MS' else 0\n outputs = outputs[:, -self.args.pred_len:, f_dim:]\n batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n\n pred = outputs.detach().cpu()\n true = batch_y.detach().cpu()\n\n loss = criterion(pred, true)\n\n total_loss.append(loss)\n total_loss = np.average(total_loss)\n self.model.train()\n return total_loss\n\n def train(self, setting):\n train_data, train_loader = self._get_data(flag='train')\n vali_data, vali_loader = self._get_data(flag='val')\n test_data, test_loader = self._get_data(flag='test')\n\n path = os.path.join(self.args.checkpoints, setting)\n if not os.path.exists(path):\n os.makedirs(path)\n\n time_now = time.time()\n\n train_steps = len(train_loader)\n early_stopping = EarlyStopping(patience=self.args.patience, 
verbose=True)\n\n model_optim = self._select_optimizer()\n criterion = self._select_criterion()\n\n if self.args.use_amp:\n scaler = torch.cuda.amp.GradScaler()\n\n for epoch in range(self.args.train_epochs):\n iter_count = 0\n train_loss = []\n\n self.model.train()\n epoch_time = time.time()\n for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(train_loader):\n iter_count += 1\n model_optim.zero_grad()\n batch_x = batch_x.float().to(self.device)\n\n batch_y = batch_y.float().to(self.device)\n if 'PEMS' in self.args.data or 'Solar' in self.args.data:\n batch_x_mark = None\n batch_y_mark = None\n else:\n batch_x_mark = batch_x_mark.float().to(self.device)\n batch_y_mark = batch_y_mark.float().to(self.device)\n\n # Variate Generalization training: \n # We train with partial variates (args.enc_in < number of dataset variates)\n # and test the obtained model directly on all variates.\n partial_start = self.args.partial_start_index\n partial_end = min(self.args.enc_in + partial_start, batch_x.shape[-1])\n batch_x = batch_x[:, :, partial_start:partial_end]\n batch_y = batch_y[:, :, partial_start:partial_end]\n # Efficient training strategy: randomly choose part of the variates\n # and only train the model with selected variates in each batch \n if self.args.efficient_training:\n _, _, N = batch_x.shape\n index = np.stack(random.sample(range(N), N))[-self.args.enc_in:]\n batch_x = batch_x[:, :, index]\n batch_y = batch_y[:, :, index]\n\n # decoder input\n dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n\n # encoder - decoder\n if self.args.use_amp:\n with torch.cuda.amp.autocast():\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n f_dim = -1 if self.args.features == 'MS' else 0\n outputs = outputs[:, -self.args.pred_len:, f_dim:]\n batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n loss = criterion(outputs, batch_y)\n train_loss.append(loss.item())\n else:\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n elif self.args.channel_independence:\n B, Tx, N = batch_x.shape\n _, Ty, _ = dec_inp.shape\n if batch_x_mark == None:\n outputs = self.model(batch_x.permute(0, 2, 1).reshape(B * N, Tx, 1), batch_x_mark, \\\n dec_inp.permute(0, 2, 1).reshape(B * N, Ty, 1), batch_y_mark).reshape(\n B, N, -1).permute(0, 2, 1)\n else:\n a = batch_x.permute(0, 2, 1)\n b = batch_x.permute(0, 2, 1).reshape(B * N, Tx, 1)\n outputs = self.model(batch_x.permute(0, 2, 1).reshape(B * N, Tx, 1),\n batch_x_mark.repeat(N, 1, 1), \\\n dec_inp.permute(0, 2, 1).reshape(B * N, Ty, 1),\n batch_y_mark.repeat(N, 1, 1)) \\\n .reshape(B, N, -1).permute(0, 2, 1)\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n f_dim = -1 if self.args.features == 'MS' else 0\n outputs = outputs[:, -self.args.pred_len:, f_dim:]\n batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n loss = criterion(outputs, batch_y)\n train_loss.append(loss.item())\n\n if (i + 1) % 100 == 0:\n print(\"\\titers: {0}, epoch: {1} | loss: {2:.7f}\".format(i + 1, epoch + 1, loss.item()))\n speed = (time.time() - time_now) / iter_count\n left_time = speed * ((self.args.train_epochs - epoch) * train_steps - i)\n print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, 
left_time))\n iter_count = 0\n time_now = time.time()\n\n if self.args.use_amp:\n scaler.scale(loss).backward()\n scaler.step(model_optim)\n scaler.update()\n else:\n loss.backward()\n model_optim.step()\n\n print(\"Epoch: {} cost time: {}\".format(epoch + 1, time.time() - epoch_time))\n train_loss = np.average(train_loss)\n vali_loss = self.vali(vali_data, vali_loader, criterion, partial_train=True)\n test_loss = self.vali(test_data, test_loader, criterion, partial_train=False)\n\n print(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}\".format(\n epoch + 1, train_steps, train_loss, vali_loss, test_loss))\n early_stopping(vali_loss, self.model, path)\n if early_stopping.early_stop:\n print(\"Early stopping\")\n break\n\n adjust_learning_rate(model_optim, epoch + 1, self.args)\n\n best_model_path = path + '/' + 'checkpoint.pth'\n self.model.load_state_dict(torch.load(best_model_path))\n\n return self.model\n\n def test(self, setting, test=0):\n\n test_data, test_loader = self._get_data(flag='test')\n if test:\n print('loading model')\n self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth')))\n\n preds = []\n trues = []\n folder_path = './test_results/' + setting + '/'\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n self.model.eval()\n with torch.no_grad():\n for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(test_loader):\n # During model inference, test the obtained model directly on all variates.\n batch_x = batch_x.float().to(self.device)\n batch_y = batch_y.float().to(self.device)\n\n if 'PEMS' in self.args.data or 'Solar' in self.args.data:\n batch_x_mark = None\n batch_y_mark = None\n else:\n batch_x_mark = batch_x_mark.float().to(self.device)\n batch_y_mark = batch_y_mark.float().to(self.device)\n\n # decoder input\n dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n # encoder - decoder\n if self.args.use_amp:\n with torch.cuda.amp.autocast():\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n else:\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n elif self.args.channel_independence: # compare the result with channel_independence\n B, Tx, N = batch_x.shape\n _, Ty, _ = dec_inp.shape\n if batch_x_mark == None:\n outputs = self.model(batch_x.permute(0, 2, 1).reshape(B * N, Tx, 1), batch_x_mark, \\\n dec_inp.permute(0, 2, 1).reshape(B * N, Ty, 1), batch_y_mark).reshape(\n B, N, -1).permute(0, 2, 1)\n else:\n outputs = self.model(batch_x.permute(0, 2, 1).reshape(B * N, Tx, 1),\n batch_x_mark.repeat(N, 1, 1), \\\n dec_inp.permute(0, 2, 1).reshape(B * N, Ty, 1),\n batch_y_mark.repeat(N, 1, 1)) \\\n .reshape(B, N, -1).permute(0, 2, 1)\n else:\n # directly test the trained model on all variates without fine-tuning.\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n f_dim = -1 if self.args.features == 'MS' else 0\n outputs = outputs[:, -self.args.pred_len:, f_dim:]\n batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n outputs = outputs.detach().cpu().numpy()\n batch_y = batch_y.detach().cpu().numpy()\n if test_data.scale and self.args.inverse:\n shape = outputs.shape\n outputs = 
test_data.inverse_transform(outputs.squeeze(0)).reshape(shape)\n batch_y = test_data.inverse_transform(batch_y.squeeze(0)).reshape(shape)\n\n pred = outputs\n true = batch_y\n\n preds.append(pred)\n trues.append(true)\n if i % 20 == 0:\n input = batch_x.detach().cpu().numpy()\n if test_data.scale and self.args.inverse:\n shape = input.shape\n input = test_data.inverse_transform(input.squeeze(0)).reshape(shape)\n gt = np.concatenate((input[0, :, -1], true[0, :, -1]), axis=0)\n pd = np.concatenate((input[0, :, -1], pred[0, :, -1]), axis=0)\n visual(gt, pd, os.path.join(folder_path, str(i) + '.pdf'))\n\n preds = np.array(preds)\n trues = np.array(trues)\n print('test shape:', preds.shape, trues.shape)\n preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])\n trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])\n print('test shape:', preds.shape, trues.shape)\n\n # result save\n folder_path = './results/' + setting + '/'\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n mae, mse, rmse, mape, mspe = metric(preds, trues)\n print('mse:{}, mae:{}'.format(mse, mae))\n f = open(\"result_long_term_forecast.txt\", 'a')\n f.write(setting + \" \\n\")\n f.write('mse:{}, mae:{}'.format(mse, mae))\n f.write('\\n')\n f.write('\\n')\n f.close()\n\n np.save(folder_path + 'metrics.npy', np.array([mae, mse, rmse, mape, mspe]))\n np.save(folder_path + 'pred.npy', preds)\n np.save(folder_path + 'true.npy', trues)\n\n return\n\n def predict(self, setting, load=False):\n pred_data, pred_loader = self._get_data(flag='pred')\n\n if load:\n path = os.path.join(self.args.checkpoints, setting)\n best_model_path = path + '/' + 'checkpoint.pth'\n self.model.load_state_dict(torch.load(best_model_path))\n\n preds = []\n\n self.model.eval()\n with torch.no_grad():\n for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(pred_loader):\n batch_x = batch_x.float().to(self.device)\n batch_y = batch_y.float()\n batch_x_mark = batch_x_mark.float().to(self.device)\n batch_y_mark = batch_y_mark.float().to(self.device)\n\n # decoder input\n dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n # encoder - decoder\n if self.args.use_amp:\n with torch.cuda.amp.autocast():\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n else:\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n outputs = outputs.detach().cpu().numpy()\n if pred_data.scale and self.args.inverse:\n shape = outputs.shape\n outputs = pred_data.inverse_transform(outputs.squeeze(0)).reshape(shape)\n preds.append(outputs)\n\n preds = np.array(preds)\n preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])\n\n # result save\n folder_path = './results/' + setting + '/'\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n np.save(folder_path + 'real_prediction.npy', preds)\n\n return"
}
] | import argparse
import torch
import random
import numpy as np
from experiments.exp_long_term_forecasting import Exp_Long_Term_Forecast
from experiments.exp_long_term_forecasting_partial import Exp_Long_Term_Forecast_Partial | 9,483 |
if __name__ == '__main__':
fix_seed = 2023
random.seed(fix_seed)
torch.manual_seed(fix_seed)
np.random.seed(fix_seed)
parser = argparse.ArgumentParser(description='iTransformer')
# basic config
parser.add_argument('--is_training', type=int, required=True, default=1, help='status')
parser.add_argument('--model_id', type=str, required=True, default='test', help='model id')
parser.add_argument('--model', type=str, required=True, default='iTransformer',
help='model name, options: [iTransformer, iInformer, iReformer, iFlowformer, iFlashformer]')
# data loader
parser.add_argument('--data', type=str, required=True, default='custom', help='dataset type')
parser.add_argument('--root_path', type=str, default='./data/electricity/', help='root path of the data file')
parser.add_argument('--data_path', type=str, default='electricity.csv', help='data csv file')
parser.add_argument('--features', type=str, default='M',
help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')
parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')
parser.add_argument('--freq', type=str, default='h',
help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')
parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')
# forecasting task
parser.add_argument('--seq_len', type=int, default=96, help='input sequence length')
parser.add_argument('--label_len', type=int, default=48, help='start token length') # no longer needed in inverted Transformers
parser.add_argument('--pred_len', type=int, default=96, help='prediction sequence length')
# model define
parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')
parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')
parser.add_argument('--c_out', type=int, default=7, help='output size') # applicable to an arbitrary number of variates in inverted Transformers
parser.add_argument('--d_model', type=int, default=512, help='dimension of model')
parser.add_argument('--n_heads', type=int, default=8, help='num of heads')
parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')
parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')
parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')
parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')
parser.add_argument('--factor', type=int, default=1, help='attn factor')
parser.add_argument('--distil', action='store_false',
help='whether to use distilling in encoder, using this argument means not using distilling',
default=True)
parser.add_argument('--dropout', type=float, default=0.1, help='dropout')
parser.add_argument('--embed', type=str, default='timeF',
help='time features encoding, options:[timeF, fixed, learned]')
parser.add_argument('--activation', type=str, default='gelu', help='activation')
parser.add_argument('--output_attention', action='store_true', help='whether to output attention in encoder')
parser.add_argument('--do_predict', action='store_true', help='whether to predict unseen future data')
# optimization
parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')
parser.add_argument('--itr', type=int, default=1, help='experiments times')
parser.add_argument('--train_epochs', type=int, default=10, help='train epochs')
parser.add_argument('--batch_size', type=int, default=32, help='batch size of train input data')
parser.add_argument('--patience', type=int, default=3, help='early stopping patience')
parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')
parser.add_argument('--des', type=str, default='test', help='exp description')
parser.add_argument('--loss', type=str, default='MSE', help='loss function')
parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')
parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)
# GPU
parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')
parser.add_argument('--gpu', type=int, default=0, help='gpu')
parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)
parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multiple gpus')
# iTransformer
parser.add_argument('--exp_name', type=str, required=False, default='MTSF',
help='experiment name, options:[MTSF, partial_train]')
parser.add_argument('--channel_independence', type=bool, default=False, help='whether to use channel_independence mechanism')
parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)
parser.add_argument('--class_strategy', type=str, default='projection', help='projection/average/cls_token')
parser.add_argument('--target_root_path', type=str, default='./data/electricity/', help='root path of the data file')
parser.add_argument('--target_data_path', type=str, default='electricity.csv', help='data file')
parser.add_argument('--efficient_training', type=bool, default=False, help='whether to use efficient_training (exp_name should be partial_train)') # See Figure 8 of our paper for details
parser.add_argument('--use_norm', type=int, default=True, help='use norm and denorm')
parser.add_argument('--partial_start_index', type=int, default=0, help='the start index of variates for partial training, '
'you can select [partial_start_index, min(enc_in + partial_start_index, N)]')
args = parser.parse_args()
args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False
if args.use_gpu and args.use_multi_gpu:
args.devices = args.devices.replace(' ', '')
device_ids = args.devices.split(',')
args.device_ids = [int(id_) for id_ in device_ids]
args.gpu = args.device_ids[0]
print('Args in experiment:')
print(args)
if args.exp_name == 'partial_train': # See Figure 8 of our paper for details
Exp = Exp_Long_Term_Forecast_Partial
else: # MTSF: multivariate time series forecasting
|
Exp = Exp_Long_Term_Forecast | 0 | 2023-10-19 03:23:15+00:00 | 12k
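A side note on the argument definitions above: several flags are declared with type=bool (for example --use_gpu, --channel_independence and --efficient_training). argparse applies bool() to the raw command-line string, so any non-empty value, including the literal string "False", parses as True; only an empty string gives False. A minimal, self-contained illustration (independent of the repository) follows.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--use_gpu", type=bool, default=True)
args = parser.parse_args(["--use_gpu", "False"])
print(args.use_gpu)  # prints True, because bool("False") is a non-empty string

In practice such flags are therefore governed by their defaults (or by passing an empty string), which is worth keeping in mind when launching run.py.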
kylesargent/ZeroNVS | threestudio/models/geometry/base.py | [
{
"identifier": "IsosurfaceHelper",
"path": "threestudio/models/isosurface.py",
"snippet": "class IsosurfaceHelper(nn.Module):\n points_range: Tuple[float, float] = (0, 1)\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"N 3\"]:\n raise NotImplementedError"
},
{
"identifier": "MarchingCubeCPUHelper",
"path": "threestudio/models/isosurface.py",
"snippet": "class MarchingCubeCPUHelper(IsosurfaceHelper):\n def __init__(self, resolution: int) -> None:\n super().__init__()\n self.resolution = resolution\n import mcubes\n\n self.mc_func: Callable = mcubes.marching_cubes\n self._grid_vertices: Optional[Float[Tensor, \"N3 3\"]] = None\n self._dummy: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_dummy\", torch.zeros(0, dtype=torch.float32), persistent=False\n )\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"N3 3\"]:\n if self._grid_vertices is None:\n # keep the vertices on CPU so that we can support very large resolution\n x, y, z = (\n torch.linspace(*self.points_range, self.resolution),\n torch.linspace(*self.points_range, self.resolution),\n torch.linspace(*self.points_range, self.resolution),\n )\n x, y, z = torch.meshgrid(x, y, z, indexing=\"ij\")\n verts = torch.cat(\n [x.reshape(-1, 1), y.reshape(-1, 1), z.reshape(-1, 1)], dim=-1\n ).reshape(-1, 3)\n self._grid_vertices = verts\n return self._grid_vertices\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support deformation. Ignoring.\"\n )\n level = -level.view(self.resolution, self.resolution, self.resolution)\n v_pos, t_pos_idx = self.mc_func(\n level.detach().cpu().numpy(), 0.0\n ) # transform to numpy\n v_pos, t_pos_idx = (\n torch.from_numpy(v_pos).float().to(self._dummy.device),\n torch.from_numpy(t_pos_idx.astype(np.int64)).long().to(self._dummy.device),\n ) # transform back to torch tensor on CUDA\n v_pos = v_pos / (self.resolution - 1.0)\n return Mesh(v_pos=v_pos, t_pos_idx=t_pos_idx)"
},
{
"identifier": "MarchingTetrahedraHelper",
"path": "threestudio/models/isosurface.py",
"snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = 
torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh"
},
{
"identifier": "Mesh",
"path": "threestudio/models/mesh.py",
"snippet": "class Mesh:\n def __init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return 
self._t_tex_idx\n\n @property\n def v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def 
set_vertex_color(self, v_rgb):\n assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss"
},
{
"identifier": "BaseModule",
"path": "threestudio/utils/base.py",
"snippet": "class BaseModule(nn.Module, Updateable):\n @dataclass\n class Config:\n weights: Optional[str] = None\n\n cfg: Config # add this to every subclass of BaseModule to enable static type checking\n\n def __init__(\n self, cfg: Optional[Union[dict, DictConfig]] = None, *args, **kwargs\n ) -> None:\n super().__init__()\n self.cfg = parse_structured(self.Config, cfg)\n self.device = get_device()\n self.configure(*args, **kwargs)\n if self.cfg.weights is not None:\n # format: path/to/weights:module_name\n weights_path, module_name = self.cfg.weights.split(\":\")\n state_dict, epoch, global_step = load_module_weights(\n weights_path, module_name=module_name, map_location=\"cpu\"\n )\n self.load_state_dict(state_dict)\n self.do_update_step(\n epoch, global_step, on_load_weights=True\n ) # restore states\n # dummy tensor to indicate model state\n self._dummy: Float[Tensor, \"...\"]\n self.register_buffer(\"_dummy\", torch.zeros(0).float(), persistent=False)\n\n def configure(self, *args, **kwargs) -> None:\n pass"
},
{
"identifier": "chunk_batch",
"path": "threestudio/utils/ops.py",
"snippet": "def chunk_batch(func: Callable, chunk_size: int, *args, **kwargs) -> Any:\n if chunk_size <= 0:\n return func(*args, **kwargs)\n B = None\n for arg in list(args) + list(kwargs.values()):\n if isinstance(arg, torch.Tensor):\n B = arg.shape[0]\n break\n assert (\n B is not None\n ), \"No tensor found in args or kwargs, cannot determine batch size.\"\n out = defaultdict(list)\n out_type = None\n # max(1, B) to support B == 0\n for i in range(0, max(1, B), chunk_size):\n out_chunk = func(\n *[\n arg[i : i + chunk_size] if isinstance(arg, torch.Tensor) else arg\n for arg in args\n ],\n **{\n k: arg[i : i + chunk_size] if isinstance(arg, torch.Tensor) else arg\n for k, arg in kwargs.items()\n },\n )\n if out_chunk is None:\n continue\n out_type = type(out_chunk)\n if isinstance(out_chunk, torch.Tensor):\n out_chunk = {0: out_chunk}\n elif isinstance(out_chunk, tuple) or isinstance(out_chunk, list):\n chunk_length = len(out_chunk)\n out_chunk = {i: chunk for i, chunk in enumerate(out_chunk)}\n elif isinstance(out_chunk, dict):\n pass\n else:\n print(\n f\"Return value of func must be in type [torch.Tensor, list, tuple, dict], get {type(out_chunk)}.\"\n )\n exit(1)\n for k, v in out_chunk.items():\n v = v if torch.is_grad_enabled() else v.detach()\n out[k].append(v)\n\n if out_type is None:\n return None\n\n out_merged: Dict[Any, Optional[torch.Tensor]] = {}\n for k, v in out.items():\n if all([vv is None for vv in v]):\n # allow None in return value\n out_merged[k] = None\n elif all([isinstance(vv, torch.Tensor) for vv in v]):\n out_merged[k] = torch.cat(v, dim=0)\n else:\n raise TypeError(\n f\"Unsupported types in return value of func: {[type(vv) for vv in v if not isinstance(vv, torch.Tensor)]}\"\n )\n\n if out_type is torch.Tensor:\n return out_merged[0]\n elif out_type in [tuple, list]:\n return out_type([out_merged[i] for i in range(chunk_length)])\n elif out_type is dict:\n return out_merged"
},
{
"identifier": "scale_tensor",
"path": "threestudio/utils/ops.py",
"snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat"
}
] | from dataclasses import dataclass, field
from threestudio.models.isosurface import (
IsosurfaceHelper,
MarchingCubeCPUHelper,
MarchingTetrahedraHelper,
)
from threestudio.models.mesh import Mesh
from threestudio.utils.base import BaseModule
from threestudio.utils.ops import chunk_batch, scale_tensor
from threestudio.utils.typing import *
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio | 7,687 |
def contract_to_unisphere(
x: Float[Tensor, "... 3"], bbox: Float[Tensor, "2 3"], unbounded: bool = False
) -> Float[Tensor, "... 3"]:
if unbounded:
# import pdb
# pdb.set_trace()
x = scale_tensor(x, bbox, (0, 1))
x = x * 2 - 1 # aabb is at [-1, 1]
mag = x.norm(dim=-1, keepdim=True)
mask = mag.squeeze(-1) > 1
x = x.clone()
x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])
x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]
else:
x = scale_tensor(x, bbox, (0, 1))
return x
class BaseGeometry(BaseModule):
@dataclass
class Config(BaseModule.Config):
pass
cfg: Config
@staticmethod
def create_from(
other: "BaseGeometry", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs
) -> "BaseGeometry":
raise TypeError(
f"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}"
)
def export(self, *args, **kwargs) -> Dict[str, Any]:
return {}
class BaseImplicitGeometry(BaseGeometry):
@dataclass
class Config(BaseGeometry.Config):
radius: float = 1.0
isosurface: bool = True
isosurface_method: str = "mt"
isosurface_resolution: int = 128
isosurface_threshold: Union[float, str] = 0.0
isosurface_chunk: int = 0
isosurface_coarse_to_fine: bool = True
isosurface_deformable_grid: bool = False
isosurface_remove_outliers: bool = True
isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01
cfg: Config
def configure(self) -> None:
self.bbox: Float[Tensor, "2 3"]
self.register_buffer(
"bbox",
torch.as_tensor(
[
[-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],
[self.cfg.radius, self.cfg.radius, self.cfg.radius],
],
dtype=torch.float32,
),
)
self.isosurface_helper: Optional[IsosurfaceHelper] = None
self.unbounded: bool = True
def _initilize_isosurface_helper(self):
if self.cfg.isosurface and self.isosurface_helper is None:
if self.cfg.isosurface_method == "mc-cpu":
self.isosurface_helper = MarchingCubeCPUHelper(
self.cfg.isosurface_resolution
).to(self.device)
elif self.cfg.isosurface_method == "mt":
|
def contract_to_unisphere(
x: Float[Tensor, "... 3"], bbox: Float[Tensor, "2 3"], unbounded: bool = False
) -> Float[Tensor, "... 3"]:
if unbounded:
# import pdb
# pdb.set_trace()
x = scale_tensor(x, bbox, (0, 1))
x = x * 2 - 1 # aabb is at [-1, 1]
mag = x.norm(dim=-1, keepdim=True)
mask = mag.squeeze(-1) > 1
x = x.clone()
x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])
x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]
else:
x = scale_tensor(x, bbox, (0, 1))
return x
class BaseGeometry(BaseModule):
@dataclass
class Config(BaseModule.Config):
pass
cfg: Config
@staticmethod
def create_from(
other: "BaseGeometry", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs
) -> "BaseGeometry":
raise TypeError(
f"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}"
)
def export(self, *args, **kwargs) -> Dict[str, Any]:
return {}
class BaseImplicitGeometry(BaseGeometry):
@dataclass
class Config(BaseGeometry.Config):
radius: float = 1.0
isosurface: bool = True
isosurface_method: str = "mt"
isosurface_resolution: int = 128
isosurface_threshold: Union[float, str] = 0.0
isosurface_chunk: int = 0
isosurface_coarse_to_fine: bool = True
isosurface_deformable_grid: bool = False
isosurface_remove_outliers: bool = True
isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01
cfg: Config
def configure(self) -> None:
self.bbox: Float[Tensor, "2 3"]
self.register_buffer(
"bbox",
torch.as_tensor(
[
[-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],
[self.cfg.radius, self.cfg.radius, self.cfg.radius],
],
dtype=torch.float32,
),
)
self.isosurface_helper: Optional[IsosurfaceHelper] = None
self.unbounded: bool = True
def _initilize_isosurface_helper(self):
if self.cfg.isosurface and self.isosurface_helper is None:
if self.cfg.isosurface_method == "mc-cpu":
self.isosurface_helper = MarchingCubeCPUHelper(
self.cfg.isosurface_resolution
).to(self.device)
elif self.cfg.isosurface_method == "mt": | self.isosurface_helper = MarchingTetrahedraHelper( | 2 | 2023-10-24 19:02:44+00:00 | 12k |
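Note: the row above ends with the pipe-separated fields next_line (`self.isosurface_helper = MarchingTetrahedraHelper(`), gold_snippet_index (2), created_at (2023-10-24 19:02:44+00:00) and level (12k). A minimal sketch of how that next line continues the cropped `_initilize_isosurface_helper` method follows; only the `elif "mt"` branch header and the opening of the `MarchingTetrahedraHelper(` call are taken from the row itself, while the constructor arguments, the tets-grid path and the `.to(self.device)` call are illustrative assumptions, not part of the row.

    def _initilize_isosurface_helper(self):
        if self.cfg.isosurface and self.isosurface_helper is None:
            if self.cfg.isosurface_method == "mc-cpu":
                self.isosurface_helper = MarchingCubeCPUHelper(
                    self.cfg.isosurface_resolution
                ).to(self.device)
            elif self.cfg.isosurface_method == "mt":
                # next_line from the row; everything after the opening parenthesis is assumed
                self.isosurface_helper = MarchingTetrahedraHelper(
                    self.cfg.isosurface_resolution,
                    f"load/tets/{self.cfg.isosurface_resolution}_tets.npz",  # assumed tets-grid path
                ).to(self.device)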
princeton-nlp/LLM-Shearing | llmshearing/models/composer_pythia.py | [
{
"identifier": "L0Module",
"path": "llmshearing/models/l0_module.py",
"snippet": "class L0Module(nn.Module):\n def __init__(self, cfg, device):\n super(L0Module, self).__init__()\n\n # base and target model info\n n_matrix_mlp = 2 if \"pythia\" in cfg.name else 3\n self.base_model_info = self.set_model_info(cfg, n_matrix_mlp=n_matrix_mlp) \n l0_module_cfg = cfg.l0_module\n self.target_model_info = None\n target_model_cfg = getattr(l0_module_cfg, \"target_model\", None)\n if target_model_cfg is not None:\n self.target_model_info = self.set_model_info(target_model_cfg, n_matrix_mlp=n_matrix_mlp)\n \n # l0 config\n self.pruning_modules = l0_module_cfg.pruning_modules \n self.start_sparsity = l0_module_cfg.start_sparsity \n self.lagrangian_warmup_steps = Time.from_timestring(l0_module_cfg.lagrangian_warmup_steps).value\n self.device = device\n self.eval_target_model = l0_module_cfg.get(\"eval_target_model\", True)\n \n # l0 params\n self.lambdas = {}\n self.lambdas[\"lambda_1\"] = torch.nn.Parameter(torch.tensor(0.0, device=device))\n self.lambdas[\"lambda_2\"] = torch.nn.Parameter(torch.tensor(0.0, device=device))\n self.masks = {}\n for pruning_module in self.pruning_modules:\n self.initialize_one_module(pruning_module)\n self.masks = torch.nn.ModuleDict(self.masks)\n self.lambdas = torch.nn.ParameterDict(self.lambdas)\n \n # config after initialization\n self.prunable_model_size = self.calculate_prunable_model_size(self.base_model_info)\n if target_model_cfg is not None:\n self.prunable_target_model_size = self.calculate_prunable_model_size(self.target_model_info)\n self.target_sparsity = 1 - self.prunable_target_model_size / self.prunable_model_size\n else:\n self.target_sparsity = l0_module_cfg.target_sparsity\n\n print(\"********** Initializing L0 Module **********\") \n for pruning_module in self.pruning_modules:\n print(f\"***** {pruning_module} *****\")\n print(f\"z.shape\", self.masks[pruning_module].z_loga.shape)\n print(f\"size\", self.masks[pruning_module].mask_size)\n print(f\"prunable model size: {self.prunable_model_size}\")\n \n \n def set_model_info(self, cfg, n_matrix_mlp):\n ns = NS() \n ns.hidden_size = cfg.d_model\n ns.intermediate_size = cfg.intermediate_size\n ns.num_attention_heads = cfg.n_heads\n ns.mlp_num_per_layer = 1\n ns.dim_per_head = ns.hidden_size // ns.num_attention_heads \n ns.num_layers = cfg.n_layers\n ns.vocab_size = cfg.vocab_size\n\n ns.params_per_head_layer = ns.hidden_size * ns.hidden_size * 4\n ns.params_per_head = ns.params_per_head_layer // ns.num_attention_heads\n ns.params_per_mlp_layer = ns.hidden_size * ns.intermediate_size * n_matrix_mlp\n ns.params_per_intermediate_dim = ns.params_per_mlp_layer // ns.intermediate_size\n\n ns.full_model_size = (ns.params_per_head_layer + ns.params_per_mlp_layer) * ns.num_layers\n return ns\n \n def calculate_prunable_model_size(self, ns: NS):\n prunable_mlp_size = ns.params_per_mlp_layer * ns.num_layers\n prunable_head_layer_size = ns.params_per_head_layer * ns.num_layers\n prunable_model_size = 0\n if \"hidden\" in self.pruning_modules:\n return prunable_mlp_size + prunable_head_layer_size\n if \"head_layer\" in self.pruning_modules or \"head\" in self.pruning_modules:\n prunable_model_size += prunable_head_layer_size\n if \"mlp\" in self.pruning_modules or \"intermediate\" in self.pruning_modules:\n prunable_model_size += prunable_mlp_size\n return prunable_model_size\n \n def initialize_one_module(self, module_name: str):\n func_name = f\"initialize_{module_name}\"\n try:\n method = getattr(self, func_name)\n except AttributeError:\n raise NotImplementedError(\"Instance `{}` 
does not implement `{}`\".format(self, func_name))\n method()\n \n def initialize_hidden(self):\n mask_shape = [self.base_model_info.hidden_size]\n num_params_per_mask=self.base_model_info.hidden_size * 4 + self.base_model_info.hidden_size * 4 * 2\n \n target_hidden_sparsity = None; pd=None; target_mask_size=None; \n if self.target_model_info is not None:\n target_hidden_sparsity = 1 - self.target_model_info.hidden_size / self.base_model_info.hidden_size\n target_mask_size = self.target_model_info.hidden_size\n pd = {\"lambda_1_hidden\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_hidden\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n hidden_mask = Mask(name=\"hidden\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=[self.base_model_info.hidden_size],\n target_sparsity=target_hidden_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"hidden\"] = hidden_mask\n\n def initialize_head(self):\n mask_shape = [self.base_model_info.num_layers, self.base_model_info.num_attention_heads]\n num_params_per_mask = self.base_model_info.params_per_head\n mask_output_shape = [self.base_model_info.num_layers, 1, self.base_model_info.num_attention_heads, 1] \n \n target_head_sparsity = None; pd = {} ; target_mask_size=None; \n if self.target_model_info is not None:\n target_head_sparsity = 1 - self.target_model_info.num_attention_heads / self.base_model_info.num_attention_heads\n target_mask_size = self.target_model_info.num_attention_heads\n pd = {\"lambda_1_head\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_head\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n head_mask = Mask(name=\"head\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_head_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"head\"] = head_mask \n\n def initialize_qk_head_dim(self): # only campatible when target model info is available\n mask_shape = [self.base_model_info.num_layers, self.base_model_info.num_attention_heads, self.base_model_info.dim_per_head]\n num_params_per_mask = 2 * self.base_model_info.hidden_size\n mask_output_shape = [self.base_model_info.num_layers, self.base_model_info.hidden_size] \n \n target_qk_head_dim_sparsity = None; pd = {} \n if self.target_model_info is not None:\n target_qk_head_dim_sparsity = 1 - self.target_model_info.hidden_size / self.base_model_info.hidden_size\n pd = {\"lambda_1_qk_head_dim\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_qk_head_dim\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n qk_head_dim = Mask(name=\"qk_head_dim\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_qk_head_dim_sparsity,\n target_mask_size=self.target_model_info.hidden_size,\n device=self.device)\n self.masks[\"qk_head_dim\"] = qk_head_dim \n \n \n def initialize_vo_head_dim(self): # only campatible when target model info is available\n mask_shape = [self.base_model_info.num_layers, self.base_model_info.num_attention_heads, self.base_model_info.dim_per_head]\n num_params_per_mask = 2 * self.base_model_info.hidden_size\n mask_output_shape = 
[self.base_model_info.num_layers, self.base_model_info.hidden_size] \n \n target_vo_head_dim_sparsity = None; pd = {} \n if self.target_model_info is not None:\n target_vo_head_dim_sparsity = 1 - self.target_model_info.hidden_size / self.base_model_info.hidden_size\n pd = {\"lambda_1_vo_head_dim\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_vo_head_dim\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n vo_head_dim = Mask(name=\"vo_head_dim\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_vo_head_dim_sparsity,\n device=self.device)\n self.masks[\"vo_head_dim\"] = vo_head_dim \n \n def initialize_head_layer(self):\n mask_shape = [self.base_model_info.num_layers]\n num_params_per_mask=self.base_model_info.params_per_head * self.base_model_info.num_attention_heads\n mask_output_shape = [self.base_model_info.num_layers] \n \n target_head_layer_sparsity = None; pd = {}; target_mask_size=None; \n if self.target_model_info is not None:\n target_head_layer_sparsity = 1 - self.target_model_info.num_layers / self.base_model_info.num_layers\n target_mask_size = self.target_model_info.num_layers\n pd = {\"lambda_1_head_layer\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_head_layer\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n head_layer_mask = Mask(name=\"head_layer\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_head_layer_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"head_layer\"] = head_layer_mask\n \n def initialize_intermediate(self):\n mask_shape = [self.base_model_info.num_layers, self.base_model_info.intermediate_size]\n num_params_per_mask=self.base_model_info.params_per_intermediate_dim\n mask_output_shape = [self.base_model_info.num_layers, 1, 1, self.base_model_info.intermediate_size] \n \n target_int_sparsity = None; pd = {}; target_mask_size=None; \n if self.target_model_info is not None:\n target_int_sparsity = 1 - self.target_model_info.intermediate_size / self.base_model_info.intermediate_size\n target_mask_size = self.target_model_info.intermediate_size\n pd = {\"lambda_1_intermediate\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_intermediate\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n int_mask = Mask(name=\"intermediate\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_int_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"intermediate\"] = int_mask\n \n\n def initialize_mlp(self):\n mask_shape = [self.base_model_info.num_layers]\n num_params_per_mask=self.base_model_info.params_per_mlp_layer\n mask_output_shape = [self.base_model_info.num_layers] \n \n target_mlp_sparsity = None; pd = {}; target_mask_size=None; \n if self.target_model_info is not None:\n target_mlp_sparsity = 1 - self.target_model_info.num_layers / self.base_model_info.num_layers\n target_mask_size = self.target_model_info.num_layers\n pd = {\"lambda_1_mlp\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_mlp\": torch.nn.Parameter(torch.tensor(0.0, 
device=self.device))}\n self.lambdas.update(pd)\n \n mlp_mask = Mask(name=\"mlp\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_mlp_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"mlp\"] = mlp_mask \n\n def initialize_layer(self):\n mask_shape = [self.base_model_info.num_layers]\n num_params_per_mask=self.base_model_info.params_per_head * self.base_model_info.num_attention_heads + self.base_model_info.params_per_mlp_layer\n mask_output_shape = [self.base_model_info.num_layers] \n \n target_layer_sparsity = None; target_mask_size=None; pd = {}\n if self.target_model_info is not None:\n target_layer_sparsity = 1 - self.target_model_info.num_layers / self.base_model_info.num_layers\n target_mask_size = self.target_model_info.num_layers\n pd = {\"lambda_1_layer\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_layer\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n layer_mask = Mask(name=\"layer\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_layer_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model) \n self.masks[\"layer\"] = layer_mask \n \n def constrain_parameters(self):\n for key in self.masks:\n self.masks[key].constrain_parameters()\n\n def calculate_expected_score_sparsity(self):\n expected_scores = {}\n expected_sparsitys = {}\n for key in self.masks:\n score, sparsity = self.masks[key].calculate_expected_score_sparsity()\n expected_scores[key] = score\n expected_sparsitys[key] = sparsity\n return expected_scores, expected_sparsitys\n \n def transform_scores_for_head(self, expected_scores: dict):\n head_score = expected_scores[\"head\"] # 12 * 12\n\n head_layer_score = None\n if \"head_layer\" in expected_scores:\n head_layer_score = expected_scores[\"head_layer\"]\n elif \"layer\" in expected_scores:\n head_layer_score = expected_scores[\"layer\"] # 12\n if head_layer_score is not None:\n head_layer_score = head_layer_score.view(-1, 1) # 12 * 1\n \n return head_layer_score, head_score\n\n def transform_scores_for_mlp(self, expected_scores: dict):\n mlp_score = None\n if \"mlp\" in expected_scores:\n mlp_score = expected_scores[\"mlp\"] # 12\n elif \"layer\" in expected_scores:\n mlp_score = expected_scores[\"layer\"] # 12\n if mlp_score is not None:\n mlp_score = mlp_score.unsqueeze(-1)\n \n intermediate_score = expected_scores[\"intermediate\"] # 12 * 3072\n return mlp_score, intermediate_score\n\n\n def get_expected_num_params(self, expected_scores: dict): #! 
calculate the current parsity\n num_parameters = 0\n \n # 12 * 1 \n # 12 * 12\n head_layer_score, head_score = self.transform_scores_for_head(expected_scores)\n mlp_score, int_score = self.transform_scores_for_mlp(expected_scores)\n \n head_score = (head_layer_score * head_score) # 12 * 12\n int_score = (mlp_score * int_score) # 12 * 3072\n\n qk_score = None\n if \"qk_head_dim\" in expected_scores:\n qk_head_dim_score = expected_scores[\"qk_head_dim\"] # num_layer * hidden_size\n vo_head_dim_score = expected_scores[\"vo_head_dim\"] # num_layer * hidden_size\n qk_head_dim_score = qk_head_dim_score.view(qk_head_dim_score.shape[0], -1) # 12 * 768\n vo_head_dim_score = vo_head_dim_score.view(vo_head_dim_score.shape[0], -1) # 12 * 768\n head_score = torch.repeat_interleave(head_score, self.base_model_info.dim_per_head, dim=1) # 12 * 768\n\n qk_score = head_score * qk_head_dim_score # 12 * 768\n vo_score = head_score * vo_head_dim_score # 12 * 768\n \n if \"hidden\" in expected_scores:\n hidden_score = expected_scores[\"hidden\"] # 768 \n \n if qk_score is None:\n num_parameters += torch.outer(hidden_score, head_score.reshape(-1)).sum() * self.masks.head.num_params_per_mask / self.base_model_info.hidden_size # 768 * 144\n num_parameters += torch.outer(hidden_score, int_score.reshape(-1)).sum() * self.masks.intermediate.num_params_per_mask / self.base_model_info.hidden_size # 768 * 36864\n else:\n num_parameters += torch.sum(torch.matmul(hidden_score.reshape(1, -1, 1), qk_score.unsqueeze(1))) * 2 # 12 * 768 * 768\n num_parameters += torch.sum(torch.matmul(hidden_score.reshape(1, -1, 1), vo_score.unsqueeze(1))) * 2 # 12 * 768 * 768\n num_parameters += torch.sum(torch.matmul(hidden_score.reshape(1, -1, 1), int_score.unsqueeze(1))) * 3 # 12 * 768 * 3072\n else:\n num_parameters += torch.sum(head_score) * self.masks.head.num_params_per_mask\n num_parameters += torch.sum(int_score) * self.masks.intermediate.num_params_per_mask\n return num_parameters\n \n def get_target_sparsity(self, pruned_steps: int, full_sparsity: float = None):\n target_sparsity = full_sparsity\n if getattr(self, \"lagrangian_warmup_steps\", 0) > 0:\n target_sparsity = (target_sparsity - self.start_sparsity) * min(1, pruned_steps / self.lagrangian_warmup_steps) + self.start_sparsity\n return target_sparsity\n\n\n def lagrangian_regularization(self, pruned_steps: int):\n def _lag_loss(expected_sparsity: torch.tensor, target_sparsity: float, lambda_1: torch.tensor, lambda_2: torch.tensor):\n lagrangian_loss = lambda_1 * (expected_sparsity - target_sparsity) + lambda_2 * (expected_sparsity - target_sparsity) ** 2 \n lagrangian_loss = lagrangian_loss.mean()\n return lagrangian_loss\n\n target_sparsity = self.get_target_sparsity(pruned_steps, self.target_sparsity) \n expected_scores, expected_sparsitys = self.calculate_expected_score_sparsity()\n expected_size = self.get_expected_num_params(expected_scores) #! 
calculate \\bar s\n expected_sparsity = 1 - expected_size / self.prunable_model_size\n \n return_v = {}\n if self.target_model_info is None:\n lagrangian_loss = _lag_loss(expected_sparsity, target_sparsity, self.lambdas[\"lambda_1\"], self.lambdas[\"lambda_2\"])\n return_v = {\"expected_sparsity\": expected_sparsity.item(), \"target_sparsity\": target_sparsity}\n for key in expected_sparsitys:\n return_v[f\"expected_{key}_sparsity\"] = expected_sparsitys[key].mean().item()\n else:\n lagrangian_loss = 0\n return_v = {}\n for pruning_module in self.pruning_modules:\n ts = self.get_target_sparsity(pruned_steps, self.masks[pruning_module].target_sparsity)\n expected_ts = expected_sparsitys[pruning_module] \n lagrangian_loss += _lag_loss(expected_ts, ts, self.lambdas[f\"lambda_1_{pruning_module}\"], self.lambdas[f\"lambda_2_{pruning_module}\"])\n expected_ts = expected_ts.mean().item()\n return_v.update({\"expected_{}_sparsity\".format(pruning_module): expected_ts, \"target_{}_sparsity\".format(pruning_module): ts})\n return_v[\"expected_sparsity\"] = expected_sparsity.item()\n return_v[\"target_sparsity\"] = target_sparsity\n\n\n # return_v might not matter\n return lagrangian_loss, return_v\n \n def forward(self, calculate_lagrangian: bool = False, pruned_steps: int = 0):\n self.constrain_parameters()\n if calculate_lagrangian:\n return self.lagrangian_regularization(pruned_steps)\n \n zs = {f\"{pruning_module}_z\": [] for pruning_module in self.pruning_modules}\n \n if \"layer\" in self.pruning_modules:\n zs.pop(\"layer_z\")\n zs[\"mlp_z\"] = []\n zs[\"head_layer_z\"] = []\n \n if self.training:\n for pruning_module in self.pruning_modules:\n mask = self.masks[pruning_module]\n z = mask.sample_z()\n zs[f\"{pruning_module}_z\"] = z\n else: # removed layerwise! \n with torch.no_grad():\n for pruning_module in self.pruning_modules:\n mask = self.masks[pruning_module]\n z = mask.deterministic_z()\n zs[f\"{pruning_module}_z\"] = z\n if \"layer_z\" in zs:\n zs[\"mlp_z\"] = zs.pop(\"layer_z\")\n zs[\"head_layer_z\"] = zs[\"mlp_z\"]\n return zs "
},
{
"identifier": "ComposerMosaicLlama",
"path": "llmshearing/models/composer_llama.py",
"snippet": "class ComposerMosaicLlama(ComposerModel):\n \"\"\" Llama model with the Composer model interface. \"\"\"\n def __init__(self, cfg):\n super().__init__()\n self.model = LlamaModel(cfg)\n self.ref_model = None\n self.num_fwd_flops = self._compute_num_fwd_flops()\n self.train_metrics = {\n 'LanguageCrossEntropy': LanguageCrossEntropy(),\n 'Perplexity': LanguagePerplexity(),\n }\n self.eval_metrics = {\n 'LanguageCrossEntropy': LanguageCrossEntropy(),\n 'Perplexity': LanguagePerplexity(),\n }\n\n self.set_names = getattr(cfg, \"set_names\", None)\n if self.set_names is not None:\n self.set_name_to_id = {set_name: i for i, set_name in enumerate(self.set_names)}\n self.set_id_to_name = {i: set_name for i, set_name in enumerate(self.set_names)}\n \n for set_name in self.set_names:\n # add train and eval metrics for each set\n self.train_metrics[f'{set_name}_LanguageCrossEntropy'] = DomainLanguageCrossEntropy(set_name=set_name)\n self.eval_metrics[f'{set_name}_LanguageCrossEntropy'] = DomainLanguageCrossEntropy(set_name=set_name)\n self.train_metrics[f'{set_name}_count'] = DomainCount(set_name=set_name, set_index=self.set_name_to_id[set_name]) \n\n def prune_params(self, zs=None):\n self.model.prune_params(zs)\n \n def get_targets(self, batch):\n targets = torch.roll(batch['labels'], shifts=-1)\n targets[:, -1] = -100\n return targets\n \n def forward(self, batch):\n input_ids = batch['input_ids']\n key_padding_mask = batch['attention_mask'].bool(\n ) if 'attention_mask' in batch else None\n pruned_steps = batch.get('pruned_steps', None)\n if pruned_steps is not None:\n pruned_steps = pruned_steps[0].item()\n zs = {key: batch[key] for key in batch if \"_z\" in key}\n model_output = self.model(input_ids=input_ids, key_padding_mask=key_padding_mask, pruned_steps=pruned_steps, **zs)\n return model_output\n\n def eval_forward(self, batch, outputs=None):\n return outputs if outputs is not None else self.forward(batch)\n\n def loss(self, outputs, batch):\n logits = outputs[\"logits\"]\n l0_output = outputs[\"l0_output\"]\n targets = self.get_targets(batch)\n\n loss = F.cross_entropy(logits.view(-1, logits.size(-1)),\n targets.view(-1),\n ignore_index=-100)\n return_loss = {\"ce_loss\": loss}\n if l0_output is not None:\n lag_loss = l0_output[0]\n return_loss[\"lag_loss\"] = lag_loss\n return_loss[\"total\"] = sum(return_loss.values())\n return return_loss\n\n def get_metrics(self, is_train=False):\n return self.train_metrics if is_train else self.eval_metrics\n\n def update_metric(self, batch, outputs, metric) -> None:\n logits = outputs[\"logits\"]\n if isinstance(metric, DomainLanguageCrossEntropy):\n targets = self.get_targets(batch)\n set_id = self.set_name_to_id[metric.set_name]\n targets[batch[\"set\"] != set_id] = -100\n metric.update(logits, targets)\n elif isinstance(metric, DomainCount):\n with torch.inference_mode():\n idx = None\n selected_sets = batch['set']\n metric.update(selected_sets, idx)\n else:\n logits = logits.view(-1, logits.size(-1))\n targets = self.get_targets(batch).view(-1)\n metric.update(logits, targets)\n\n def add_eval_metrics(self, evaluator):\n evaluator_metrics = {\n m: METRIC_DEFAULT_CTORS[m]() for m in evaluator.metric_names\n }\n if self.eval_metrics is not None:\n self.eval_metrics.update(evaluator_metrics)\n else:\n self.eval_metrics = evaluator_metrics\n\n def _compute_num_fwd_flops(self):\n # Might not be correct for LLaMA structures\n n_params = sum(p.numel() for p in self.parameters())\n # the number of paramters is approximately the number of 
multiply-accumulates (MAC) in the network\n # each MAC has 2 FLOPs - we multiply by 2 ie 2 * n_param\n # this gets us FLOPs / token\n params_flops_per_token = 2 * n_params\n params_flops_per_seq = params_flops_per_token * self.model.cfg.max_seq_len\n # there are 2 FLOPS per mac; there is A=Q*K^T and out=A*V ops (ie mult by 2)\n attn_flops_per_seq = self.model.cfg.n_layers * 2 * 2 * (\n self.model.cfg.d_model * (self.model.cfg.max_seq_len**2))\n return params_flops_per_seq + attn_flops_per_seq\n\n def flops_per_batch(self, batch):\n # Note: this computation does not take into account padding, and assumes\n # that the dataset has been constructed without padding. Additionally, we\n # assume the backward pass is approximately 2x the forward pass\n return self.num_fwd_flops * 3 * batch['input_ids'].shape[0]\n\n def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:\n if new_num_tokens is not None:\n self.model._resize_token_embeddings(new_num_tokens)"
},
{
"identifier": "prepare_decoder_attention_mask",
"path": "llmshearing/models/composer_llama.py",
"snippet": "def prepare_decoder_attention_mask(input_shape, inputs_embeds):\n # create causal mask\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n combined_attention_mask = None\n if input_shape[-1] > 1:\n combined_attention_mask = _make_causal_mask(input_shape, inputs_embeds.dtype).to(inputs_embeds.device)\n\n return combined_attention_mask"
},
{
"identifier": "turn_head_z",
"path": "llmshearing/models/composer_llama.py",
"snippet": "def turn_head_z(head_z, head_layer_z):\n head_z = head_z.squeeze().clone()\n if head_layer_z is not None:\n head_z *= head_layer_z\n to_prune_heads = torch.where(head_z == 0)[0].view(-1).tolist()\n return to_prune_heads"
},
{
"identifier": "turn_mlp_z",
"path": "llmshearing/models/composer_llama.py",
"snippet": "def turn_mlp_z(intermediate_z, mlp_z):\n intermediate_z_layer = intermediate_z.squeeze().clone()\n if mlp_z is not None:\n intermediate_z_layer *= mlp_z\n keep_intermediate_dims = torch.where(intermediate_z_layer != 0)[0].tolist()\n return keep_intermediate_dims "
},
{
"identifier": "normal_attn_fn",
"path": "llmshearing/models/composer_llama.py",
"snippet": "def normal_attn_fn(\n query,\n key, \n value,\n attention_mask=None,\n head_z=None\n):\n bsz, n_heads, q_len, head_dim = query.shape\n dim = n_heads * head_dim\n attn_weights = torch.matmul(query, key.transpose(2, 3)) / math.sqrt(head_dim)\n attn_weights = attn_weights + attention_mask\n attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))\n\n # upcast attention to fp32\n attn_weights = torch.nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)\n attn_output = torch.matmul(attn_weights, value) # (bsz, n_heads, q_len, head_dim)\n if head_z is not None:\n attn_output *= head_z.unsqueeze(-1)\n attn_output = attn_output.transpose(1, 2)\n attn_output = attn_output.reshape(bsz, q_len, dim)\n return attn_output"
},
{
"identifier": "flash_attn_fn",
"path": "llmshearing/models/composer_llama.py",
"snippet": "def flash_attn_fn(\n query,\n key,\n value,\n softmax_scale=None,\n attn_bias=None,\n query_padding_mask=None,\n key_padding_mask=None,\n is_causal=False,\n dropout_p=0.0,\n training=False,\n needs_weights=False,\n head_z=None,\n \n):\n try:\n from flash_attn import bert_padding # type: ignore\n from flash_attn import flash_attn_interface # type: ignore\n except ImportError as e:\n raise e\n\n # check_valid_inputs(query, key, value)\n\n if attn_bias is not None:\n raise NotImplementedError(f'attn_bias not implemented for flash attn.')\n\n batch_size, seqlen = query.shape[:2]\n\n if query_padding_mask is None:\n query_padding_mask = torch.ones((batch_size, seqlen), dtype=torch.bool, device=query.device)\n if key_padding_mask is None:\n key_padding_mask = torch.ones((batch_size, seqlen), dtype=torch.bool, device=key.device)\n\n query_unpad, indices_q, cu_seqlens_q, max_seqlen_q = bert_padding.unpad_input(\n query, query_padding_mask)\n # query_unpad = rearrange(query_unpad, 'nnz (h d) -> nnz h d', h=n_heads)\n\n key_unpad, _, cu_seqlens_k, max_seqlen_k = bert_padding.unpad_input(\n key, key_padding_mask)\n # key_unpad = rearrange(key_unpad, 'nnz (h d) -> nnz h d', h=n_heads)\n\n value_unpad, _, _, _ = bert_padding.unpad_input(value, key_padding_mask)\n # value_unpad = rearrange(value_unpad, 'nnz (h d) -> nnz h d', h=n_heads)\n\n dropout_p = dropout_p if training else 0.0\n \n output_unpad = flash_attn_interface.flash_attn_unpadded_func(\n query_unpad,\n key_unpad,\n value_unpad,\n cu_seqlens_q,\n cu_seqlens_k,\n max_seqlen_q,\n max_seqlen_k,\n dropout_p,\n softmax_scale=softmax_scale,\n causal=is_causal,\n return_attn_probs=needs_weights)\n\n if head_z is not None:\n output_unpad = output_unpad * head_z # 1 * h * 1\n output = bert_padding.pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices_q, batch_size, seqlen)\n return output, None"
}
] | import math
import torch
import torch.nn as nn
from typing import List, Optional, Tuple
from einops import rearrange
from omegaconf import DictConfig
from torch.nn import functional as F
from transformers.pytorch_utils import (find_pruneable_heads_and_indices,
prune_linear_layer)
from llmshearing.models.l0_module import L0Module
from llmshearing.models.composer_llama import ComposerMosaicLlama, prepare_decoder_attention_mask, turn_head_z, turn_mlp_z, normal_attn_fn, flash_attn_fn
from transformers.models.gpt_neox.modeling_gpt_neox import apply_rotary_pos_emb | 8,487 |
class ComposerMosaicPythia(ComposerMosaicLlama):
def __init__(self, cfg):
super().__init__(cfg)
self.model = PythiaModel(cfg)
class CoFiLayerNorm(torch.nn.LayerNorm):
def __init__(self, normalized_shape, eps: float = 1e-5, elementwise_affine: bool = True, device=None) -> None:
super().__init__(normalized_shape, eps, elementwise_affine, device)
def forward(self, input, hidden_z=None):
if hidden_z is not None:
remaining_index = torch.where(~hidden_z.eq(0))[0]
compressed_input = torch.index_select(
input, dim=-1, index=remaining_index)
compressed_weight = self.weight[remaining_index]
compressed_bias = self.bias[remaining_index]
normalized_shape = len(remaining_index)
normed_input = F.layer_norm(
compressed_input, [normalized_shape], compressed_weight, compressed_bias, self.eps)
output = input.clone()
normed_input = normed_input.to(output.dtype)
output[..., remaining_index] = normed_input
else:
output = F.layer_norm(
input, self.normalized_shape, self.weight, self.bias, self.eps)
return output
def prune_params(self, hidden_z):
remaining_index = torch.where(~hidden_z.eq(0))[0]
# self.weight = torch.nn.Parameter(self.weight.data.mul(hidden_z.squeeze())[remaining_index])
self.weight = torch.nn.parameter.Parameter(self.weight.index_select(0, remaining_index))
self.bias = torch.nn.parameter.Parameter(self.bias.index_select(0, remaining_index))
self.normalized_shape = (len(remaining_index),)
class PythiaEmbedding(nn.Embedding):
def forward(self, input, hidden_z=None):
embeddings = super().forward(input)
if hidden_z is not None:
embeddings = embeddings.mul(hidden_z)
return embeddings
def prune_params(self, hidden_z):
remaining_index = torch.where(~hidden_z.eq(0))[0]
self.weight.data = self.weight.data.mul(hidden_z)
self.weight = torch.nn.parameter.Parameter(self.weight.index_select(1, remaining_index).clone())
self.embedding_dim = len(remaining_index)
print(f" Embedding: {len(hidden_z)} -> {len(remaining_index)}")
class PythiaModel(nn.Module):
def __init__(self, cfg: DictConfig):
super().__init__()
print(f'Tried to build Pythia model with cfg.name={cfg.name}')
self.cfg = cfg
### added ###
self.l0_module = None
if getattr(self.cfg, "l0_module", None) is not None:
|
class ComposerMosaicPythia(ComposerMosaicLlama):
def __init__(self, cfg):
super().__init__(cfg)
self.model = PythiaModel(cfg)
class CoFiLayerNorm(torch.nn.LayerNorm):
def __init__(self, normalized_shape, eps: float = 1e-5, elementwise_affine: bool = True, device=None) -> None:
super().__init__(normalized_shape, eps, elementwise_affine, device)
def forward(self, input, hidden_z=None):
if hidden_z is not None:
remaining_index = torch.where(~hidden_z.eq(0))[0]
compressed_input = torch.index_select(
input, dim=-1, index=remaining_index)
compressed_weight = self.weight[remaining_index]
compressed_bias = self.bias[remaining_index]
normalized_shape = len(remaining_index)
normed_input = F.layer_norm(
compressed_input, [normalized_shape], compressed_weight, compressed_bias, self.eps)
output = input.clone()
normed_input = normed_input.to(output.dtype)
output[..., remaining_index] = normed_input
else:
output = F.layer_norm(
input, self.normalized_shape, self.weight, self.bias, self.eps)
return output
def prune_params(self, hidden_z):
remaining_index = torch.where(~hidden_z.eq(0))[0]
# self.weight = torch.nn.Parameter(self.weight.data.mul(hidden_z.squeeze())[remaining_index])
self.weight = torch.nn.parameter.Parameter(self.weight.index_select(0, remaining_index))
self.bias = torch.nn.parameter.Parameter(self.bias.index_select(0, remaining_index))
self.normalized_shape = (len(remaining_index),)
class PythiaEmbedding(nn.Embedding):
def forward(self, input, hidden_z=None):
embeddings = super().forward(input)
if hidden_z is not None:
embeddings = embeddings.mul(hidden_z)
return embeddings
def prune_params(self, hidden_z):
remaining_index = torch.where(~hidden_z.eq(0))[0]
self.weight.data = self.weight.data.mul(hidden_z)
self.weight = torch.nn.parameter.Parameter(self.weight.index_select(1, remaining_index).clone())
self.embedding_dim = len(remaining_index)
print(f" Embedding: {len(hidden_z)} -> {len(remaining_index)}")
class PythiaModel(nn.Module):
def __init__(self, cfg: DictConfig):
super().__init__()
print(f'Tried to build Pythia model with cfg.name={cfg.name}')
self.cfg = cfg
### added ###
self.l0_module = None
if getattr(self.cfg, "l0_module", None) is not None: | self.l0_module = L0Module(self.cfg, device=cfg.init_device) | 0 | 2023-10-16 12:26:08+00:00 | 12k |
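Note: the LLM-Shearing row above ends with next_line (`self.l0_module = L0Module(self.cfg, device=cfg.init_device)`), gold_snippet_index 0 (the first entry of this row's context list, the `L0Module` snippet), created_at (2023-10-16 12:26:08+00:00) and level (12k). A minimal sketch of the completed tail of `PythiaModel.__init__`, assembled only from lines that already appear in the row:

        ### added ###
        self.l0_module = None
        if getattr(self.cfg, "l0_module", None) is not None:
            self.l0_module = L0Module(self.cfg, device=cfg.init_device)  # the row's next_line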
hkchengrex/Cutie | process_video.py | [
{
"identifier": "CUTIE",
"path": "cutie/model/cutie.py",
"snippet": "class CUTIE(nn.Module):\n def __init__(self, cfg: DictConfig, *, single_object=False):\n super().__init__()\n model_cfg = cfg.model\n self.ms_dims = model_cfg.pixel_encoder.ms_dims\n self.key_dim = model_cfg.key_dim\n self.value_dim = model_cfg.value_dim\n self.sensory_dim = model_cfg.sensory_dim\n self.pixel_dim = model_cfg.pixel_dim\n self.embed_dim = model_cfg.embed_dim\n self.single_object = single_object\n\n log.info(f'Single object: {self.single_object}')\n\n self.pixel_encoder = PixelEncoder(model_cfg)\n self.pix_feat_proj = nn.Conv2d(self.ms_dims[0], self.pixel_dim, kernel_size=1)\n self.key_proj = KeyProjection(model_cfg)\n self.mask_encoder = MaskEncoder(model_cfg, single_object=single_object)\n self.mask_decoder = MaskDecoder(model_cfg)\n self.pixel_fuser = PixelFeatureFuser(model_cfg, single_object=single_object)\n self.object_transformer = QueryTransformer(model_cfg)\n self.object_summarizer = ObjectSummarizer(model_cfg)\n self.aux_computer = AuxComputer(cfg)\n\n self.register_buffer(\"pixel_mean\", torch.Tensor(model_cfg.pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(model_cfg.pixel_std).view(-1, 1, 1), False)\n\n def _get_others(self, masks: torch.Tensor) -> torch.Tensor:\n # for each object, return the sum of masks of all other objects\n if self.single_object:\n return None\n\n num_objects = masks.shape[1]\n if num_objects >= 1:\n others = (masks.sum(dim=1, keepdim=True) - masks).clamp(0, 1)\n else:\n others = torch.zeros_like(masks)\n return others\n\n def encode_image(self, image: torch.Tensor) -> (Iterable[torch.Tensor], torch.Tensor):\n image = (image - self.pixel_mean) / self.pixel_std\n ms_image_feat = self.pixel_encoder(image)\n return ms_image_feat, self.pix_feat_proj(ms_image_feat[0])\n\n def encode_mask(\n self,\n image: torch.Tensor,\n ms_features: List[torch.Tensor],\n sensory: torch.Tensor,\n masks: torch.Tensor,\n *,\n deep_update: bool = True,\n chunk_size: int = -1,\n need_weights: bool = False) -> (torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor):\n image = (image - self.pixel_mean) / self.pixel_std\n others = self._get_others(masks)\n mask_value, new_sensory = self.mask_encoder(image,\n ms_features,\n sensory,\n masks,\n others,\n deep_update=deep_update,\n chunk_size=chunk_size)\n object_summaries, object_logits = self.object_summarizer(masks, mask_value, need_weights)\n return mask_value, new_sensory, object_summaries, object_logits\n\n def transform_key(self,\n final_pix_feat: torch.Tensor,\n *,\n need_sk: bool = True,\n need_ek: bool = True) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n key, shrinkage, selection = self.key_proj(final_pix_feat, need_s=need_sk, need_e=need_ek)\n return key, shrinkage, selection\n\n # Used in training only.\n # This step is replaced by MemoryManager in test time\n def read_memory(self, query_key: torch.Tensor, query_selection: torch.Tensor,\n memory_key: torch.Tensor, memory_shrinkage: torch.Tensor,\n msk_value: torch.Tensor, obj_memory: torch.Tensor, pix_feat: torch.Tensor,\n sensory: torch.Tensor, last_mask: torch.Tensor,\n selector: torch.Tensor) -> (torch.Tensor, Dict[str, torch.Tensor]):\n \"\"\"\n query_key : B * CK * H * W\n query_selection : B * CK * H * W\n memory_key : B * CK * T * H * W\n memory_shrinkage: B * 1 * T * H * W\n msk_value : B * num_objects * CV * T * H * W\n obj_memory : B * num_objects * T * num_summaries * C\n pixel_feature : B * C * H * W\n \"\"\"\n batch_size, num_objects = msk_value.shape[:2]\n\n # read using visual attention\n with 
torch.cuda.amp.autocast(enabled=False):\n affinity = get_affinity(memory_key.float(), memory_shrinkage.float(), query_key.float(),\n query_selection.float())\n\n msk_value = msk_value.flatten(start_dim=1, end_dim=2).float()\n\n # B * (num_objects*CV) * H * W\n pixel_readout = readout(affinity, msk_value)\n pixel_readout = pixel_readout.view(batch_size, num_objects, self.value_dim,\n *pixel_readout.shape[-2:])\n pixel_readout = self.pixel_fusion(pix_feat, pixel_readout, sensory, last_mask)\n\n # read from query transformer\n mem_readout, aux_features = self.readout_query(pixel_readout, obj_memory, selector=selector)\n\n aux_output = {\n 'sensory': sensory,\n 'q_logits': aux_features['logits'] if aux_features else None,\n 'attn_mask': aux_features['attn_mask'] if aux_features else None,\n }\n\n return mem_readout, aux_output\n\n def pixel_fusion(self,\n pix_feat: torch.Tensor,\n pixel: torch.Tensor,\n sensory: torch.Tensor,\n last_mask: torch.Tensor,\n *,\n chunk_size: int = -1) -> torch.Tensor:\n last_mask = F.interpolate(last_mask, size=sensory.shape[-2:], mode='area')\n last_others = self._get_others(last_mask)\n fused = self.pixel_fuser(pix_feat,\n pixel,\n sensory,\n last_mask,\n last_others,\n chunk_size=chunk_size)\n return fused\n\n def readout_query(self,\n pixel_readout,\n obj_memory,\n *,\n selector=None,\n need_weights=False) -> (torch.Tensor, Dict[str, torch.Tensor]):\n return self.object_transformer(pixel_readout,\n obj_memory,\n selector=selector,\n need_weights=need_weights)\n\n def segment(self,\n ms_image_feat: List[torch.Tensor],\n memory_readout: torch.Tensor,\n sensory: torch.Tensor,\n *,\n selector: bool = None,\n chunk_size: int = -1,\n update_sensory: bool = True) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n \"\"\"\n multi_scale_features is from the key encoder for skip-connection\n memory_readout is from working/long-term memory\n sensory is the sensory memory\n last_mask is the mask from the last frame, supplementing sensory memory\n selector is 1 if an object exists, and 0 otherwise. 
We use it to filter padded objects\n during training.\n \"\"\"\n sensory, logits = self.mask_decoder(ms_image_feat,\n memory_readout,\n sensory,\n chunk_size=chunk_size,\n update_sensory=update_sensory)\n\n prob = torch.sigmoid(logits)\n if selector is not None:\n prob = prob * selector\n\n # Softmax over all objects[]\n logits = aggregate(prob, dim=1)\n logits = F.interpolate(logits, scale_factor=4, mode='bilinear', align_corners=False)\n prob = F.softmax(logits, dim=1)\n\n return sensory, logits, prob\n\n def compute_aux(self, pix_feat: torch.Tensor, aux_inputs: Dict[str, torch.Tensor],\n selector: torch.Tensor) -> Dict[str, torch.Tensor]:\n return self.aux_computer(pix_feat, aux_inputs, selector)\n\n def forward(self, *args, **kwargs):\n raise NotImplementedError\n\n def load_weights(self, src_dict, init_as_zero_if_needed=False) -> None:\n if not self.single_object:\n # Map single-object weight to multi-object weight (4->5 out channels in conv1)\n for k in list(src_dict.keys()):\n if k == 'mask_encoder.conv1.weight':\n if src_dict[k].shape[1] == 4:\n log.info(f'Converting {k} from single object to multiple objects.')\n pads = torch.zeros((64, 1, 7, 7), device=src_dict[k].device)\n if not init_as_zero_if_needed:\n nn.init.orthogonal_(pads)\n log.info(f'Randomly initialized padding for {k}.')\n else:\n log.info(f'Zero-initialized padding for {k}.')\n src_dict[k] = torch.cat([src_dict[k], pads], 1)\n elif k == 'pixel_fuser.sensory_compress.weight':\n if src_dict[k].shape[1] == self.sensory_dim + 1:\n log.info(f'Converting {k} from single object to multiple objects.')\n pads = torch.zeros((self.value_dim, 1, 1, 1), device=src_dict[k].device)\n if not init_as_zero_if_needed:\n nn.init.orthogonal_(pads)\n log.info(f'Randomly initialized padding for {k}.')\n else:\n log.info(f'Zero-initialized padding for {k}.')\n src_dict[k] = torch.cat([src_dict[k], pads], 1)\n elif self.single_object:\n \"\"\"\n If the model is multiple-object and we are training in single-object, \n we strip the last channel of conv1.\n This is not supposed to happen in standard training except when users are trying to\n finetune a trained model with single object datasets.\n \"\"\"\n if src_dict['mask_encoder.conv1.weight'].shape[1] == 5:\n log.warning(f'Converting {k} from multiple objects to single object.'\n 'This is not supposed to happen in standard training.')\n src_dict[k] = src_dict[k][:, :-1]\n\n for k in src_dict:\n if k not in self.state_dict():\n log.info(f'Key {k} found in src_dict but not in self.state_dict()!!!')\n for k in self.state_dict():\n if k not in src_dict:\n log.info(f'Key {k} found in self.state_dict() but not in src_dict!!!')\n\n self.load_state_dict(src_dict, strict=False)\n\n @property\n def device(self) -> torch.device:\n return self.pixel_mean.device"
},
{
"identifier": "InferenceCore",
"path": "cutie/inference/inference_core.py",
"snippet": "class InferenceCore:\n def __init__(self,\n network: CUTIE,\n cfg: DictConfig,\n *,\n image_feature_store: ImageFeatureStore = None):\n self.network = network\n self.cfg = cfg\n self.mem_every = cfg.mem_every\n stagger_updates = cfg.stagger_updates\n self.chunk_size = cfg.chunk_size\n self.save_aux = cfg.save_aux\n self.max_internal_size = cfg.max_internal_size\n self.flip_aug = cfg.flip_aug\n\n self.curr_ti = -1\n self.last_mem_ti = 0\n # at which time indices should we update the sensory memory\n if stagger_updates >= self.mem_every:\n self.stagger_ti = set(range(1, self.mem_every + 1))\n else:\n self.stagger_ti = set(\n np.round(np.linspace(1, self.mem_every, stagger_updates)).astype(int))\n self.object_manager = ObjectManager()\n self.memory = MemoryManager(cfg=cfg, object_manager=self.object_manager)\n\n if image_feature_store is None:\n self.image_feature_store = ImageFeatureStore(self.network)\n else:\n self.image_feature_store = image_feature_store\n\n self.last_mask = None\n\n def clear_memory(self):\n self.curr_ti = -1\n self.last_mem_ti = 0\n self.memory = MemoryManager(cfg=self.cfg, object_manager=self.object_manager)\n\n def clear_non_permanent_memory(self):\n self.curr_ti = -1\n self.last_mem_ti = 0\n self.memory.clear_non_permanent_memory()\n\n def clear_sensory_memory(self):\n self.curr_ti = -1\n self.last_mem_ti = 0\n self.memory.clear_sensory_memory()\n\n def update_config(self, cfg):\n self.mem_every = cfg['mem_every']\n self.memory.update_config(cfg)\n\n def _add_memory(self,\n image: torch.Tensor,\n pix_feat: torch.Tensor,\n prob: torch.Tensor,\n key: torch.Tensor,\n shrinkage: torch.Tensor,\n selection: torch.Tensor,\n *,\n is_deep_update: bool = True,\n force_permanent: bool = False) -> None:\n \"\"\"\n Memorize the given segmentation in all memory stores.\n\n The batch dimension is 1 if flip augmentation is not used.\n image: RGB image, (1/2)*3*H*W\n pix_feat: from the key encoder, (1/2)*_*H*W\n prob: (1/2)*num_objects*H*W, in [0, 1]\n key/shrinkage/selection: for anisotropic l2, (1/2)*_*H*W\n selection can be None if not using long-term memory\n is_deep_update: whether to use deep update (e.g. 
with the mask encoder)\n force_permanent: whether to force the memory to be permanent\n \"\"\"\n if prob.shape[1] == 0:\n # nothing to add\n log.warn('Trying to add an empty object mask to memory!')\n return\n\n if force_permanent:\n as_permanent = 'all'\n else:\n as_permanent = 'first'\n\n self.memory.initialize_sensory_if_needed(key, self.object_manager.all_obj_ids)\n msk_value, sensory, obj_value, self.obj_logits = self.network.encode_mask(\n image,\n pix_feat,\n self.memory.get_sensory(self.object_manager.all_obj_ids),\n prob,\n deep_update=is_deep_update,\n chunk_size=self.chunk_size,\n need_weights=self.save_aux)\n self.memory.add_memory(key,\n shrinkage,\n msk_value,\n obj_value,\n self.object_manager.all_obj_ids,\n selection=selection,\n as_permanent=as_permanent)\n self.last_mem_ti = self.curr_ti\n if is_deep_update:\n self.memory.update_sensory(sensory, self.object_manager.all_obj_ids)\n\n def _segment(self,\n key: torch.Tensor,\n selection: torch.Tensor,\n pix_feat: torch.Tensor,\n ms_features: Iterable[torch.Tensor],\n update_sensory: bool = True) -> torch.Tensor:\n \"\"\"\n Produce a segmentation using the given features and the memory\n\n The batch dimension is 1 if flip augmentation is not used.\n key/selection: for anisotropic l2: (1/2) * _ * H * W\n pix_feat: from the key encoder, (1/2) * _ * H * W\n ms_features: an iterable of multiscale features from the encoder, each is (1/2)*_*H*W\n with strides 16, 8, and 4 respectively\n update_sensory: whether to update the sensory memory\n\n Returns: (num_objects+1)*H*W normalized probability; the first channel is the background\n \"\"\"\n bs = key.shape[0]\n if self.flip_aug:\n assert bs == 2\n else:\n assert bs == 1\n\n if not self.memory.engaged:\n log.warn('Trying to segment without any memory!')\n return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16),\n device=key.device,\n dtype=key.dtype)\n\n memory_readout = self.memory.read(pix_feat, key, selection, self.last_mask, self.network)\n memory_readout = self.object_manager.realize_dict(memory_readout)\n sensory, _, pred_prob_with_bg = self.network.segment(ms_features,\n memory_readout,\n self.memory.get_sensory(\n self.object_manager.all_obj_ids),\n chunk_size=self.chunk_size,\n update_sensory=update_sensory)\n # remove batch dim\n if self.flip_aug:\n # average predictions of the non-flipped and flipped version\n pred_prob_with_bg = (pred_prob_with_bg[0] +\n torch.flip(pred_prob_with_bg[1], dims=[-1])) / 2\n else:\n pred_prob_with_bg = pred_prob_with_bg[0]\n if update_sensory:\n self.memory.update_sensory(sensory, self.object_manager.all_obj_ids)\n return pred_prob_with_bg\n\n def step(self,\n image: torch.Tensor,\n mask: Optional[torch.Tensor] = None,\n objects: Optional[List[int]] = None,\n *,\n idx_mask: bool = True,\n end: bool = False,\n delete_buffer: bool = True,\n force_permanent: bool = False) -> torch.Tensor:\n \"\"\"\n Take a step with a new incoming image.\n If there is an incoming mask with new objects, we will memorize them.\n If there is no incoming mask, we will segment the image using the memory.\n In both cases, we will update the memory and return a segmentation.\n\n image: 3*H*W\n mask: H*W (if idx mask) or len(objects)*H*W or None\n objects: list of object ids that are valid in the mask Tensor.\n The ids themselves do not need to be consecutive/in order, but they need to be \n in the same position in the list as the corresponding mask\n in the tensor in non-idx-mask mode.\n objects is ignored if the mask is None. 
\n If idx_mask is False and objects is None, we sequentially infer the object ids.\n idx_mask: if True, mask is expected to contain an object id at every pixel.\n If False, mask should have multiple channels with each channel representing one object.\n end: if we are at the end of the sequence, we do not need to update memory\n if unsure just set it to False \n delete_buffer: whether to delete the image feature buffer after this step\n force_permanent: the memory recorded this frame will be added to the permanent memory\n \"\"\"\n if objects is None and mask is not None:\n assert not idx_mask\n objects = list(range(1, mask.shape[0] + 1))\n\n # resize input if needed -- currently only used for the GUI\n resize_needed = False\n if self.max_internal_size > 0:\n h, w = image.shape[-2:]\n min_side = min(h, w)\n if min_side > self.max_internal_size:\n resize_needed = True\n new_h = int(h / min_side * self.max_internal_size)\n new_w = int(w / min_side * self.max_internal_size)\n image = F.interpolate(image.unsqueeze(0),\n size=(new_h, new_w),\n mode='bilinear',\n align_corners=False)[0]\n if mask is not None:\n if idx_mask:\n mask = F.interpolate(mask.unsqueeze(0).unsqueeze(0).float(),\n size=(new_h, new_w),\n mode='nearest',\n align_corners=False)[0, 0].round().long()\n else:\n mask = F.interpolate(mask.unsqueeze(0),\n size=(new_h, new_w),\n mode='bilinear',\n align_corners=False)[0]\n\n self.curr_ti += 1\n\n image, self.pad = pad_divide_by(image, 16)\n image = image.unsqueeze(0) # add the batch dimension\n if self.flip_aug:\n image = torch.cat([image, torch.flip(image, dims=[-1])], dim=0)\n\n # whether to update the working memory\n is_mem_frame = ((self.curr_ti - self.last_mem_ti >= self.mem_every) or\n (mask is not None)) and (not end)\n # segment when there is no input mask or when the input mask is incomplete\n need_segment = (mask is None) or (self.object_manager.num_obj > 0\n and not self.object_manager.has_all(objects))\n update_sensory = ((self.curr_ti - self.last_mem_ti) in self.stagger_ti) and (not end)\n\n # encoding the image\n ms_feat, pix_feat = self.image_feature_store.get_features(self.curr_ti, image)\n key, shrinkage, selection = self.image_feature_store.get_key(self.curr_ti, image)\n\n # segmentation from memory if needed\n if need_segment:\n pred_prob_with_bg = self._segment(key,\n selection,\n pix_feat,\n ms_feat,\n update_sensory=update_sensory)\n\n # use the input mask if provided\n if mask is not None:\n # inform the manager of the new objects, and get a list of temporary id\n # temporary ids -- indicates the position of objects in the tensor\n # (starts with 1 due to the background channel)\n corresponding_tmp_ids, _ = self.object_manager.add_new_objects(objects)\n\n mask, _ = pad_divide_by(mask, 16)\n if need_segment:\n # merge predicted mask with the incomplete input mask\n pred_prob_no_bg = pred_prob_with_bg[1:]\n # use the mutual exclusivity of segmentation\n if idx_mask:\n pred_prob_no_bg[:, mask > 0] = 0\n else:\n pred_prob_no_bg[:, mask.max(0) > 0.5] = 0\n\n new_masks = []\n for mask_id, tmp_id in enumerate(corresponding_tmp_ids):\n if idx_mask:\n this_mask = (mask == objects[mask_id]).type_as(pred_prob_no_bg)\n else:\n this_mask = mask[tmp_id]\n if tmp_id > pred_prob_no_bg.shape[0]:\n new_masks.append(this_mask.unsqueeze(0))\n else:\n # +1 for padding the background channel\n pred_prob_no_bg[tmp_id - 1] = this_mask\n # new_masks are always in the order of tmp_id\n mask = torch.cat([pred_prob_no_bg, *new_masks], dim=0)\n elif idx_mask:\n # simply convert cls to 
one-hot representation\n if len(objects) == 0:\n if delete_buffer:\n self.image_feature_store.delete(self.curr_ti)\n log.warn('Trying to insert an empty mask as memory!')\n return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16),\n device=key.device,\n dtype=key.dtype)\n mask = torch.stack(\n [mask == objects[mask_id] for mask_id, _ in enumerate(corresponding_tmp_ids)],\n dim=0)\n pred_prob_with_bg = aggregate(mask, dim=0)\n pred_prob_with_bg = torch.softmax(pred_prob_with_bg, dim=0)\n\n self.last_mask = pred_prob_with_bg[1:].unsqueeze(0)\n if self.flip_aug:\n self.last_mask = torch.cat(\n [self.last_mask, torch.flip(self.last_mask, dims=[-1])], dim=0)\n\n # save as memory if needed\n if is_mem_frame or force_permanent:\n self._add_memory(image,\n pix_feat,\n self.last_mask,\n key,\n shrinkage,\n selection,\n force_permanent=force_permanent)\n\n if delete_buffer:\n self.image_feature_store.delete(self.curr_ti)\n\n output_prob = unpad(pred_prob_with_bg, self.pad)\n if resize_needed:\n # restore output to the original size\n output_prob = F.interpolate(output_prob.unsqueeze(0),\n size=(h, w),\n mode='bilinear',\n align_corners=False)[0]\n\n return output_prob\n\n def get_aux_outputs(self, image: torch.Tensor) -> Dict[str, torch.Tensor]:\n image, pads = pad_divide_by(image, 16)\n image = image.unsqueeze(0) # add the batch dimension\n _, pix_feat = self.image_feature_store.get_features(self.curr_ti, image)\n\n aux_inputs = self.memory.aux\n aux_outputs = self.network.compute_aux(pix_feat, aux_inputs, selector=None)\n aux_outputs['q_weights'] = aux_inputs['q_weights']\n aux_outputs['p_weights'] = aux_inputs['p_weights']\n\n for k, v in aux_outputs.items():\n if len(v.shape) == 5:\n aux_outputs[k] = F.interpolate(v[0],\n size=image.shape[-2:],\n mode='bilinear',\n align_corners=False)\n elif 'weights' in k:\n b, num_objects, num_heads, num_queries, h, w = v.shape\n v = v.view(num_objects * num_heads, num_queries, h, w)\n v = F.interpolate(v, size=image.shape[-2:], mode='bilinear', align_corners=False)\n aux_outputs[k] = v.view(num_objects, num_heads, num_queries, *image.shape[-2:])\n else:\n aux_outputs[k] = F.interpolate(v,\n size=image.shape[-2:],\n mode='bilinear',\n align_corners=False)[0]\n aux_outputs[k] = unpad(aux_outputs[k], pads)\n if 'weights' in k:\n weights = aux_outputs[k]\n weights = weights / (weights.max(-1, keepdim=True)[0].max(-2, keepdim=True)[0] +\n 1e-8)\n aux_outputs[k] = (weights * 255).cpu().numpy()\n else:\n aux_outputs[k] = (aux_outputs[k].softmax(dim=0) * 255).cpu().numpy()\n\n self.image_feature_store.delete(self.curr_ti)\n return aux_outputs\n\n def get_aux_object_weights(self, image: torch.Tensor) -> np.ndarray:\n image, pads = pad_divide_by(image, 16)\n # B*num_objects*H*W*num_queries -> num_objects*num_queries*H*W\n # weights = F.softmax(self.obj_logits, dim=-1)[0]\n weights = F.sigmoid(self.obj_logits)[0]\n weights = weights.permute(0, 3, 1, 2).contiguous()\n weights = F.interpolate(weights,\n size=image.shape[-2:],\n mode='bilinear',\n align_corners=False)\n # weights = weights / (weights.max(-1, keepdim=True)[0].max(-2, keepdim=True)[0])\n weights = unpad(weights, pads)\n weights = (weights * 255).cpu().numpy()\n return weights"
},
{
"identifier": "ResultSaver",
"path": "cutie/inference/utils/results_utils.py",
"snippet": "class ResultSaver:\n def __init__(self,\n output_root,\n video_name,\n *,\n dataset,\n object_manager: ObjectManager,\n use_long_id,\n palette=None,\n save_mask=True,\n save_scores=False,\n score_output_root=None,\n visualize_output_root=None,\n visualize=False,\n init_json=None):\n self.output_root = output_root\n self.video_name = video_name\n self.dataset = dataset.lower()\n self.use_long_id = use_long_id\n self.palette = palette\n self.object_manager = object_manager\n self.save_mask = save_mask\n self.save_scores = save_scores\n self.score_output_root = score_output_root\n self.visualize_output_root = visualize_output_root\n self.visualize = visualize\n\n if self.visualize:\n if self.palette is not None:\n self.colors = np.array(self.palette, dtype=np.uint8).reshape(-1, 3)\n else:\n self.colors = davis_palette_np\n\n self.need_remapping = True\n self.json_style = None\n self.id2rgb_converter = ID2RGBConverter()\n\n if 'burst' in self.dataset:\n assert init_json is not None\n self.input_segmentations = init_json['segmentations']\n self.segmentations = [{} for _ in init_json['segmentations']]\n self.annotated_frames = init_json['annotated_image_paths']\n self.video_json = {k: v for k, v in init_json.items() if k != 'segmentations'}\n self.video_json['segmentations'] = self.segmentations\n self.json_style = 'burst'\n\n self.queue = Queue(maxsize=10)\n self.thread = Thread(target=save_result, args=(self.queue, ))\n self.thread.daemon = True\n self.thread.start()\n\n def process(self,\n prob: torch.Tensor,\n frame_name: str,\n resize_needed: bool = False,\n shape: Optional[Tuple[int, int]] = None,\n last_frame: bool = False,\n path_to_image: str = None):\n\n if resize_needed:\n prob = F.interpolate(prob.unsqueeze(1), shape, mode='bilinear', align_corners=False)[:,\n 0]\n # Probability mask -> index mask\n mask = torch.argmax(prob, dim=0)\n if self.save_scores:\n # also need to pass prob\n prob = prob.cpu()\n else:\n prob = None\n\n # remap indices\n if self.need_remapping:\n new_mask = torch.zeros_like(mask)\n for tmp_id, obj in self.object_manager.tmp_id_to_obj.items():\n new_mask[mask == tmp_id] = obj.id\n mask = new_mask\n\n args = ResultArgs(saver=self,\n prob=prob,\n mask=mask.cpu(),\n frame_name=frame_name,\n path_to_image=path_to_image,\n tmp_id_to_obj=copy.deepcopy(self.object_manager.tmp_id_to_obj),\n obj_to_tmp_id=copy.deepcopy(self.object_manager.obj_to_tmp_id),\n last_frame=last_frame)\n\n self.queue.put(args)\n\n def end(self):\n self.queue.put(None)\n self.queue.join()\n self.thread.join()"
},
{
"identifier": "image_to_torch",
"path": "gui/interactive_utils.py",
"snippet": "def image_to_torch(frame: np.ndarray, device: str = 'cuda'):\n # frame: H*W*3 numpy array\n frame = frame.transpose(2, 0, 1)\n frame = torch.from_numpy(frame).float().to(device, non_blocking=True) / 255\n return frame"
},
{
"identifier": "index_numpy_to_one_hot_torch",
"path": "gui/interactive_utils.py",
"snippet": "def index_numpy_to_one_hot_torch(mask: np.ndarray, num_classes: int):\n mask = torch.from_numpy(mask).long()\n return F.one_hot(mask, num_classes=num_classes).permute(2, 0, 1).float()"
}
] | from os import path, listdir
from omegaconf import DictConfig, open_dict
from hydra import compose, initialize
from cutie.model.cutie import CUTIE
from cutie.inference.inference_core import InferenceCore
from cutie.inference.utils.results_utils import ResultSaver
from tqdm import tqdm
from time import perf_counter
from gui.interactive_utils import image_to_torch, index_numpy_to_one_hot_torch
from PIL import Image
from argparse import ArgumentParser
import torch
import cv2
import numpy as np | 7,943 |
def process_video(cfg: DictConfig):
    # general setup
    torch.set_grad_enabled(False)
    if cfg['device'] == 'cuda' and torch.cuda.is_available():
        device = 'cuda'
    elif cfg['device'] == 'mps' and torch.backends.mps.is_available():
        device = 'mps'
    else:
        device = 'cpu'
    print(f'Using device: {device}')
    use_amp = cfg.amp
    # Load the network weights
    print(f'Loading Cutie and weights')
    cutie = CUTIE(cfg).to(device).eval()
    if cfg.weights is not None:
        model_weights = torch.load(cfg.weights, map_location=device)
        cutie.load_weights(model_weights)
    else:
        print('No model weights loaded. Are you sure about this?')
    # Open video
    video = cfg['video']
    if video is None:
        print('No video defined. Please specify!')
        exit()
    video_name = path.splitext(video)[0]
    print(f'Opening video {video}')
    cap = cv2.VideoCapture(video)
    if not cap.isOpened():
        print(f'Unable to open video {video}!')
        exit()
    total_frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # Initial mask handling
    mask_dir = cfg['mask_dir']
    if mask_dir is None:
        print('No mask_dir defined. Please specify!')
        exit()
    # determine if the mask uses 3-channel long ID or 1-channel (0~255) short ID
    all_mask_frames = sorted(listdir(mask_dir))
    first_mask_frame = all_mask_frames[0]
    first_mask = Image.open(path.join(mask_dir, first_mask_frame))
    if first_mask.mode == 'P':
        use_long_id = False
        palette = first_mask.getpalette()
    elif first_mask.mode == 'RGB':
        use_long_id = True
        palette = None
    elif first_mask.mode == 'L':
        use_long_id = False
        palette = None
    else:
        print(f'Unknown mode {first_mask.mode} in {first_mask_frame}.')
        exit()
    num_objects = cfg['num_objects']
    if num_objects is None or num_objects < 1:
        num_objects = len(np.unique(first_mask)) - 1
|
def process_video(cfg: DictConfig):
    # general setup
    torch.set_grad_enabled(False)
    if cfg['device'] == 'cuda' and torch.cuda.is_available():
        device = 'cuda'
    elif cfg['device'] == 'mps' and torch.backends.mps.is_available():
        device = 'mps'
    else:
        device = 'cpu'
    print(f'Using device: {device}')
    use_amp = cfg.amp
    # Load the network weights
    print(f'Loading Cutie and weights')
    cutie = CUTIE(cfg).to(device).eval()
    if cfg.weights is not None:
        model_weights = torch.load(cfg.weights, map_location=device)
        cutie.load_weights(model_weights)
    else:
        print('No model weights loaded. Are you sure about this?')
    # Open video
    video = cfg['video']
    if video is None:
        print('No video defined. Please specify!')
        exit()
    video_name = path.splitext(video)[0]
    print(f'Opening video {video}')
    cap = cv2.VideoCapture(video)
    if not cap.isOpened():
        print(f'Unable to open video {video}!')
        exit()
    total_frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # Initial mask handling
    mask_dir = cfg['mask_dir']
    if mask_dir is None:
        print('No mask_dir defined. Please specify!')
        exit()
    # determine if the mask uses 3-channel long ID or 1-channel (0~255) short ID
    all_mask_frames = sorted(listdir(mask_dir))
    first_mask_frame = all_mask_frames[0]
    first_mask = Image.open(path.join(mask_dir, first_mask_frame))
    if first_mask.mode == 'P':
        use_long_id = False
        palette = first_mask.getpalette()
    elif first_mask.mode == 'RGB':
        use_long_id = True
        palette = None
    elif first_mask.mode == 'L':
        use_long_id = False
        palette = None
    else:
        print(f'Unknown mode {first_mask.mode} in {first_mask_frame}.')
        exit()
    num_objects = cfg['num_objects']
    if num_objects is None or num_objects < 1:
        num_objects = len(np.unique(first_mask)) - 1
| processor = InferenceCore(cutie, cfg=cfg) | 1 | 2023-10-19 17:49:24+00:00 | 12k |
MolecularAI/REINVENT4 | tests/chemistry/library_design/test_fragment_reactions_slice_enumerator.py | [
{
"identifier": "Conversions",
"path": "reinvent/chemistry/conversions.py",
"snippet": "class Conversions:\n @staticmethod\n def smiles_to_mols_and_indices(query_smiles: List[str]) -> Tuple[List[Mol], List[int]]:\n mols = [MolFromSmiles(smile) for smile in query_smiles]\n valid_mask = [mol is not None for mol in mols]\n valid_idxs = [idx for idx, is_valid in enumerate(valid_mask) if is_valid]\n valid_mols = [mols[idx] for idx in valid_idxs]\n return valid_mols, valid_idxs\n\n @staticmethod\n def mols_to_fingerprints(\n molecules: List[Mol], radius: int = 3, use_counts: bool = True, use_features: bool = True\n ) -> List[UIntSparseIntVect]:\n fingerprints = [\n AllChem.GetMorganFingerprint(\n mol, radius, useCounts=use_counts, useFeatures=use_features\n )\n for mol in molecules\n ]\n return fingerprints\n\n @staticmethod\n def smiles_to_mols(query_smiles: List[str]) -> List[Mol]:\n mols = [MolFromSmiles(smile) for smile in query_smiles]\n valid_mask = [mol is not None for mol in mols]\n valid_idxs = [idx for idx, is_valid in enumerate(valid_mask) if is_valid]\n valid_mols = [mols[idx] for idx in valid_idxs]\n return valid_mols\n\n def smiles_to_fingerprints(\n self, query_smiles: List[str], radius=3, use_counts=True, use_features=True\n ) -> List[UIntSparseIntVect]:\n mols = self.smiles_to_mols(query_smiles)\n fingerprints = self.mols_to_fingerprints(\n mols, radius=radius, use_counts=use_counts, use_features=use_features\n )\n return fingerprints\n\n def smile_to_mol(self, smile: str) -> Mol:\n \"\"\"\n Creates a Mol object from a SMILES string.\n :param smile: SMILES string.\n :return: A Mol object or None if it's not valid.\n \"\"\"\n if smile:\n return MolFromSmiles(smile)\n\n def mols_to_smiles(\n self, molecules: List[Mol], isomericSmiles=False, canonical=True\n ) -> List[str]:\n \"\"\"This method assumes that all molecules are valid.\"\"\"\n valid_smiles = [\n MolToSmiles(mol, isomericSmiles=isomericSmiles, canonical=canonical)\n for mol in molecules\n ]\n return valid_smiles\n\n def mol_to_smiles(self, molecule: Mol, isomericSmiles=False, canonical=True) -> str:\n \"\"\"\n Converts a Mol object into a canonical SMILES string.\n :param molecule: Mol object.\n :return: A SMILES string.\n \"\"\"\n if molecule:\n return MolToSmiles(molecule, isomericSmiles=isomericSmiles, canonical=canonical)\n\n def mol_to_random_smiles(self, molecule: Mol) -> str:\n \"\"\"\n Converts a Mol object into a random SMILES string.\n :return: A SMILES string.\n \"\"\"\n if molecule:\n new_atom_order = list(range(molecule.GetNumAtoms()))\n random.shuffle(new_atom_order)\n random_mol = RenumberAtoms(molecule, newOrder=new_atom_order)\n return MolToSmiles(random_mol, canonical=False, isomericSmiles=False)\n\n def convert_to_rdkit_smiles(\n self, smiles: str, allowTautomers=True, sanitize=False, isomericSmiles=False\n ) -> str:\n \"\"\"\n :param smiles: Converts a smiles string into a canonical SMILES string.\n :type allowTautomers: allows having same molecule represented in different tautomeric forms\n \"\"\"\n if allowTautomers:\n return MolToSmiles(\n MolFromSmiles(smiles, sanitize=sanitize), isomericSmiles=isomericSmiles\n )\n else:\n return MolStandardize.canonicalize_tautomer_smiles(smiles)\n\n def convert_to_standardized_smiles(self, smiles: str) -> Optional[str]:\n \"\"\"Standardize SMILES for Mol2Mol\n\n This should only be used to validate and transform user input\n because the code will abort execution on any error it finds.\n\n param smiles: single SMILES string\n return: single SMILES string\n \"\"\"\n\n mol = MolFromSmiles(smiles, sanitize=True)\n\n if not mol: # RDKit 
fails silently\n raise RuntimeError(f\"RDKit does not accept SMILES: {smiles}\")\n\n standardizer = Standardizer() # MolVS\n\n try:\n smol = standardizer(mol) # runs SanitizeMol() first\n smol = standardizer.charge_parent(smol) # largest fragment uncharged\n smi = MolToSmiles(smol, isomericSmiles=True)\n except Exception as error: # RDKit may raise multiple exceptions\n raise RuntimeError(f\"RDKit does not accept SMILES: {smiles} {error}\")\n\n # Sometimes when standardizing ChEMBL [H] are not removed so try a\n # second call\n if \"[H]\" in smi:\n return self.convert_to_standardized_smiles(smi)\n else:\n return smi\n\n def copy_mol(self, molecule: Mol) -> Mol:\n \"\"\"\n Copies, sanitizes, canonicalizes and cleans a molecule.\n :param molecule: A Mol object to copy.\n :return : Another Mol object copied, sanitized, canonicalized and cleaned.\n \"\"\"\n return self.smile_to_mol(self.mol_to_smiles(molecule))\n\n def randomize_smiles(self, smiles: str) -> str:\n \"\"\"\n Returns a random SMILES given a SMILES of a molecule.\n :param smiles: A smiles string\n :returns: A random SMILES string of the same molecule or None if the molecule is invalid.\n \"\"\"\n mol = MolFromSmiles(smiles)\n if mol:\n new_atom_order = list(range(mol.GetNumHeavyAtoms()))\n random.shuffle(new_atom_order)\n random_mol = RenumberAtoms(mol, newOrder=new_atom_order)\n return MolToSmiles(random_mol, canonical=False, isomericSmiles=False)\n\n def mol_to_inchi_key(self, molecule: Mol) -> str:\n \"\"\"Returns the standard InChI key for a molecule\"\"\"\n if molecule:\n inchi_key = MolToInchiKey(molecule)\n return inchi_key\n\n def mol_to_sdf(self, molecules: List, input_sdf_path: str):\n \"\"\"Write a set of molecules to sdf file\"\"\"\n writer = SDWriter(input_sdf_path)\n for mol in molecules:\n writer.write(mol)"
},
{
"identifier": "AttachmentPoints",
"path": "reinvent/chemistry/library_design/attachment_points.py",
"snippet": "class AttachmentPoints:\n def __init__(self):\n self._conversions = Conversions()\n self._tokens = TransformationTokens()\n\n def add_attachment_point_numbers(self, mol_or_smi, canonicalize=True):\n \"\"\"\n Adds the numbers for the attachment points throughout the molecule.\n :param mol_or_smi: SMILES string to convert.\n :param canonicalize: Canonicalize the SMILES so that the attachment points are always in the same order.\n :return : A converted SMILES string.\n \"\"\"\n if isinstance(mol_or_smi, str):\n smi = mol_or_smi\n if canonicalize:\n smi = self._conversions.mol_to_smiles(self._conversions.smile_to_mol(mol_or_smi))\n # only add numbers ordered by the SMILES ordering\n num = -1\n\n def _ap_callback(_):\n nonlocal num\n num += 1\n return \"[{}:{}]\".format(self._tokens.ATTACHMENT_POINT_TOKEN, num)\n\n return re.sub(self._tokens.ATTACHMENT_POINT_REGEXP, _ap_callback, smi)\n else:\n mol = mol_or_smi\n if canonicalize:\n mol = self._conversions.smile_to_mol(self._conversions.mol_to_smiles(mol))\n idx = 0\n for atom in mol.GetAtoms():\n if atom.GetSymbol() == self._tokens.ATTACHMENT_POINT_TOKEN:\n atom.SetProp(\"molAtomMapNumber\", str(idx))\n idx += 1\n return self._conversions.mol_to_smiles(mol)\n\n def get_attachment_points(self, smile: str) -> List:\n \"\"\"\n Gets all attachment points from SMILES string.\n :param smile: A SMILES string\n :return : A list with the numbers ordered by appearance.\n \"\"\"\n return [\n int(match.group(1))\n for match in re.finditer(self._tokens.ATTACHMENT_POINT_NUM_REGEXP, smile)\n ]\n\n def get_attachment_points_for_molecule(self, molecule: Mol) -> List:\n \"\"\"\n Gets all attachment points from RDKit Mol.\n :param molecule: A Mol object.\n :return : A list with the numbers ordered by appearance.\n \"\"\"\n if isinstance(molecule, Mol):\n return [\n int(atom.GetProp(\"molAtomMapNumber\"))\n for atom in molecule.GetAtoms()\n if atom.GetSymbol() == self._tokens.ATTACHMENT_POINT_TOKEN\n and atom.HasProp(\"molAtomMapNumber\")\n ]\n\n def add_first_attachment_point_number(self, smi, num):\n \"\"\"\n Changes/adds a number to the first attachment point.\n :param smi: SMILES string with the molecule.\n :param num: Number to add.\n :return: A SMILES string with the number added.\n \"\"\"\n return re.sub(\n self._tokens.ATTACHMENT_POINT_REGEXP,\n \"[{}:{}]\".format(self._tokens.ATTACHMENT_POINT_TOKEN, num),\n smi,\n count=1,\n )\n\n def remove_attachment_point_numbers(self, smile: str) -> str:\n \"\"\"\n Removes the numbers for the attachment points throughout the molecule.\n :param smile: SMILES string.\n :return : A converted SMILES string.\n \"\"\"\n result = re.sub(\n self._tokens.ATTACHMENT_POINT_NUM_REGEXP,\n \"[{}]\".format(self._tokens.ATTACHMENT_POINT_TOKEN),\n smile,\n )\n return result\n\n def remove_attachment_point_numbers_from_mol(self, molecule: Mol) -> Mol:\n \"\"\"\n Removes the numbers for the attachment points throughout the molecule.\n :param molecule: RDKit molecule.\n :return : A molecule.\n \"\"\"\n if isinstance(molecule, Mol):\n for atom in molecule.GetAtoms():\n atom.ClearProp(\"molAtomMapNumber\")\n return molecule\n\n def add_brackets_to_attachment_points(self, scaffold: str):\n \"\"\"\n Adds brackets to the attachment points (if they don't have them).\n :param scaffold: SMILES string.\n :return: A SMILES string with attachments in brackets.\n \"\"\"\n return re.sub(\n self._tokens.ATTACHMENT_POINT_NO_BRACKETS_REGEXP,\n \"[{}]\".format(self._tokens.ATTACHMENT_POINT_TOKEN),\n scaffold,\n )"
},
{
"identifier": "BondMaker",
"path": "reinvent/chemistry/library_design/bond_maker.py",
"snippet": "class BondMaker:\n def __init__(self):\n self._conversions = Conversions()\n self._tokens = TransformationTokens()\n self._attachment_points = AttachmentPoints()\n\n def join_scaffolds_and_decorations(\n self, scaffold_smi: str, decorations_smi, keep_labels_on_atoms=False\n ) -> Optional[Mol]:\n decorations_smi = [\n self._attachment_points.add_first_attachment_point_number(dec, i)\n for i, dec in enumerate(decorations_smi.split(self._tokens.ATTACHMENT_SEPARATOR_TOKEN))\n ]\n num_attachment_points = len(self._attachment_points.get_attachment_points(scaffold_smi))\n if len(decorations_smi) != num_attachment_points:\n return None\n\n mol = self._conversions.smile_to_mol(scaffold_smi)\n for decoration in decorations_smi:\n mol = self.join_molecule_fragments(\n mol,\n self._conversions.smile_to_mol(decoration),\n keep_label_on_atoms=keep_labels_on_atoms,\n )\n if not mol:\n return None\n return mol\n\n def join_molecule_fragments(self, scaffold: Mol, decoration: Mol, keep_label_on_atoms=False):\n \"\"\"\n Joins a RDKit MOL scaffold with a decoration. They must be labelled.\n :param scaffold: RDKit MOL of the scaffold.\n :param decoration: RDKit MOL of the decoration.\n :param keep_label_on_atoms: Add the labels to the atoms after attaching the molecule.\n This is useful when debugging, but it can give problems.\n :return: A Mol object of the joined scaffold.\n \"\"\"\n\n if scaffold and decoration:\n # obtain id in the decoration\n try:\n attachment_points = [\n atom.GetProp(\"molAtomMapNumber\")\n for atom in decoration.GetAtoms()\n if atom.GetSymbol() == self._tokens.ATTACHMENT_POINT_TOKEN\n ]\n if len(attachment_points) != 1:\n return None # more than one attachment point...\n attachment_point = attachment_points[0]\n except KeyError:\n return None\n\n combined_scaffold = RWMol(CombineMols(decoration, scaffold))\n attachments = [\n atom\n for atom in combined_scaffold.GetAtoms()\n if atom.GetSymbol() == self._tokens.ATTACHMENT_POINT_TOKEN\n and atom.HasProp(\"molAtomMapNumber\")\n and atom.GetProp(\"molAtomMapNumber\") == attachment_point\n ]\n if len(attachments) != 2:\n return None # something weird\n\n neighbors = []\n for atom in attachments:\n if atom.GetDegree() != 1:\n return None # the attachment is wrongly generated\n neighbors.append(atom.GetNeighbors()[0])\n\n bonds = [atom.GetBonds()[0] for atom in attachments]\n bond_type = BondType.SINGLE\n if any(bond for bond in bonds if bond.GetBondType() == BondType.DOUBLE):\n bond_type = BondType.DOUBLE\n\n combined_scaffold.AddBond(neighbors[0].GetIdx(), neighbors[1].GetIdx(), bond_type)\n combined_scaffold.RemoveAtom(attachments[0].GetIdx())\n combined_scaffold.RemoveAtom(attachments[1].GetIdx())\n\n if keep_label_on_atoms:\n for neigh in neighbors:\n self._add_attachment_point_num(neigh, attachment_point)\n\n # Label the atoms in the bond\n bondNumbers = [\n int(atom.GetProp(\"bondNum\"))\n for atom in combined_scaffold.GetAtoms()\n if atom.HasProp(\"bondNum\")\n ]\n\n if bondNumbers:\n bondNum = max(bondNumbers) + 1\n else:\n bondNum = 0\n\n for neighbor in neighbors:\n idx = neighbor.GetIdx()\n atom = combined_scaffold.GetAtomWithIdx(idx)\n atom.SetIntProp(\"bondNum\", bondNum)\n ##########################################\n\n scaffold = combined_scaffold.GetMol()\n try:\n SanitizeMol(scaffold)\n except ValueError: # sanitization error\n return None\n else:\n return None\n\n return scaffold\n\n def _add_attachment_point_num(self, atom, idx):\n idxs = []\n if atom.HasProp(\"molAtomMapNumber\"):\n idxs = 
atom.GetProp(\"molAtomMapNumber\").split(\",\")\n idxs.append(str(idx))\n idxs = sorted(list(set(idxs)))\n atom.SetProp(\"molAtomMapNumber\", \",\".join(idxs))\n # Fixme: This way of annotating fails in case of several attachment points when the mol is converted back to a\n # SMILES string (RuntimeError: boost::bad_any_cast: failed conversion using boost::any_cast)\n # For example combining scaffold '*C(*)CC' and warhead pair '*OC|*C' would result in\n # C[O:0][CH:0,1]([CH3:1])CC, which results in an error due to the '0,1'\n\n def randomize_scaffold(self, scaffold: Mol):\n smi = self._conversions.mol_to_random_smiles(scaffold)\n conv_smi = None\n if smi:\n conv_smi = self._attachment_points.add_brackets_to_attachment_points(smi)\n return conv_smi"
},
{
"identifier": "FragmentReactionSliceEnumerator",
"path": "reinvent/chemistry/library_design/fragment_reaction_slice_enumerator.py",
"snippet": "class FragmentReactionSliceEnumerator:\n def __init__(\n self,\n chemical_reactions: List[ReactionDTO],\n scaffold_conditions: List[FilteringConditionDTO],\n decoration_conditions: List[FilteringConditionDTO],\n ):\n \"\"\"\n Class to enumerate slicings given certain conditions.\n :param chemical_reactions: A list of ChemicalReaction objects.\n :param scaffold_conditions: Conditions to use when filtering scaffolds obtained from slicing molecules (see FragmentFilter).\n :param decoration_conditions: Conditions to use when filtering decorations obtained from slicing molecules.\n \"\"\"\n self._tockens = TransformationTokens()\n self._chemical_reactions = chemical_reactions\n self._scaffold_filter = FragmentFilter(scaffold_conditions)\n self._decoration_filter = FragmentFilter(decoration_conditions)\n self._reactions = FragmentReactions()\n self._conversions = Conversions()\n\n def enumerate(self, molecule: Mol, cuts: int) -> List[FragmentedMolecule]:\n \"\"\"\n Enumerates all possible combination of slicings of a molecule given a number of cuts.\n :param molecule: A mol object with the molecule to slice.\n :param cuts: The number of cuts to perform.\n :return : A list with all the possible (scaffold, decorations) pairs as SlicedMol objects.\n \"\"\"\n original_smiles = self._conversions.mol_to_smiles(molecule)\n sliced_mols = set()\n for cut in range(1, cuts + 1):\n if cut == 1:\n fragment_pairs = self._reactions.slice_molecule_to_fragments(\n molecule, self._chemical_reactions\n )\n\n for pair in fragment_pairs:\n for indx, _ in enumerate(pair):\n decorations = self._select_all_except(pair, indx)\n decoration = self._conversions.copy_mol(decorations[0])\n labeled_decoration = OrderedDict()\n labeled_decoration[0] = decoration # [ for decoration in decorations]\n\n scaffold = self._conversions.copy_mol(pair[indx])\n labeled_scaffold = self._label_scaffold(scaffold)\n\n # TODO: filtering should take place after scaffold is generated\n sliced_mol = FragmentedMolecule(\n labeled_scaffold, labeled_decoration, original_smiles\n )\n if sliced_mol.original_smiles == sliced_mol.reassembled_smiles:\n sliced_mols.add(sliced_mol)\n else:\n for slice in sliced_mols:\n to_add = self._scaffold_slicing(slice, cut)\n sliced_mols = sliced_mols.union(to_add)\n\n return list(filter(self._filter, sliced_mols))\n\n def _scaffold_slicing(self, slice: FragmentedMolecule, cut: int) -> Set[FragmentedMolecule]:\n to_add = set()\n if slice.decorations_count() == cut - 1:\n fragment_pairs = self._reactions.slice_molecule_to_fragments(\n slice.scaffold, self._chemical_reactions\n )\n\n for pair in fragment_pairs:\n scaffold, decoration = self._split_scaffold_from_decorations(pair, cut)\n if scaffold:\n labeled_scaffold = self._label_scaffold(scaffold)\n labeled_scaffold = self._conversions.copy_mol(labeled_scaffold)\n decoration = self._conversions.copy_mol(decoration)\n sliced_mol = self._create_sliced_molecule(slice, labeled_scaffold, decoration)\n\n if sliced_mol.original_smiles == sliced_mol.reassembled_smiles:\n to_add.add(sliced_mol)\n return to_add\n\n def _select_all_except(self, fragments: Tuple[Mol], to_exclude: int) -> List[Mol]:\n return [fragment for indx, fragment in enumerate(fragments) if indx != to_exclude]\n\n def _filter(self, sliced_mol: FragmentedMolecule) -> bool:\n return self._scaffold_filter.filter(sliced_mol.scaffold) and all(\n self._decoration_filter.filter(dec) for dec in sliced_mol.decorations.values()\n )\n\n def _split_scaffold_from_decorations(self, pair: Tuple[Mol], cuts: 
int) -> Tuple[Mol, Mol]:\n decoration = None\n scaffold = None\n for frag in pair:\n num_att = len(\n [\n atom\n for atom in frag.GetAtoms()\n if atom.GetSymbol() == self._tockens.ATTACHMENT_POINT_TOKEN\n ]\n )\n # detect whether there is one fragment with as many attachment points as cuts (scaffold)\n # the rest are decorations\n if num_att == cuts and not scaffold:\n scaffold = frag\n if num_att == 1:\n decoration = frag\n if decoration and scaffold:\n return scaffold, decoration\n else:\n return (None, None)\n\n def _label_scaffold(self, scaffold: Mol) -> Mol:\n highest_number = self._find_highest_number(scaffold)\n\n for atom in scaffold.GetAtoms():\n if atom.GetSymbol() == self._tockens.ATTACHMENT_POINT_TOKEN:\n try:\n atom_number = int(atom.GetProp(\"molAtomMapNumber\"))\n except:\n highest_number += 1\n num = atom.GetIsotope()\n atom.SetIsotope(0)\n atom.SetProp(\"molAtomMapNumber\", str(highest_number))\n scaffold.UpdatePropertyCache()\n\n return scaffold\n\n def _find_highest_number(self, cut_mol: Mol) -> int:\n highest_number = -1\n\n for atom in cut_mol.GetAtoms():\n if atom.GetSymbol() == self._tockens.ATTACHMENT_POINT_TOKEN:\n try:\n atom_number = int(atom.GetProp(\"molAtomMapNumber\"))\n if highest_number < atom_number:\n highest_number = atom_number\n except:\n pass\n return highest_number\n\n def _create_sliced_molecule(\n self, original_sliced_mol: FragmentedMolecule, scaffold: Mol, decoration: Mol\n ) -> FragmentedMolecule:\n old_decorations = OrderedDict()\n for k, v in original_sliced_mol.decorations.items():\n old_decorations[k] = v\n old_decorations[original_sliced_mol.decorations_count()] = decoration\n sliced_mol = FragmentedMolecule(\n scaffold, old_decorations, original_sliced_mol.original_smiles\n )\n return sliced_mol"
},
{
"identifier": "FilteringConditionDTO",
"path": "reinvent/chemistry/library_design/dtos/filtering_condition_dto.py",
"snippet": "class FilteringConditionDTO:\n name: str\n min: float = None\n max: float = None\n equals: float = None"
},
{
"identifier": "MolecularDescriptorsEnum",
"path": "reinvent/chemistry/library_design/enums/molecular_descriptors_enum.py",
"snippet": "class MolecularDescriptorsEnum:\n HEAVY_ATOM_COUNT = \"heavy_atom_count\"\n MOLECULAR_WEIGHT = \"molecular_weight\"\n CLOGP = \"clogp\"\n HYDROGEN_BOND_DONORS = \"hydrogen_bond_donors\"\n HYDROGEN_BOND_ACCEPTORS = \"hydrogen_bond_acceptors\"\n ROTATABLE_BONDS = \"rotatable_bonds\"\n RING_COUNT = \"ring_count\""
},
{
"identifier": "FragmentReactions",
"path": "reinvent/chemistry/library_design/fragment_reactions.py",
"snippet": "class FragmentReactions:\n def __init__(self):\n self._conversions = Conversions()\n self._tokens = TransformationTokens()\n self._bond_mapper = BondMapper()\n\n def create_reactions_from_smarts(self, smarts: List[str]) -> List[ChemicalReaction]:\n reactions = [AllChem.ReactionFromSmarts(smirks) for smirks in smarts]\n return reactions\n\n def create_reaction_from_smirk(self, smirks: str) -> ReactionDTO:\n reaction = ReactionDTO(smirks, AllChem.ReactionFromSmarts(smirks))\n return reaction\n\n def create_reactions_from_smirks(self, smirks: List[str]) -> List[ReactionDTO]:\n reactions = [self.create_reaction_from_smirk(smirk) for smirk in smirks]\n return reactions\n\n def slice_molecule_to_fragments(\n self, molecule: Mol, reaction_dtos: List[ReactionDTO]\n ) -> List[Tuple[Mol]]:\n \"\"\"\n This method applies a list of chemical reactions on a molecule and\n decomposes the input molecule to complementary fragments.\n :param molecule:\n :param reaction_dtos:\n :return: Different slicing combinations are returned.\n \"\"\"\n list_of_outcomes = self.apply_reactions_on_molecule(molecule, reaction_dtos)\n all_outcomes = []\n\n for outcome in list_of_outcomes:\n all_outcomes.extend(outcome.reaction_outcomes)\n # TODO: the overall data processing is extremely slow. consider reducing redundancy here.\n return all_outcomes\n\n def apply_reactions_on_molecule(\n self, molecule: Mol, reaction_dtos: List[ReactionDTO]\n ) -> List[ReactionOutcomeDTO]:\n \"\"\"Build list of possible splits of a molecule given multiple reactions.\"\"\"\n list_of_outcomes = []\n for reaction_dto in reaction_dtos:\n outcome_dto = self.apply_reaction_on_molecule(molecule, reaction_dto)\n purged_outcome_dto = self._filter_pairs_with_no_ring_count_change(outcome_dto)\n list_of_outcomes.append(purged_outcome_dto)\n return list_of_outcomes\n\n def apply_reaction_on_molecule(\n self, molecule: Mol, reaction_dto: ReactionDTO\n ) -> ReactionOutcomeDTO:\n \"\"\"Build list of possible splits of a molecule given a single reaction.\"\"\"\n molecule = self._conversions.copy_mol(molecule)\n outcomes = reaction_dto.chemical_reaction.RunReactant(molecule, 0)\n outcome_dto = ReactionOutcomeDTO(reaction_dto.reaction_smarts, list(outcomes), molecule)\n return outcome_dto\n\n def _filter_pairs_with_no_ring_count_change(\n self, outcome_dto: ReactionOutcomeDTO\n ) -> ReactionOutcomeDTO:\n molecule_rings = RingCount(outcome_dto.targeted_molecule)\n acceptable_pairs = []\n for pair in outcome_dto.reaction_outcomes:\n if not self._detect_ring_break(molecule_rings, pair) and len(pair) == 2:\n acceptable_pairs.append(pair)\n outcome_dto.reaction_outcomes = acceptable_pairs\n return outcome_dto\n\n def _detect_ring_break(self, molecule_ring_count: int, pair: Tuple[Mol]) -> bool:\n reagent_rings = 0\n for reagent in pair:\n reagent_smiles = self._conversions.mol_to_smiles(reagent)\n reagent_mol = self._conversions.smile_to_mol(reagent_smiles)\n try:\n reagent_rings = reagent_rings + RingCount(reagent_mol)\n except:\n return True\n return molecule_ring_count != reagent_rings"
},
{
"identifier": "FRAGMENT_REACTION_SUZUKI",
"path": "tests/chemistry/library_design/fixtures.py",
"snippet": "FRAGMENT_REACTION_SUZUKI = [\"[*;$(c2aaaaa2),$(c2aaaa2):1]-!@[*;$(c2aaaaa2),$(c2aaaa2):2]>>[*:1][*].[*:2][*]\"]"
},
{
"identifier": "FRAGMENT_REACTIONS",
"path": "tests/chemistry/library_design/fixtures.py",
"snippet": "FRAGMENT_REACTIONS = [\n'[#6;$(C[C;$(C([#6]))]):4]-!@[N;$([NH1;D2](C)C);!$(N-[#6]=[*]);$(N([C])):3]>>[#6:4][*].[N:3][*]',\n'[C;$([CH;$(C([#6])[#6])]),$([CH2;$(C[#6])]):1]-!@[N;$(N(C=O)C=O):2]>>[*:1][*].[*:2][*]',\n'[C;$([CH;$(C([#6])[#6])]),$([CH2;$(C[#6])]):1]-!@[O;$(Oc1ccccc1):2]>>[*:1][*].[*:2][*]',\n'[C;$([CH;$(C([#6])[#6])]),$([CH2;$(C[#6])]):1]-!@[N;$(N([#6])S(=O)=O):2]>>[*:1][*].[*:2][*]',\n'[S;$(S(=O)(=O)[C,N]):1]-!@[N+0;$(NC):2]>>[*:1][*].[*:2][*]',\n'[N;$(N-[#6]):3]-!@[C;$(C=O):1]-!@[N+0;$(N[#6]);!$(N=*);!$([N-]);!$(N#*);!$([ND1]);!$(N[O,N]):2]>>[*:1][*].[*:2][*]',\n'[#6;!$([#6]=*);!$([#6]~[O,N,S]);$([#6]~[#6]):1][c:2]>>[*:2][*].[*:1][*]',\n'[#6;$(C=[#6!H0]):1][C;$(C#N):2]>>[*:1][*].[*][*:2]',\n'[#6:1]([N+]([O-])=O)=[#6:2]>>[*:1][*][N+]([O-])=O.[*:2][*]',\n'[#6;!$(A(A=[O,S]));!$(A=*);!$([A-]);!$(A~[P,S,O,N]):3][C:1](=[#7:2])[N!H0;!$(A(A=[O,S]));!$(A=*);!$([A-]);!$(A~[P,S,O,N]):4]>>[#6:3][C:1]([*])=[N:2].[#7!H0:4][*]',\n'[#6;!$(C(C=*)(C=*));!$([#6]~[O,N,S]);$([#6]~[#6]):1][C:2](=[O:3])[N;D2;$(N(C=[O,S]));!$(N~[O,P,S,N]):4][#6;!$(C=*);!$([#6](~[O,N,S])N);$([#6]~[#6]):5]>>[#6:1][C:2](=[O:3])[*].[*][N:4][#6:5]',\n'[#6;!R;!$(C=*);!$([#6]~[O,N,S]);$([#6]~[#6]):1][#6;!R;!$(C=*);!$([#6]~[O,N,S]);$([#6]~[#6]):2]>>[#6:1][*].[#6:2][*]',\n'[N;!H0:1]([C:2]([#7:5][#6:6])=[#8:3])[#6:4]>>[#8:3]=[C:2]([#7:1][#6:4])[*].[*][#7:5][#6:6]',\n'[#6;!$(C(C=*)(C=*));!$([#6]~[O,N,S]);$([#6]~[#6]):1][C:2](=[O:3])[N;D2;$(N(C=[O,S]));!$(N~[O,P,S,N]):4][#6;!$(C=*);!$([#6](~[O,N,S])N);$([#6]~[#6]):5]>>[#6:1][C:2](=[O:3])[*].[*][N:4][#6:5]',\n'[#6;!$([#6]=*);!$([#6]~[O,N,S,P]);$([#6]~[#6]):2]-!@[#6;!$([#6]=*);!$([#6]~[O,N,S,P]);$([#6]~[#6]):1]>>[#6;$([#6]~[#6]);!$([#6]~[S,N,O,P]):1][*].[*][#6;$([#6]~[#6]);!$([#6]~[S,N,O,P]):2]',\n'[CH2;$([#6]~[#6]):4]-!@[O:3]-!@[#6;$([#6]~[#6]);!$([#6]=O):2]>>[#6;$([#6]~[#6]);!$([#6]=O):2][#8][*].[*][#6;H2;$([#6]~[#6]):4]',\n'[*;$(c2aaaaa2),$(c2aaaa2):1]-!@[*;$(c2aaaaa2),$(c2aaaa2):2]>>[*:1][*].[*:2][*]',\n'[*;$(c2aaaaa2),$(c2aaaa2):4]/[#6:1]=!@[#6:2]/[*;$(c2aaaaa2),$(c2aaaa2):3]>>[#6;c,$(C(=O)O),$(C#N):3][#6;H1:2]=[#6;H1:1][*].[#6;$([#6]=[#6]),$(c:c):4][*]',\n'[#6:4][#6;H0:1]=!@[#6:2]([#6:5])[#6:3]>>[#6;c,$(C(=O)O),$(C#N):3][#6:2]([#6:5])=[#6;$([#6][#6]):1][*].[#6;$([#6]=[#6]),$(c:c):4][*]',\n'[*;$(c);$(C=C-[#6]),$(c):1]-!@[*;$(c):2]>>[#6;$(C=C-[#6]),$(c):1][*].[*][*;$(c):2]',\n'[C;$(C([#6])[#6]):1]([#6:5])([#6:2])([O;H1:3])[#6;!R:4]>>[#6:2][#6:1](*)([#6:5])[O:3].[*][#6:4]',\n'[#6;$(C=C-[#6]),$(c:c):1]-!@[C;$(C#CC):2]>>[#6;$(C=C-[#6]),$(c:c):1][*].[*][CH1;$(C#CC):2]',\n'[c;$(c1:[c,n]:[c,n]:[c,n]:[c,n]:[c,n]:1):1]-!@[N;$(NC)&!$(N=*)&!$([N-])&!$(N#*)&!$([ND1])&!$(N[O])&!$(N[C,S]=[S,O,N]),H2&$(Nc1:[c,n]:[c,n]:[c,n]:[c,n]:[c,n]:1):2]>>[*][c;$(c1:[c,n]:[c,n]:[c,n]:[c,n]:[c,n]:1):1].[*][N:2]',\n'[*;!$(c1ccccc1);$(c1[n,c]c[n,c]c[n,c]1):1]-!@[N;$(NC);!$(N=*);!$([N-]);!$(N#*);!$([ND3]);!$([ND4]);!$(n[c,O]);!$(N[C,S]=[S,O,N]):2]>>[*;!$(c1ccccc1);$(c1[n,c]c[n,c]c[n,c]1):1][*].[*][N:2]',\n'[*;$(c1c(N(~O)~O)cccc1):1]-!@[N;$(NC);!$(N=*);!$([N-]);!$(N#*);!$([ND1]);!$(N[O]);!$(N[C,S]=[S,O,N]):2]>>[*;$(c1c(N(~O)~O)cccc1):1][*].[*][N:2]',\n'[*;$(c1ccc(N(~O)~O)cc1):1]-!@[N;$(NC);!$(N=*);!$([N-]);!$(N#*);!$([ND1]);!$(N[O]);!$(N[C,S]=[S,O,N]):2]>>[*;$(c1ccc(N(~O)~O)cc1):1][*].[*][N:2]',\n'[#6;!$([#6]=*);!$([#6]~[O,N,S]);$([#6]~[#6]):1][#6;!$([#6]=*);!$([#6]~[O,N,S]);$([#6]~[#6]):2]>>[#6;!$([#6]=*);!$([#6]~[O,N,S]);$([#6]~[#6]):1][*].[#6;!$([#6]=*);!$([#6]~[O,N,S]);$([#6]~[#6]):2][*]',\n'[C:2]([#7;!D4:1])(=[O:3])[#6:4]>>[#7:1][*].[C,$(C=O):2](=[O:3])([*])[#6:4]',\n'[#6;$(C(=O)):1][#7,#8,#
16:2]>>[*:1][*].[*:2][*]',\n'[O:2]=[#6:1][#7:5]>>[O:2]=[#6:1][*].[N:5][*]',\n'[#6;$(C=[O]):1][#8,#16:2]>>[*:1][*].[*][*:2]',\n'[N;!$(n1****1);!$(n1*****1);!$(N=*);!$(N(A=A));!$([N-]);!$(N~[O,P,S,N]):1]-!@[#6;!$(C=*);!$(C(A=A));!$([C-]);!$(C~[O,P,S]):2]>>[N:1][*].[*][#6:2]',\n'[#6:8][O:7][C:5](=[O:6])[C:4]([C:2](=[O:3])[#6:1])[#6:9]>>[#6:1][C:2]([C:4]([*])[C:5]([O:7][#6:8])=[O:6])=[O:3].[#6:9][*]',\n'[#6:1][C:2]([#6:7])[C:3](=[O:4])[O:5][#6:6]>>[C;!H0:2]([*])([C:3]([O:5][#6:6])=[O:4])[#6:1].[#6:7][*]',\n'[N;!$(n1****1);!$(n1*****1);!$(N(A=A));!$(N=*);!$([N-]);!$(N~[O,P,S,N]):1][*;$(c1aaaaa1),$(c1aaaa1);!$(C=*);!$(C(A=A));!$([C-]);!$(C~[O,P,S]):2]>>[N:1][*].[#6:2][*]',\n'[C:3]([C:1]([#8:5][#6:6])=[O:2])[#6:7]=[O:8]>>[#6:6][#8:5][C:1](=[O:2])[C!H0:3][*].[#6:7](=[O:8])[*]',\n'[N+:1]([#6:2])([#6:4])([#6:5])[#6:3]>>[N;!$(N=*);!$([N-]);!$(N~[O,P,S,N]):1]([#6:2])([#6:3])([*])[#6:4].[*][#6:5]',\n# '[c:1][C,N,S,O:2]>>[c:1][*].[*:2]'\n]"
},
{
"identifier": "CELECOXIB",
"path": "tests/chemistry/fixtures/test_data.py",
"snippet": "CELECOXIB = 'O=S(=O)(c3ccc(n1nc(cc1c2ccc(cc2)C)C(F)(F)F)cc3)N'"
}
] | import unittest
from reinvent.chemistry import Conversions
from reinvent.chemistry.library_design import (
FragmentReactionSliceEnumerator,
BondMaker,
AttachmentPoints,
)
from reinvent.chemistry.library_design.dtos import FilteringConditionDTO
from reinvent.chemistry.library_design.enums import MolecularDescriptorsEnum
from reinvent.chemistry.library_design.fragment_reactions import FragmentReactions
from tests.chemistry.library_design.fixtures import FRAGMENT_REACTION_SUZUKI, FRAGMENT_REACTIONS
from tests.chemistry.fixtures.test_data import CELECOXIB | 9,913 |
class TestSingleFragmentReactionsSliceEnumerator(unittest.TestCase):
    def setUp(self):
        self.chemistry = Conversions()
        self.reactions = FragmentReactions()
        self._bond_maker = BondMaker()
        self._attachment_points = AttachmentPoints()
        self._suzuki_reaction_dto_list = self.reactions.create_reactions_from_smirks(
|
class TestSingleFragmentReactionsSliceEnumerator(unittest.TestCase):
    def setUp(self):
        self.chemistry = Conversions()
        self.reactions = FragmentReactions()
        self._bond_maker = BondMaker()
        self._attachment_points = AttachmentPoints()
        self._suzuki_reaction_dto_list = self.reactions.create_reactions_from_smirks( | FRAGMENT_REACTION_SUZUKI | 7 | 2023-10-20 06:43:16+00:00 | 12k |
jhejna/cpl | research/algs/off_policy_algorithm.py | [
{
"identifier": "ReplayBuffer",
"path": "research/datasets/replay_buffer/buffer.py",
"snippet": "class ReplayBuffer(torch.utils.data.IterableDataset):\n \"\"\"\n Generic Replay Buffer Class.\n\n This class adheres to the following conventions to support multiprocessing:\n 1. Variables/functions starting with \"_\", like \"_help\" are to be used only by the replay buffer internaly. They\n are carefully setup for multiprocesing.\n 2. variables/functions named regularly without a leading \"_\" are to be used by the main thread. This includes\n standard functions like \"add\".\n\n There are a few critical setup options.\n 1. Capacity: determines if the buffer is setup upon creation. If it is set to a known value, then we can add data\n online with `add`, or by pulling more data from disk. If is set to None, the dataset is initialized to the full\n size of the offline dataset.\n 2. path: path to offline data that will be loaded\n 3. _data_generator\n\n Some options are mutually exclusive. For example, it is bad to use a non-distributed layout with\n workers and online data. This will generate a bunch of copy on writes.\n\n Data is expected to be stored in a \"next\" format. This means that data is stored like this:\n s_0, dummy, dummy, dummy\n s_1, a_0 , r_0 , d_0\n s_2, a_1 , r_1 , d_1\n s_3, a_2 , r_2 , d_2 ... End of episode!\n s_0, dummy, dummy, dummy\n s_1, a_0 , r_0 , d_0\n s_2, a_1 , r_1 , d_1\n\n This format is expected from the load(path) funciton.\n\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.Space,\n action_space: gym.Space,\n sample_fn: Union[str, Callable] = \"sample\",\n sample_kwargs: Optional[Dict] = None,\n epoch_ratio: float = 1.0,\n path: Optional[str] = None,\n capacity: Optional[int] = None,\n exclude_keys: Optional[List[str]] = None,\n include_keys: Optional[Dict] = None,\n stacked_obs: bool = False,\n stacked_action: bool = False,\n distributed: bool = False,\n fetch_every: int = 1000,\n cleanup: bool = True,\n ) -> None:\n # Remove stacking if present.\n self.stacked_obs = stacked_obs\n if self.stacked_obs:\n observation_space = remove_stack_dim(observation_space)\n self.stacked_action = stacked_action\n if self.stacked_action:\n action_space = remove_stack_dim(action_space)\n\n self.observation_space = observation_space\n self.action_space = action_space\n\n # Construct the space for the buffer\n self.exclude_keys = [] if exclude_keys is None else exclude_keys # keys to exclude in the storage buffer\n buffer_space = {\n \"obs\": self.observation_space,\n \"action\": self.action_space,\n \"reward\": 0.0,\n \"done\": False,\n \"discount\": 1.0,\n }\n flattened_buffer_space = utils.flatten_dict(buffer_space)\n if include_keys is not None:\n flattened_buffer_space.update(include_keys)\n print(\"FLATTENED BUFFER SPACE\", flattened_buffer_space)\n for k in self.exclude_keys:\n if k in flattened_buffer_space:\n del flattened_buffer_space[k]\n self.buffer_space = utils.nest_dict(flattened_buffer_space)\n\n self.dummy_action = self.action_space.sample()\n self.capacity = capacity\n\n # Setup the sampler\n if isinstance(sample_fn, str):\n sample_fn = vars(sampling)[sample_fn]\n # Use functools partial to override the default args.\n sample_kwargs = {} if sample_kwargs is None else sample_kwargs\n self.sample_fn = functools.partial(sample_fn, **sample_kwargs)\n # Add sampling parameters\n self.epoch_ratio = epoch_ratio\n\n # Path for preloaded data\n self.path = path\n\n # Setup based on distributed value\n self.distributed = distributed\n if self.distributed:\n self.cleanup = cleanup\n self.fetch_every = fetch_every\n if self.capacity is not 
None:\n self.storage_path = tempfile.mkdtemp(prefix=\"replay_buffer_\")\n print(\"[research] Replay Buffer Storage Path\", self.storage_path)\n self.current_ep = utils.nest_dict({k: list() for k in flattened_buffer_space.keys()})\n self.num_episodes = 0\n else:\n self._alloc(self.capacity) # Alloc immediately\n\n def _alloc(self, capacity):\n # Create the data generator\n self._current_data_generator = self._data_generator()\n\n if capacity is None:\n # Allocte the entire dataset\n data = utils.concatenate(*list(self._current_data_generator), dim=0)\n self._storage = storage.FixedStorage(data)\n else:\n # Construct the buffer space. Remember to exclude any exclude keys\n self._storage = storage.CircularStorage(self.buffer_space, capacity)\n # Fill the storage.\n # if self.path is not None:\n for data in self._current_data_generator:\n self._storage.extend(data)\n if self._storage.size >= self._storage.capacity:\n break\n\n print(\"[ReplayBuffer] Allocated {:.2f} GB\".format(self._storage.bytes / 1024**3))\n\n def _data_generator(self):\n \"\"\"\n Can be overridden in order to load the initial data differently.\n By default assumes the data to be the standard format, and returned as a data dictionary.\n or\n None\n\n This function can be overriden by sub-classes in order to produce data batches.\n It should do the following:\n 1. split data across torch data workers\n 2. randomize the order of data\n 3. yield data of the form dicts\n \"\"\"\n if self.path is None:\n return\n\n # By default get all of the file names that are distributed at the correct index\n worker_info = torch.utils.data.get_worker_info()\n num_workers = 1 if worker_info is None else worker_info.num_workers\n worker_id = 0 if worker_info is None else worker_info.id\n\n ep_filenames = [os.path.join(self.path, f) for f in os.listdir(self.path) if f.endswith(\".npz\")]\n random.shuffle(ep_filenames) # Shuffle all the filenames\n\n if num_workers > 1 and len(ep_filenames) == 1:\n print(\n \"[ReplayBuffer] Warning: using multiple workers but single replay file. 
Reduce memory usage by sharding\"\n \" data with `save` instead of `save_flat`.\"\n )\n elif num_workers > 1 and len(ep_filenames) < num_workers:\n print(\"[ReplayBuffer] Warning: using more workers than dataset files.\")\n\n for ep_filename in ep_filenames:\n ep_idx, _ = [int(x) for x in os.path.splitext(ep_filename)[0].split(\"_\")[-2:]]\n # Spread loaded data across workers if we have multiple workers and files.\n if ep_idx % num_workers != worker_id and len(ep_filenames) > 1:\n continue # Only yield the files belonging to this worker.\n data = storage.load_data(ep_filename, exclude_keys=self.exclude_keys)\n yield data\n\n def _fetch_offline(self) -> int:\n \"\"\"\n This simple function fetches a new episode from the offline dataset and adds it to the buffer.\n This is done for each worker.\n \"\"\"\n try:\n data = next(self._current_data_generator)\n except StopIteration:\n self._current_data_generator = self._data_generator()\n data = next(self._current_data_generator)\n self._storage.extend(data)\n # Return the fetched size\n return len(data[\"done\"]) # data must have the done key for storage\n\n def _fetch_online(self) -> int:\n worker_info = torch.utils.data.get_worker_info()\n assert worker_info is not None, \"Must use distributed buffer for online fetching.\"\n\n ep_filenames = sorted([os.path.join(self.storage_path, f) for f in os.listdir(self.storage_path)], reverse=True)\n fetched_size = 0\n for ep_filename in ep_filenames:\n ep_idx, ep_len = [int(x) for x in os.path.splitext(ep_filename)[0].split(\"_\")[-2:]]\n if ep_idx % worker_info.num_workers != worker_info.id:\n continue\n if ep_filename in self._episode_filenames:\n break # We found something we have already loaded\n if fetched_size + ep_len > self._storage.capacity:\n break # do not fetch more than the size of the replay buffer\n\n data = storage.load_data(ep_filename, exclude_keys=self.exclude_keys)\n self._storage.extend(data)\n self._episode_filenames.add(ep_filename)\n if self.cleanup:\n try:\n os.remove(ep_filename)\n except OSError:\n pass\n\n return fetched_size\n\n def _get_dummy_transition(self, obs):\n flattened_buffer_space = utils.flatten_dict(self.buffer_space)\n dummy_transition = {\n k: v.sample() if isinstance(v, gym.Space) else v\n for k, v in flattened_buffer_space.items()\n if not k.startswith(\"obs\") and not k.startswith(\"action\")\n }\n dummy_transition = utils.nest_dict(dummy_transition)\n dummy_transition[\"obs\"] = obs\n dummy_transition[\"action\"] = self.dummy_action\n return dummy_transition\n\n def _reset_current_ep(self):\n ep_idx = self.num_episodes\n ep_len = len(self.current_ep[\"done\"])\n self.num_episodes += 1\n ts = datetime.datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n ep_filename = f\"{ts}_{ep_idx}_{ep_len}.npz\"\n storage.save_data(self.current_ep, os.path.join(self.storage_path, ep_filename))\n\n flattened_buffer_space = utils.flatten_dict(self.buffer_space)\n ep = {k: list() for k in flattened_buffer_space.keys()}\n self.current_ep = utils.nest_dict(ep)\n\n def add(self, **kwargs):\n assert self.capacity is not None, \"Tried to extend to a static size buffer.\"\n # Preprocess here before adding to storage\n if len(kwargs) == 1:\n assert \"obs\" in kwargs\n kwargs = self._get_dummy_transition(kwargs[\"obs\"])\n if self.stacked_obs:\n kwargs[\"obs\"] = utils.get_from_batch(kwargs[\"obs\"], -1)\n else:\n # We have a full transitions\n if self.stacked_obs:\n kwargs[\"obs\"] = utils.get_from_batch(kwargs[\"obs\"], -1)\n if self.stacked_action:\n kwargs[\"action\"] = 
utils.get_from_batch(kwargs[\"action\"], -1)\n\n assert \"done\" in kwargs, \"Need done key for ReplayBuffer\"\n\n # This function is overwritten for distributed / local buffers\n if self.distributed:\n # Add to the current thread, and dump to disk\n utils.append(self.current_ep, kwargs)\n if kwargs[\"done\"]:\n self._reset_current_ep()\n else:\n # Add directly\n self._learning_online = True\n self._storage.add(kwargs)\n\n def extend(self, **kwargs):\n assert \"done\" in kwargs, \"Need done key for ReplayBuffer\"\n assert self.capacity is not None, \"Tried to extend to a static size buffer.\"\n # TODO: There is a chance that if we add a full sequence we will end up with (B, T, stack, ...)\n # which is not what we want. We could compare the shapes of the observation space to fix it\n # but this code might be unnecesary, as this class shouldn't really be used like that anyways.\n if self.distributed:\n # Add to the current thread, and dump to disk\n utils.extend(self.current_ep, kwargs)\n if kwargs[\"done\"][-1]:\n self._reset_current_ep()\n else:\n # Add directly\n self._learning_online = True\n self._storage.extend(kwargs)\n\n def save(self, path):\n os.makedirs(path, exist_ok=True)\n if self.distributed:\n if self.cleanup:\n print(\"[research] Warning, attempting to save a cleaned up replay buffer. There are likely no files\")\n srcs = os.listdir(self.storage_path)\n for src in srcs:\n shutil.move(os.path.join(self.storage_path, src), os.path.join(path, src))\n print(\"Successfully saved\", len(srcs), \"episodes.\")\n else:\n ep_len = self._storage.size\n ep_idx = 0\n ts = datetime.datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n ep_filename = f\"{ts}_{ep_idx}_{ep_len}.npz\"\n save_path = os.path.join(path, ep_filename)\n self._storage.save(save_path)\n\n def sample(self, *args, **kwargs):\n return self.sample_fn(self._storage, *args, **kwargs)\n\n def __iter__(self):\n assert not hasattr(self, \"_iterated\"), \"__iter__ called twice!\"\n self._iterated = True\n worker_info = torch.utils.data.get_worker_info()\n assert (worker_info is not None) == self.distributed, \"ReplayBuffer.distributed not set correctly!\"\n\n # allocate the buffer with the given capacity\n if self.distributed:\n self._alloc(None if self.capacity is None else self.capacity // worker_info.num_workers)\n self._episode_filenames = set()\n\n self._learning_online = False\n\n samples_since_last_offline_fetch = 0\n samples_since_last_online_fetch = 0\n last_offline_fetch_size = 0\n\n batch_size = self.sample_fn.keywords.get(\"batch_size\", 1)\n stack_size = self.sample_fn.keywords.get(\"stack\", 1)\n seq_size = self.sample_fn.keywords.get(\"seq_length\", 1)\n\n while True:\n if self._storage.size < seq_size * stack_size + 1:\n yield {} # If the buffer is too small for sampling, continue.\n else:\n sample = self.sample_fn(self._storage)\n if batch_size == 1:\n sample = utils.squeeze(sample, 0)\n yield sample\n\n # Fetch new data if we have a circular buffer.\n if isinstance(self._storage, storage.CircularStorage):\n if self.distributed: # Always check for online data\n # We fetch from the online buffer\n samples_since_last_online_fetch += 1\n if samples_since_last_online_fetch >= self.fetch_every:\n fetch_size = self._fetch_online()\n self._learning_online = self._learning_online or (fetch_size > 0)\n samples_since_last_online_fetch = 0\n\n if not self._learning_online and self.path is not None:\n # We fetch from the offline buffer\n samples_since_last_offline_fetch += 1\n data_pts_since_last_offline_fetch = (\n 
samples_since_last_offline_fetch * batch_size * seq_size * stack_size\n )\n if data_pts_since_last_offline_fetch >= last_offline_fetch_size * self.epoch_ratio:\n last_offline_fetch_size = self._fetch_offline()\n samples_since_last_offline_fetch = 0\n\n def __del__(self):\n if not self.distributed:\n return\n if self.cleanup:\n return\n else:\n paths = [os.path.join(self.storage_path, f) for f in os.listdir(self.storage_path)]\n for path in paths:\n try:\n os.remove(path)\n except OSError:\n pass\n try:\n os.rmdir(self.storage_path)\n except OSError:\n pass"
},
{
"identifier": "storage",
"path": "research/datasets/replay_buffer/storage.py",
"snippet": "def load_data(path: str, exclude_keys: Optional[List[str]]) -> Dict:\ndef save_data(data: Dict, path: str) -> None:\ndef get_bytes(buffer: Union[Dict, np.ndarray]) -> int:\n def capacity(self):\n def size(self):\n def starts(self):\n def ends(self):\n def lengths(self):\n def bytes(self):\n def save(self, path):\n def __getitem__(self, key):\n def __getattr__(self, name):\n def __contains__(self, key):\n def add(self, data):\n def extend(self, data):\n def __init__(self, buffers: Dict) -> None:\n def add(self, data):\n def extend(self, data):\n def __init__(self, initial_capacity: int = 100, dtype=np.int64):\n def _reset(self):\n def append(self, value):\n def pop(self):\n def popleft(self):\n def view(self):\n def __len__(self):\n def first(self):\n def last(self):\n def __str__(self):\n def __init__(self, buffer_space: Union[Dict, gym.spaces.Dict], capacity: Optional[int] = None) -> None:\n def _update_markers(self, new_ends: Iterable = ()):\n def add(self, data):\n def extend(self, data):\nclass Storage(abc.ABC):\nclass FixedStorage(Storage):\nclass NPQueue(object):\nclass CircularStorage(Storage):"
},
{
"identifier": "EmptyEnv",
"path": "research/envs/base.py",
"snippet": "class EmptyEnv(gym.Env):\n\n \"\"\"\n An empty holder for defining supervised learning problems\n It works by specifying the ranges and shapes.\n \"\"\"\n\n def __init__(\n self,\n observation_low=None,\n observation_high=None,\n observation_shape=None,\n observation_dtype=np.float32,\n observation_space=None,\n action_low=None,\n action_high=None,\n action_shape=None,\n action_dtype=np.float32,\n action_space=None,\n ):\n if observation_space is not None:\n self.observation_space = observation_space\n else:\n self.observation_space = _get_space(observation_low, observation_high, observation_shape, observation_dtype)\n if action_space is not None:\n self.action_space = action_space\n else:\n self.action_space = _get_space(action_low, action_high, action_shape, action_dtype)\n\n def step(self, action):\n raise NotImplementedError(\"Empty Env does not have step\")\n\n def reset(self, **kwargs):\n raise NotImplementedError(\"Empty Env does not have reset\")"
},
{
"identifier": "ModuleContainer",
"path": "research/networks/base.py",
"snippet": "class ModuleContainer(torch.nn.Module):\n CONTAINERS = []\n\n def __init__(self, observation_space: gym.Space, action_space: gym.Space, **kwargs) -> None:\n super().__init__()\n # save the classes and containers\n base_kwargs = {k: v for k, v in kwargs.items() if not k.endswith(\"_class\") and not k.endswith(\"_kwargs\")}\n\n output_space = observation_space\n for container in self.CONTAINERS:\n module_class = kwargs.get(container + \"_class\", torch.nn.Identity)\n module_class = vars(research.networks)[module_class] if isinstance(module_class, str) else module_class\n if module_class is torch.nn.Identity:\n module_kwargs = dict()\n else:\n module_kwargs = base_kwargs.copy()\n module_kwargs.update(kwargs.get(container + \"_kwargs\", dict()))\n # Create the module, and attach it to self\n module = module_class(output_space, action_space, **module_kwargs)\n setattr(self, container, module)\n\n # Set a reset function\n setattr(self, \"reset_\" + container, partial(self._reset, container))\n\n if hasattr(getattr(self, container), \"output_space\"):\n # update the output space\n output_space = getattr(self, container).output_space\n\n # Done creating all sub-modules.\n\n @classmethod\n def create_subset(cls, containers):\n assert all([container in cls.CONTAINERS for container in containers])\n name = \"\".join([container.capitalize() for container in containers]) + \"Subset\"\n return type(name, (ModuleContainer,), {\"CONTAINERS\": containers})\n\n def _reset(self, container: str) -> None:\n module = getattr(self, container)\n with torch.no_grad():\n module.apply(reset)\n\n def compile(self, **kwargs):\n for container in self.CONTAINERS:\n attr = getattr(self, container)\n if type(attr).forward == torch.nn.Module.forward:\n assert hasattr(attr, \"compile\"), (\n \"container \" + container + \" is nn.Module without forward() but didn't define `compile`.\"\n )\n attr.compile(**kwargs)\n else:\n setattr(self, container, torch.compile(attr, **kwargs))\n\n def forward(self, x):\n # Use all of the modules in order\n for container in self.CONTAINERS:\n x = getattr(self, container)(x)\n return x"
},
{
"identifier": "runners",
"path": "research/utils/runners.py",
"snippet": "class CloudpickleWrapper:\nclass AsyncState(Enum):\nclass AsyncEnv(gym.Env):\nclass MPRunner(object):\n def __init__(self, fn: Callable):\n def __getstate__(self):\n def __setstate__(self, ob):\n def __call__(self):\ndef alloc_shared_buffer(space: Any):\ndef read_shared_buffer(shared_buffer: Any, space: gym.Space):\ndef write_shared_buffer(shared_buffer: Any, space: gym.Space, value: Any):\n def __init__(\n self, env_fn: Callable, observation_space: Optional[gym.Space] = None, action_space: Optional[gym.Space] = None\n ):\n def step_send(self, action):\n def step_recv(self):\n def step(self, action):\n def reset_send(self):\n def reset_recv(self):\n def reset(self):\n def close(self):\ndef _async_env_worker(env_fn, pipe, parent_pipe, obs_buffer, action_buffer):\n def __init__(\n self,\n env_fn,\n fn: Optional[Callable] = None,\n observation_space: Optional[gym.Space] = None,\n action_space: Optional[gym.Space] = None,\n **kwargs,\n ):\n def start(self, fn: Optional[Callable] = None, **kwargs):\n def started(self):\n def __call__(self, block=False):\n def step(self, *args, **kwargs):\n def reset(self, *args, **kwargs):\n def close(self):\n DEFAULT = \"default\"\n WAITING_RESET = \"reset\"\n WAITING_STEP = \"step\""
},
{
"identifier": "utils",
"path": "research/utils/utils.py",
"snippet": "def to_device(batch: Any, device: torch.device) -> Any:\ndef to_tensor(batch: Any) -> Any:\ndef to_np(batch: Any) -> Any:\ndef remove_float64(batch: Any):\ndef unsqueeze(batch: Any, dim: int) -> Any:\ndef squeeze(batch: Any, dim: int) -> Any:\ndef get_from_batch(batch: Any, start: Union[int, np.ndarray, torch.Tensor], end: Optional[int] = None) -> Any:\ndef set_in_batch(batch: Any, value: Any, start: int, end: Optional[int] = None) -> None:\ndef batch_copy(batch: Any) -> Any:\ndef space_copy(space: gym.Space):\ndef contains_tensors(batch: Any) -> bool:\ndef get_device(batch: Any) -> Optional[torch.device]:\ndef concatenate(*args, dim: int = 0):\ndef append(lst, item):\ndef extend(lst1, lst2):\n def __init__(self, name: str = \"\"):\n def forward(self, x: Any) -> Any:\ndef np_dataset_alloc(\n space: gym.Space, capacity: int, begin_pad: Tuple[int] = tuple(), end_pad: Tuple[int] = tuple()\n) -> np.ndarray:\ndef np_bytes_per_instance(space: gym.Space) -> int:\ndef _flatten_dict_helper(flat_dict: Dict, value: Any, prefix: str, separator: str = \".\") -> None:\ndef flatten_dict(d: Dict, separator: str = \".\") -> Dict:\ndef nest_dict(d: Dict, separator: str = \".\") -> Dict:\ndef fetch_from_dict(d: Dict, keys: Union[str, List, Tuple], separator=\".\") -> List[Any]:\ndef create_optim_groups(params, kwargs):\nclass PrintNode(torch.nn.Module):"
},
{
"identifier": "Algorithm",
"path": "research/algs/base.py",
"snippet": "class Algorithm(ABC):\n _save_keys: Set[str]\n _compiled: bool\n\n def __init__(\n self,\n observation_space: gym.Space,\n action_space: gym.Space,\n network_class: Type[torch.nn.Module],\n dataset_class: Union[Type[torch.utils.data.IterableDataset], Type[torch.utils.data.Dataset]],\n network_kwargs: Optional[Dict] = None,\n dataset_kwargs: Optional[Dict] = None,\n validation_dataset_class: Optional[\n Union[Type[torch.utils.data.IterableDataset], Type[torch.utils.data.Dataset]]\n ] = None,\n validation_dataset_kwargs: Optional[Dict] = None,\n optim_class: Type[torch.optim.Optimizer] = torch.optim.Adam,\n optim_kwargs: Optional[Dict] = None,\n schedulers_class: Optional[Dict] = None,\n schedulers_kwargs: Optional[Dict[str, Dict]] = None,\n processor_class: Optional[Type[Processor]] = None,\n processor_kwargs: Optional[Dict] = None,\n checkpoint: Optional[str] = None,\n device: Union[str, torch.device] = \"auto\",\n ):\n # Initialize the _save_keys attribute using the superclass.\n # These are used for automatically identifying keys for saving/loading.\n super().__setattr__(\"_save_keys\", set())\n super().__setattr__(\"_module_keys\", set())\n super().__setattr__(\"_compiled\", False)\n\n # Save relevant values\n self.observation_space = observation_space\n self.action_space = action_space\n self.optim = {}\n\n # setup devices\n if device == \"auto\":\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self._device = torch.device(device)\n\n # Setup the data preprocessor first. Thus, if we need to reference it in network setup we can.\n # Everything here is saved in self.processor\n self.setup_processor(processor_class, {} if processor_kwargs is None else processor_kwargs)\n\n # Create the network.\n network_kwargs = {} if network_kwargs is None else network_kwargs\n self.setup_network(network_class, network_kwargs)\n\n # Save values for optimizers, which will be lazily initialized later\n self.optim = {}\n self.optim_class = optim_class\n self.optim_kwargs = {\"lr\": 0.0001} if optim_kwargs is None else optim_kwargs\n\n # Save values for schedulers, which will be lazily initialized later\n self.schedulers = {}\n self.schedulers_class = {} if schedulers_class is None else schedulers_class\n self.schedulers_kwargs = {} if schedulers_kwargs is None else schedulers_kwargs\n\n # Save values for datasets, which will be lazily initialized later\n self.dataset_class = dataset_class\n self.dataset_kwargs = {} if dataset_kwargs is None else dataset_kwargs\n self.validation_dataset_class = validation_dataset_class\n self.validation_dataset_kwargs = validation_dataset_kwargs\n\n self._training = False\n\n # Load a check point if we have one -- using non-strict enforcement.\n # NOTE: this only loads the network and will _not_ load the optimizer checkpoint.\n if checkpoint is not None:\n self.load(checkpoint, strict=False)\n\n @property\n def device(self):\n return self._device\n\n @property\n def training(self) -> bool:\n return self._training\n\n def __setattr__(self, name: str, value: Any) -> None:\n # Check to see if the value is a module etc.\n if (hasattr(self, \"_save_keys\") and name in self._save_keys) or (\n hasattr(self, \"_module_keys\") and name in self._module_keys\n ):\n pass\n elif isinstance(value, torch.nn.Parameter):\n self._save_keys.add(name)\n elif isinstance(value, torch.nn.Module):\n self._module_keys.add(name)\n if sum(p.numel() for p in value.parameters()) > 0:\n self._save_keys.add(name) # store if we have a module with more than zero parameters.\n 
return super().__setattr__(name, value)\n\n @property\n def save_keys(self) -> List[str]:\n return self._save_keys\n\n @property\n def module_keys(self) -> List[str]:\n return self._module_keys\n\n @property\n def compiled(self) -> bool:\n return self._compiled\n\n def to(self, device) -> \"Algorithm\":\n for k in self.save_keys:\n if k == \"processor\" and not self.processor.supports_gpu:\n continue\n else:\n setattr(self, k, getattr(self, k).to(device))\n return self\n\n def compile(self, **kwargs):\n for k in self.save_keys:\n attr = getattr(self, k)\n if isinstance(attr, torch.nn.Module):\n if type(attr).forward == torch.nn.Module.forward:\n # In this case, the forward method hasn't been overriden.\n # Thus we assume there is a compile argument.\n assert hasattr(attr, \"compile\"), (\n \"save key \" + k + \" is nn.Module without forward() but didn't define `compile`.\"\n )\n attr.compile(**kwargs)\n else:\n setattr(self, k, torch.compile(attr, **kwargs))\n # indicate that we have compiled the models.\n self._compiled = True\n\n def train(self) -> None:\n for k in self._module_keys:\n getattr(self, k).train()\n self._training = True\n\n def eval(self) -> None:\n for k in self._module_keys:\n getattr(self, k).eval()\n self._training = False\n\n @property\n def num_params(self):\n _num_params = 0\n for k in self.save_keys:\n attr = getattr(self, k)\n if hasattr(attr, \"parameters\"):\n _num_params += sum(p.numel() for p in attr.parameters() if p.requires_grad)\n else:\n assert isinstance(attr, torch.nn.Parameter), \"Can only save Modules or Parameters.\"\n if attr.requires_grad:\n _num_params += attr.numel()\n return _num_params\n\n @property\n def nbytes(self):\n # Returns the size of all the parameters in bytes\n _bytes = 0\n for k in self.save_keys:\n attr = getattr(self, k)\n if hasattr(attr, \"parameters\"):\n for p in attr.parameters():\n _bytes += p.nelement() * p.element_size()\n if hasattr(attr, \"buffers\"):\n for b in attr.buffers():\n _bytes += b.nelement() * b.element_size()\n return _bytes\n\n def setup_processor(self, processor_class: Optional[Type[Processor]], processor_kwargs: Dict) -> None:\n if processor_class is None:\n processor = Identity(self.observation_space, self.action_space)\n else:\n processor = processor_class(self.observation_space, self.action_space, **processor_kwargs)\n\n if processor.supports_gpu: # move it to device if it supports GPU computation.\n self.processor = processor.to(self.device)\n else:\n self.processor = processor\n\n def setup_network(self, network_class: Type[torch.nn.Module], network_kwargs: Dict) -> None:\n self.network = network_class(\n self.processor.observation_space, self.processor.action_space, **network_kwargs\n ).to(self.device)\n\n def setup_optimizers(self) -> None:\n \"\"\"\n This is only called by the Trainer, and not called when we load the model.\n This is done so that inference jobs don't load the optimizer state.\n \"\"\"\n # Setup Optimizers\n assert len(self.optim) == 0, \"setup_optimizers called twice!\"\n for k in self.save_keys:\n attr = getattr(self, k)\n if hasattr(attr, \"parameters\"):\n parameters = attr.parameters()\n else:\n assert isinstance(attr, torch.nn.Parameter), \"Can only save Modules or Parameters.\"\n parameters = [attr]\n # Constrcut the optimizer\n self.optim[k] = self.optim_class(parameters, **self.optim_kwargs)\n\n def setup_schedulers(self):\n assert len(self.schedulers) == 0, \"setup_schedulers called twice!\"\n for k in self.schedulers_class.keys():\n if self.schedulers_class[k] is not 
None:\n assert k in self.optim, \"Did not find schedule key in optimizers dict.\"\n self.schedulers[k] = self.schedulers_class[k](self.optim[k], **self.schedulers_kwargs.get(k, dict()))\n\n def setup_datasets(self, env: gym.Env, total_steps: int):\n \"\"\"\n Called after everything else has been setup, right before training starts\n This is _only_ called by the trainer and is not called by default.\n This function is responsible for creating the following attributes:\n self.dataset (required)\n self.validation_dataset\n \"\"\"\n assert not hasattr(self, \"dataset\"), \"setup_datasets called twice!\"\n assert not hasattr(self, \"validation_dataset\"), \"setup_datasets called twice!\"\n # Setup the train dataset\n self.dataset = self.dataset_class(self.observation_space, self.action_space, **self.dataset_kwargs)\n # Setup the validation dataset\n if self.validation_dataset_class is not None:\n self.validation_dataset = self.validation_dataset_class(\n self.observation_space, self.action_space, **self.validation_dataset_kwargs\n )\n elif self.validation_dataset_kwargs is not None:\n validation_dataset_kwargs = copy.deepcopy(self.dataset_kwargs)\n validation_dataset_kwargs.update(self.validation_dataset_kwargs)\n self.validation_dataset = self.dataset_class(\n self.observation_space, self.action_space, **validation_dataset_kwargs\n )\n else:\n self.validation_dataset = None\n\n def save(self, path: str, extension: str, metadata: Optional[Dict] = None) -> None:\n \"\"\"\n Saves a checkpoint of the model and the optimizers\n \"\"\"\n save_dict = {}\n if len(self.optim) > 0:\n save_dict[\"optim\"] = {k: v.state_dict() for k, v in self.optim.items()}\n if len(self.schedulers) > 0:\n save_dict[\"schedulers\"] = {k: v.state_dict() for k, v in self.schedulers.items()}\n for k in self._save_keys:\n attr = getattr(self, k)\n if hasattr(attr, \"state_dict\"):\n save_dict[k] = attr.state_dict()\n else:\n assert isinstance(attr, torch.nn.Parameter), \"Can only save Modules or Parameters.\"\n save_dict[k] = attr\n\n # Add the metadata\n save_dict[\"metadata\"] = {} if metadata is None else metadata\n save_path = os.path.join(path, extension)\n if not save_path.endswith(\".pt\"):\n save_path += \".pt\"\n torch.save(save_dict, save_path)\n\n def load(self, checkpoint: str, strict: bool = True) -> Dict:\n \"\"\"\n Loads the model and its associated checkpoints.\n If we haven't created the optimizers and schedulers, do not load those.\n \"\"\"\n print(\"[research] loading checkpoint:\", checkpoint)\n checkpoint = torch.load(checkpoint, map_location=self.device)\n remaining_checkpoint_keys = set(checkpoint.keys())\n\n # First load everything except for the optim\n for k in self.save_keys: # Loop through keys in the Algorithm.\n if k not in checkpoint:\n if strict:\n raise ValueError(\"Checkpoint did not have key \" + str(k))\n else:\n print(\"[research] Warning: Checkpoint did not have key\", k)\n continue\n\n if isinstance(getattr(self, k), torch.nn.Parameter):\n # directly set the data, this is for nn.Parameters\n getattr(self, k).data = checkpoint[k].data\n else:\n # Otherwise, load via state dict\n getattr(self, k).load_state_dict(checkpoint[k], strict=strict)\n remaining_checkpoint_keys.remove(k)\n\n # Now load the optimizer and its associated keys\n for k in self.optim.keys():\n if strict and k not in checkpoint[\"optim\"]:\n raise ValueError(\"Strict mode was enabled, but couldn't find optimizer key\")\n elif k not in checkpoint[\"optim\"]:\n print(\"[research] Warning: Checkpoint did not have optimizer 
key\", k)\n continue\n self.optim[k].load_state_dict(checkpoint[\"optim\"][k])\n if \"optim\" in checkpoint:\n remaining_checkpoint_keys.remove(\"optim\")\n\n # Now load the schedulers\n for k in self.schedulers.keys():\n if strict and k not in checkpoint[\"schedulers\"]:\n raise ValueError(\"Strict mode was enabled, but couldn't find scheduler key\")\n elif k not in checkpoint[\"schedulers\"]:\n print(\"[research] Warning: Checkpoint did not have scheduler key\", k)\n continue\n self.schedulers[k].load_state_dict(checkpoint[\"schedulers\"][k])\n if \"schedulers\" in checkpoint:\n remaining_checkpoint_keys.remove(\"schedulers\")\n\n remaining_checkpoint_keys.remove(\"metadata\") # Do not count metadata key, which is always addded.\n if strict and len(remaining_checkpoint_keys) > 0:\n raise ValueError(\"Algorithm did not have keys \", +str(remaining_checkpoint_keys))\n elif len(remaining_checkpoint_keys) > 0:\n print(\"[research] Warning: Checkpoint keys\", remaining_checkpoint_keys, \"were not loaded.\")\n\n return checkpoint[\"metadata\"]\n\n def format_batch(self, batch: Any) -> Any:\n # Convert items to tensor if they are not.\n # Checking first makes sure we do not distrub memory pinning\n if not utils.contains_tensors(batch):\n batch = utils.to_tensor(batch)\n if self.processor.supports_gpu:\n # Move to CUDA first.\n batch = utils.to_device(batch, self.device)\n batch = self.processor(batch)\n else:\n batch = self.processor(batch)\n batch = utils.to_device(batch, self.device)\n return batch\n\n @abstractmethod\n def train_step(self, batch: Any, step: int, total_steps: int) -> Dict:\n \"\"\"\n Train the model. Should return a dict of loggable values\n \"\"\"\n return {}\n\n def validation_step(self, batch: Any) -> Dict:\n \"\"\"\n perform a validation step. Should return a dict of loggable values.\n \"\"\"\n raise NotImplementedError\n\n def env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict:\n \"\"\"\n Perform any extra training operations. This is done before the train step is called.\n A common use case for this would be stepping the environment etc.\n \"\"\"\n return {}\n\n def validation_extras(self, path: str, step: int) -> Dict:\n \"\"\"\n Perform any extra validation operations.\n A common usecase for this is saving visualizations etc.\n \"\"\"\n return {}\n\n def _predict(self, batch: Any, **kwargs) -> Any:\n \"\"\"\n Internal prediction function, can be overridden\n By default, we call torch.no_grad(). If this behavior isn't desired,\n override the _predict funciton in your algorithm.\n \"\"\"\n with torch.no_grad():\n if len(kwargs) > 0:\n raise ValueError(\"Default predict method does not accept key word args, but they were provided.\")\n pred = self.network(batch)\n return pred\n\n def predict(self, batch: Any, is_batched: bool = False, **kwargs) -> Any:\n is_np = not utils.contains_tensors(batch)\n if not is_batched:\n # Unsqeeuze everything\n batch = utils.unsqueeze(batch, 0)\n batch = self.format_batch(batch)\n pred = self._predict(batch, **kwargs)\n if not is_batched:\n pred = utils.get_from_batch(pred, 0)\n if is_np:\n pred = utils.to_np(pred)\n return pred"
}
] | import datetime
import functools
import os
import sys
import tempfile
import gym
import numpy as np
import torch
from abc import abstractmethod
from typing import Any, Dict, Optional, Union
from research.datasets import ReplayBuffer
from research.datasets.replay_buffer import storage
from research.envs.base import EmptyEnv
from research.networks.base import ModuleContainer
from research.utils import runners, utils
from .base import Algorithm
from research.utils.config import Config | 9,758 |
class OffPolicyAlgorithm(Algorithm):
def __init__(
self,
*args,
offline_steps: int = 0, # Run fully offline by setting to -1
random_steps: int = 1000,
async_runner_ep_lag: int = 1,
**kwargs,
):
super().__init__(*args, **kwargs)
self.offline_steps = offline_steps
self.random_steps = random_steps
self.async_runner_ep_lag = async_runner_ep_lag

    def setup_datasets(self, env: gym.Env, total_steps: int):
super().setup_datasets(env, total_steps)
# Assign the correct update function based on what is passed in.
|
class OffPolicyAlgorithm(Algorithm):
def __init__(
self,
*args,
offline_steps: int = 0, # Run fully offline by setting to -1
random_steps: int = 1000,
async_runner_ep_lag: int = 1,
**kwargs,
):
super().__init__(*args, **kwargs)
self.offline_steps = offline_steps
self.random_steps = random_steps
self.async_runner_ep_lag = async_runner_ep_lag

    def setup_datasets(self, env: gym.Env, total_steps: int):
super().setup_datasets(env, total_steps)
# Assign the correct update function based on what is passed in. | if env is None or isinstance(env, EmptyEnv) or self.offline_steps < 0: | 2 | 2023-10-19 17:25:45+00:00 | 12k |
nbasyl/LLM-FP4 | configs/FPQ_baseline_config_llama.py | [
{
"identifier": "FPPTQSLBatchingQuantLinear_fpq_baseline",
"path": "quant_layers/fp_linear.py",
"snippet": "class FPPTQSLBatchingQuantLinear_fpq_baseline(FPPTQSLQuantLinear):\n def __init__(self, \n in_features: int,\n out_features: int,\n bias: bool = True,\n mode = \"raw\",\n w_bit = 8,\n a_bit = 8,\n w_exponent_bit = 4, a_exponent_bit = 4,\n bias_bit = None,\n bias_correction = False,\n metric=\"L2_norm\", search_round=1, eq_alpha=0, eq_beta=1, eq_n=100, parallel_eq_n=10, n_H=1, n_V=1, n_a=1):\n super().__init__(in_features, out_features, bias=bias, mode=mode, w_bit=w_bit, a_bit=a_bit, w_exponent_bit= w_exponent_bit, a_exponent_bit=a_exponent_bit, bias_bit=bias_bit, bias_correction=bias_correction, metric=metric, search_round=search_round, eq_alpha=eq_alpha, eq_beta=eq_beta, eq_n=eq_n, parallel_eq_n=parallel_eq_n, n_H=n_H, n_V=n_V, n_a=n_a)\n self.calib_size = None\n self.calib_batch_size = None\n self.calib_need_batching = False\n self.w_maxval = None\n self.w_intervals = None\n self.a_maxval = None\n self.a_intervals = None\n\n def _initialize_calib_parameters(self):\n \"\"\" \n set parameters for feeding calibration data\n \"\"\"\n self.calib_size = int(self.raw_input.shape[0])\n self.calib_batch_size = int(self.raw_input.shape[0])\n i = 0\n while True:\n numel = (2*(self.raw_input.numel()+self.raw_out.numel())/self.calib_size*self.calib_batch_size) # number of parameters on GPU\n self.parallel_eq_n = int((3*1024*1024*1024/4)//numel)\n if self.parallel_eq_n <= 1:\n self.calib_need_batching = True\n self.calib_batch_size //= 2\n else:\n break\n \n def _initialize_intervals(self):\n # weight intervals \n print(\"channel-wise weight\")\n self.n_V = self.out_features\n self.crb_rows = self.out_features // self.n_V\n w_maxval = self.weight.view(self.n_V, self.crb_rows,self.n_H,self.crb_cols).abs().amax([1,3],keepdim=True)\n self.w_maxval = w_maxval\n self.w_interval=(2**self.w_exponent_bit - torch.log2(w_maxval) + math.log2(2 - 2 ** (-self.w_mantissa_bit)) - 1)\n self.w_intervals = []\n if self.w_bit == 8:\n for i in range(self.w_bit-3):\n M = i + 2\n E = self.w_bit - 1 - M\n self.w_intervals.append(2**E - torch.log2(self.w_maxval) + math.log2(2 - 2 ** (-M)) - 1)\n\n else:\n for i in range(self.w_bit-1):\n M = i\n E = self.w_bit - 1 - M\n self.w_intervals.append(2**E - torch.log2(self.w_maxval) + math.log2(2 - 2 ** (-M)) - 1)\n\n # activation intervals\n tmp_a_maxvals = []\n for b_st in range(0,self.calib_size,self.calib_batch_size):\n b_ed = min(self.calib_size, b_st+self.calib_batch_size)\n x_ = self.raw_input[b_st:b_ed].to(self.weight.device)\n x_maxval = x_.abs().max()\n tmp_a_maxvals.append(x_maxval)\n \n # print(f'tmp_a_intervals[0] {tmp_a_intervals[0].shape}')\n tmp_a_maxvals = torch.tensor(tmp_a_maxvals).to(x_.device)\n # print(f'tmp_a_maxvals {tmp_a_maxvals.shape}')\n self.a_maxval = tmp_a_maxvals.amax(dim=0, keepdim=True)\n self.a_interval = (2**self.a_exponent_bit - torch.log2(self.a_maxval) + math.log2(2 - 2 ** (-self.a_mantissa_bit)) - 1).detach().view(1,1).repeat(self.n_a,1)\n\n self.a_intervals = []\n if self.a_bit == 8:\n for i in range(self.a_bit-3):\n M = i + 2\n E = self.a_bit - 1 - M\n a_interval_=(2**E - torch.log2(self.a_maxval) + math.log2(2 - 2 ** (-M)) - 1).detach().view(1,1).repeat(self.n_a,1)\n\n self.a_intervals.append(a_interval_.clone())\n else:\n for i in range(self.a_bit-1):\n M = i\n E = self.a_bit - 1 - M\n a_interval_=(2**E - torch.log2(self.a_maxval) + math.log2(2 - 2 ** (-M)) - 1).detach().view(1,1).repeat(self.n_a,1)\n self.a_intervals.append(a_interval_.clone())\n\n def _initialize_intervals_eval(self):\n self._initialize_calib_parameters()\n 
print(\"channel-wise weight\")\n self.n_V = self.out_features\n self.crb_rows = self.out_features // self.n_V\n w_maxval = self.weight.view(self.n_V, self.crb_rows,self.n_H,self.crb_cols).abs().amax([1,3],keepdim=True)\n self.w_maxval = w_maxval\n self.w_interval=(2**self.w_exponent_bit - torch.log2(w_maxval) + math.log2(2 - 2 ** (-self.w_mantissa_bit)) - 1)\n\n # activation intervals\n tmp_a_maxvals = []\n for b_st in range(0,self.calib_size,self.calib_batch_size):\n b_ed = min(self.calib_size, b_st+self.calib_batch_size)\n x_ = self.raw_input[b_st:b_ed].to(self.weight.device)\n x_maxval = x_.abs().max()\n tmp_a_maxvals.append(x_maxval)\n \n tmp_a_maxvals = torch.tensor(tmp_a_maxvals).to(x_.device)\n self.a_maxval = tmp_a_maxvals.amax(dim=0, keepdim=True)\n self.a_interval = (2**self.a_exponent_bit - torch.log2(self.a_maxval) + math.log2(2 - 2 ** (-self.a_mantissa_bit)) - 1).detach().view(1,1).repeat(self.n_a,1)\n\n self.calibrated = True\n\n def _get_similarity(self, tensor_raw, tensor_sim, metric=None, raw_grad=None):\n \"\"\"\n tensor_raw: *, features\n tensor_sim: *, features\n similarity: *\n It's your job to calculate mean on * dims!\n \"\"\"\n if metric == \"cosine\":\n similarity = F.cosine_similarity(tensor_raw, tensor_sim, dim=-1)\n else:\n if metric == \"L1_norm\":\n similarity = -torch.abs(tensor_raw - tensor_sim)\n elif metric == \"linear_weighted_L2_norm\":\n similarity = -tensor_raw.abs() * (tensor_raw - tensor_sim) ** 2\n elif metric == \"square_weighted_L2_norm\":\n similarity = -(tensor_raw * (tensor_raw - tensor_sim)) ** 2\n elif metric == \"L2_norm\":\n similarity = -(tensor_raw - tensor_sim) ** 2\n else:\n raise NotImplementedError(f\"metric {metric} not implemented!\")\n similarity = torch.mean(similarity, dim=-1)\n return similarity\n\n def _get_pearson_w(self, tensor_raw, tensor_sim):\n \"\"\"\n Quick implementation of similarity-aware linear quantization\n tensor_sim: b,*,parallel_eq_n,n_V,crb_rows\n tensor_raw: b,*,1,n_V,crb_rows\n \"\"\"\n b, parallel_eq_n, n_V = tensor_sim.shape[0],tensor_sim.shape[-3],tensor_sim.shape[-2]\n tensor_sim = tensor_sim.transpose(-1,-3).contiguous_().view(b,-1,n_V,parallel_eq_n)\n tensor_raw = tensor_raw.transpose(-1,-3).view(b,-1,n_V,1)\n tensor_sim_mean = tensor_sim.mean(dim=[0,1],keepdim=True)\n tensor_raw_mean = tensor_raw.mean(dim=[0,1],keepdim=True)\n similarity = torch.cosine_similarity(tensor_raw-tensor_raw_mean, tensor_sim-tensor_sim_mean, dim=1) # shape: b,n_V,parallel_eq_n\n similarity = similarity.permute(0,2,1).contiguous_()\n return similarity\n \n def _get_pearson_a(self, tensor_raw, tensor_sim):\n \"\"\"\n Quick implementation of similarity-aware linear quantization\n tensor_sim: b,*,parallel_eq_n,oc\n tensor_raw: b,*,1,oc\n \"\"\"\n b, parallel_eq_n = tensor_sim.shape[0],tensor_sim.shape[-2]\n tensor_sim = tensor_sim.transpose(-1,-2).contiguous_().view(b,-1,parallel_eq_n)\n tensor_raw = tensor_raw.transpose(-1,-2).view(b,-1,1)\n tensor_sim_mean = tensor_sim.mean(dim=[0,1],keepdim=True)\n tensor_raw_mean = tensor_raw.mean(dim=[0,1],keepdim=True)\n similarity = torch.cosine_similarity(tensor_raw-tensor_raw_mean, tensor_sim-tensor_sim_mean, dim=1) # shape: b,parallel_eq_n\n return similarity\n\n def _search_best_w_interval(self, weight_interval_candidates):\n \n # tmp_w_interval = self.w_interval.unsqueeze(0) # shape: 1,n_V,1,n_H,1\n # print(f\"weight_interval_candidates shape {weight_interval_candidates.shape}\")\n for man in range(weight_interval_candidates.shape[0]):\n tmp_w_interval = 
self.w_intervals[man].unsqueeze(0) # shape: 1,n_V,1,n_H,1\n for h in range(self.n_H):\n batch_similarities = [] # similarities, need to concatenate and calculate sum (equivalent to mean with argmax)\n # print(f\"before search E{self.w_bit-1-man}M{man} self.w_intervals[man] {self.w_intervals[man][0][0]}\")\n for b_st in range(0, self.calib_size, self.calib_batch_size):\n b_ed = min(self.calib_size, b_st + self.calib_batch_size)\n x = self.raw_input[b_st:b_ed].to(self.weight.device)\n raw_out_expanded = self.raw_out[b_st:b_ed].to(self.weight.device).unsqueeze(-2) # shape: b,*,1,oc\n raw_out_expanded = torch.cat(torch.chunk(raw_out_expanded.unsqueeze(-2), chunks=self.n_V, dim=-1), dim=-2) # shape: b,*,1,n_V,crb_rows\n raw_grad = self.raw_grad\n similarities = []\n for p_st in range(0,self.eq_n,self.parallel_eq_n):\n p_ed = min(self.eq_n, p_st+self.parallel_eq_n)\n cur_w_interval = tmp_w_interval.repeat(p_ed-p_st,1,1,1,1)\n # print(f\"cur_w_interval {cur_w_interval.shape}\")\n cur_w_interval[:,:,:,h:h+1,:] = weight_interval_candidates[man][p_st:p_ed,:,:,h:h+1,:]\n # quantize weight and bias \n w_sim = self.weight.view(self.n_V,self.crb_rows,self.n_H,self.crb_cols).unsqueeze(0) # shape: 1,n_V,crb_rows,n_H,crb_cols\n\n if self.w_bit == 8:\n w, cur_w_scale = self.get_scale(w_sim, bits = self.w_bit, mantissa_bit= man+2, bias= cur_w_interval)\n else:\n w, cur_w_scale = self.get_scale(w_sim, bits = self.w_bit, mantissa_bit= man, bias= cur_w_interval)\n \n \n w_sim = (w/cur_w_scale).round_().mul_(cur_w_scale) # shape: parallel_eq_n,n_V,crb_rows,n_H,crb_cols\n w_sim = w_sim.view(-1,self.in_features) # shape: parallel_eq_n*oc,ic\n bias_sim = self.bias.repeat(p_ed-p_st) if self.bias is not None else None\n # quantize input\n x_sim = self.quant_input(x)\n # calculate similarity and store them\n out_sim = F.linear(x_sim, w_sim, bias_sim) # shape: b,*,parallel_eq_n*oc\n out_sim = torch.cat(torch.chunk(out_sim.unsqueeze(-2), chunks=p_ed-p_st, dim=-1), dim=-2) # shape: b,*,parallel_eq_n,oc\n out_sim = torch.cat(torch.chunk(out_sim.unsqueeze(-2), chunks=self.n_V, dim=-1), dim=-2) # shape: b,*,parallel_eq_n,n_V,crb_rows\n if self.metric != \"pearson\":\n similarity = self._get_similarity(raw_out_expanded, out_sim, self.metric, raw_grad) # shape: b,*,parallel_eq_n,n_V\n if len(similarity.shape) > 3:\n similarity = torch.mean(similarity, dim=list(range(1,len(similarity.shape)-2))) # shape: b, parallel_eq_n, n_V\n else:\n similarity = self._get_pearson_w(raw_out_expanded, out_sim)\n similarity = similarity.sum(dim=0, keepdim=True) # shape: 1, parallel_eq_n, n_V\n similarities.append(similarity)\n # store best weight interval of h into tmp_w_interval\n similarities = torch.cat(similarities, dim=1) # shape: 1, eq_n, n_V\n batch_similarities.append(similarities)\n batch_similarities = torch.cat(batch_similarities, dim=0).sum(dim=0, keepdim=False) # shape: eq_n, n_V\n h_best_index = batch_similarities.argmax(dim=0).reshape(1,-1,1,1,1) # shape: 1,n_V,1,1,1\n tmp_w_interval[:,:,:,h:h+1,:] = torch.gather(weight_interval_candidates[man][:,:,:,h:h+1,:],dim=0,index=h_best_index)\n self.w_intervals[man] = tmp_w_interval.squeeze(dim=0)\n\n def _search_best_w_format(self):\n \n # print(f\"before search linear weight E{self.w_exponent_bit}M{self.w_mantissa_bit}\")\n \n # format candidate\n w_mantissa_bits_candidate = [i for i in range(self.w_bit-1)]\n \n batch_similarities = [] # similarities, need to concatenate and calculate sum (equivalent to mean with argmax)\n for b_st in range(0, self.calib_size, 
self.calib_batch_size):\n b_ed = min(self.calib_size, b_st + self.calib_batch_size)\n x = self.raw_input[b_st:b_ed].to(self.weight.device)\n raw_out = self.raw_out[b_st:b_ed].to(self.weight.device) # shape: b,*,1,oc\n raw_grad = self.raw_grad\n similarities = []\n # quantize input\n x_sim = self.quant_input(x)\n for w_mantissa_bit in w_mantissa_bits_candidate:\n if self.w_bit == 8:\n w_mantissa_bit = w_mantissa_bit + 2\n else:\n w_mantissa_bit = w_mantissa_bit\n w_sim = self.weight.view(self.n_V,self.crb_rows,self.n_H,self.crb_cols)\n w,cur_w_scale = self.get_scale(w_sim, bits = self.w_bit, mantissa_bit= w_mantissa_bit, bias= self.w_intervals[w_mantissa_bit])\n w_sim = (w/cur_w_scale).round_().mul_(cur_w_scale)\n w_sim = w_sim.view(-1,self.in_features)\n bias_sim = self.bias if self.bias is not None else None\n out_sim = F.linear(x_sim, w_sim, bias_sim) # shape: B,*,oc\n similarity = self._get_similarity(raw_out, out_sim, self.metric, raw_grad) #B,*,oc\n # print(f\"weight similarity shape {similarity.shape}\")\n similarity = torch.mean(similarity) # shape: 1\n similarities.append(similarity)\n similarities = torch.tensor(similarities)\n # print(f\"weight similarities {similarities}\")\n batch_similarities.append(similarities)\n batch_similarities = torch.vstack(batch_similarities)\n # print(f\"weight batch_similarities {batch_similarities}\")\n best_mantissa_bit = batch_similarities.sum(dim=0, keepdim=True).argmax(dim=1).item()\n \n if self.w_bit == 8:\n self.w_mantissa_bit = torch.tensor(best_mantissa_bit + 2).to(self.weight.device)\n self.w_exponent_bit = torch.tensor(self.w_bit - 1 - best_mantissa_bit).to(self.weight.device) \n \n else:\n self.w_mantissa_bit = torch.tensor(best_mantissa_bit).to(self.weight.device)\n self.w_exponent_bit = torch.tensor(self.w_bit - 1 - self.w_mantissa_bit).to(self.weight.device) \n \n self.w_interval = self.w_intervals[self.w_mantissa_bit]\n # print(f\"search result E{self.w_exponent_bit}M{self.w_mantissa_bit}\")\n # print(f\"after calibrate bias {self.w_interval[[0,10,30,40,50]]}\")\n # print(\"finish searching fp format for linear weight\")\n\n def _search_best_a_interval(self, input_interval_candidates):\n \n # print(f\"input_interval_candidates shape {input_interval_candidates.shape}\")\n for man in range(input_interval_candidates.shape[0]):\n tmp_a_interval = self.a_intervals[man].unsqueeze(-1) # shape: n_a,1,1\n # print(f\"tmp_a_interval.shape {tmp_a_interval.shape}\")\n for a in range(self.n_a):\n batch_similarities = [] # similarities, need to concatenate and calculate sum (equivalent to mean with argmax)\n for b_st in range(0, self.calib_size, self.calib_batch_size):\n b_ed = min(self.calib_size, b_st + self.calib_batch_size)\n x = self.raw_input[b_st:b_ed].to(self.weight.device)\n raw_out_expanded = self.raw_out[b_st:b_ed].to(self.weight.device).unsqueeze(-2) # shape: b,*,1,oc\n raw_grad = self.raw_grad\n similarities = []\n for p_st in range(0,self.eq_n,self.parallel_eq_n):\n p_ed = min(self.eq_n, p_st+self.parallel_eq_n)\n cur_a_interval = tmp_a_interval.repeat(1,1,p_ed-p_st) # shape: n_a,1,parallel_eq_n\n cur_a_interval[a:a+1,:,:] = input_interval_candidates[man][a:a+1,:,p_st:p_ed]\n # quantize weight and bias \n w_sim, bias_sim = self.quant_weight_bias()\n # quantize input\n x_sim=torch.cat(torch.chunk(x.unsqueeze(-2), chunks=self.n_a, dim=-1), dim=-2).unsqueeze(-1)\n \n if self.a_bit == 8:\n # print(f\"CUR a E{self.a_bit - 1 - man -2}M{man+2}\")\n cur_a, cur_a_scale = self.get_scale(x_sim, bits = self.a_bit, mantissa_bit= man+2, bias= 
cur_a_interval)\n else:\n cur_a, cur_a_scale = self.get_scale(x_sim, bits = self.a_bit, mantissa_bit= man, bias= cur_a_interval)\n\n x_sim=(cur_a/(cur_a_scale)).round_()*(cur_a_scale) # shape: b,*,n_a,crb_acts,parallel_eq_n\n x_sim = x_sim.permute(*list(range(len(x_sim.shape)-3)),-1,-3,-2).reshape(*x.shape[:-1],p_ed-p_st,x.shape[-1]) # shape: b,*,parallel_eq_n,ic\n # calculate similarity and store them\n out_sim = F.linear(x_sim, w_sim, bias_sim) # shape: b,*,parallel_eq_n,oc\n if self.metric != \"pearson\":\n similarity = self._get_similarity(raw_out_expanded, out_sim, self.metric, raw_grad) # shape: b,*,parallel_eq_n\n if len(similarity.shape) > 2:\n similarity = torch.mean(similarity, dim=list(range(1,len(similarity.shape)-1))) # shape: b, parallel_eq_n\n else:\n similarity = self._get_pearson_a(raw_out_expanded, out_sim)\n similarity = torch.sum(similarity, dim=0, keepdim=True) # shape: 1, parallel_eq_n\n similarities.append(similarity)\n # store best input interval and store in tmp_a_interval\n similarities = torch.cat(similarities, dim=1) # shape: 1, eq_n\n batch_similarities.append(similarities)\n batch_similarities = torch.cat(batch_similarities, dim=0).sum(dim=0, keepdim=False) # shape: eq_n\n a_best_index = batch_similarities.argmax(dim=0, keepdim=True).reshape(1,1,-1)\n tmp_a_interval[a:a+1,:,:] = torch.gather(input_interval_candidates[man][a:a+1,:,:],dim=2,index=a_best_index)\n self.a_intervals[man] = tmp_a_interval.squeeze(-1)\n\n def _search_best_a_format(self):\n \n batch_similarities = [] # similarities, need to concatenate and calculate sum (equivalent to mean with argmax)\n\n # format candidate\n if self.a_bit == 8:\n a_mantissa_bits_candidate = [i for i in range(self.a_bit-3)]\n else:\n a_mantissa_bits_candidate = [i for i in range(self.a_bit-1)]\n # quantize input\n w_sim, bias_sim = self.quant_weight_bias()\n # print(f\"before search linear activation E{self.a_exponent_bit}M{self.a_mantissa_bit}\")\n for b_st in range(0, self.calib_size, self.calib_batch_size):\n b_ed = min(self.calib_size, b_st + self.calib_batch_size)\n x = self.raw_input[b_st:b_ed].to(self.weight.device)\n raw_out = self.raw_out[b_st:b_ed].to(self.weight.device) # shape: b,*,oc\n raw_grad = self.raw_grad\n similarities = []\n \n for a_mantissa_bit in a_mantissa_bits_candidate:\n if self.a_bit == 8:\n a_mantissa_bit = a_mantissa_bit + 2\n \n x_sim = torch.cat(torch.chunk(x.unsqueeze(-2), chunks=self.n_a, dim=-1), dim=-2)\n cur_a, cur_a_scale = self.get_scale(x_sim, bits = self.a_bit, mantissa_bit= a_mantissa_bit, bias= self.a_intervals[a_mantissa_bit])\n x_sim=(cur_a/(cur_a_scale)).round_()*(cur_a_scale) # shape: B,*,n_a,crb_acts\n # print(f\"x_sim shape {x_sim.shape}\")\n if len(x.shape) == 3:\n x_sim = x_sim.view(x.shape[0],x.shape[1],x.shape[2])\n else:\n # print(f\"x {x.shape}\")\n # print(f\"raw_out {raw_out.shape}\")\n x_sim = x_sim.view(x.shape[0],1,x.shape[1])\n out_sim = F.linear(x_sim, w_sim, bias_sim) # shape: B,*,oc \n # print(f\"E{self.a_bit - 1 - a_mantissa_bit}M{a_mantissa_bit}\")\n # print(f\"search act out_sim {out_sim.shape}\")\n # print(f\"search act out_sim {out_sim[0][2][0:10]}\")\n # print(f\"raw_out {raw_out[0][2][0:10]}\")\n similarity = self._get_similarity(raw_out, out_sim, self.metric, raw_grad) #B,*,oc\n # print(f\"activation similarity shape {similarity.shape}\")\n similarity = torch.mean(similarity)\n # print(f\"similarity: {similarity}\")\n similarities.append(similarity)\n similarities = torch.tensor(similarities)\n batch_similarities.append(similarities)\n \n 
batch_similarities = torch.vstack(batch_similarities)\n best_mantissa_bit = batch_similarities.sum(dim=0, keepdim=True).argmax(dim=1).item()\n \n if self.a_bit == 8:\n self.a_mantissa_bit = torch.tensor(best_mantissa_bit + 2).to(self.weight.device)\n self.a_exponent_bit = torch.tensor(self.a_bit - 1 - best_mantissa_bit).to(self.weight.device) \n \n else:\n self.a_mantissa_bit = torch.tensor(best_mantissa_bit).to(self.weight.device)\n self.a_exponent_bit = torch.tensor(self.a_bit - 1 - best_mantissa_bit).to(self.weight.device) \n \n self.a_interval = self.a_intervals[self.a_mantissa_bit]\n # print(f\"search result linear activation E{self.a_exponent_bit}M{self.a_mantissa_bit}\")\n # print(f\"after calibrate bias {self.w_interval[[0,10,30,40,50]]}\")\n # print(\"finish searching fp format for linear activation\")\n\n def calibration_step2(self):\n \"\"\"\n Only use cached raw inputs/outs/grads\n \"\"\"\n self._initialize_calib_parameters()\n self._initialize_intervals()\n\n # prepare weight intervals and similarities\n weight_interval_candidates = []\n if self.w_bit == 8:\n for m in range(self.w_bit-3):\n weight_interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(-1,1,1,1,1) * self.w_intervals[m].unsqueeze(0)\n weight_interval_candidates.append(weight_interval_candidate.unsqueeze(0)) # shape: num_man_options,eq_n,n_V,1,n_H,1\n else:\n for m in range(self.w_bit-1):\n weight_interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(-1,1,1,1,1) * self.w_intervals[m].unsqueeze(0)\n weight_interval_candidates.append(weight_interval_candidate.unsqueeze(0)) # shape: num_man_options,eq_n,n_V,1,n_H,1\n weight_interval_candidates = torch.vstack(weight_interval_candidates)\n\n input_interval_candidates = []\n if self.a_bit == 8:\n for m in range(self.a_bit-3): \n input_interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(1,1,-1) * self.a_intervals[m].unsqueeze(-1)\n input_interval_candidates.append(input_interval_candidate.unsqueeze(0)) # shape: n_a,1,eq_n\n else:\n for m in range(self.a_bit-1): \n input_interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(1,1,-1) * self.a_intervals[m].unsqueeze(-1)\n input_interval_candidates.append(input_interval_candidate.unsqueeze(0)) # shape: n_a,1,eq_n\n input_interval_candidates = torch.vstack(input_interval_candidates)\n \n for e in range(self.search_round):\n # search for best weight interval\n self._search_best_w_interval(weight_interval_candidates)\n # search for best weight format\n self._search_best_w_format()\n # search for best input interval\n self._search_best_a_interval(input_interval_candidates)\n # search for best input format\n self._search_best_a_format()\n\n\n self.calibrated = True\n del self.raw_input, self.raw_out, self.raw_grad\n return None"
},
{
"identifier": "FPPTQSLQuantEmbedding_fpq_baseline",
"path": "quant_layers/fp_embed.py",
"snippet": "class FPPTQSLQuantEmbedding_fpq_baseline(FPPTQSLQuantEmbedding):\n def __init__(self, \n num_embeddings: int,\n embedding_dim: int,\n padding_idx: int,\n mode = \"raw\",\n bit = 8,\n exponent_bit = 4,\n bias_bit = None,\n bias_correction = False,\n metric=\"L2_norm\", search_round=1, eq_alpha=0, eq_beta=1, eq_n=100, parallel_eq_n=1, n_H=1, n_V=1):\n super().__init__(num_embeddings, embedding_dim, padding_idx, mode=mode, bit=bit, exponent_bit= exponent_bit, bias_bit=bias_bit, bias_correction=bias_correction, metric=metric, search_round=search_round, eq_alpha=eq_alpha, eq_beta=eq_beta, eq_n=eq_n, parallel_eq_n=parallel_eq_n, n_H=n_H, n_V=n_V)\n self.maxval = None\n self.intervals = None\n\n def _initialize_intervals_eval(self):\n\n self.n_V = self.num_embeddings\n self.crb_rows = self.num_embeddings // self.n_V\n maxval = self.weight.view(self.n_V, self.crb_rows,self.n_H,self.crb_cols).abs().amax([1,3],keepdim=True)\n self.maxval = maxval\n self.interval=(2**self.exponent_bit - torch.log2(maxval) + math.log2(2 - 2 ** (-self.mantissa_bit)) - 1)\n self.calibrated = True\n\n def _initialize_intervals(self):\n\n self.n_V = self.num_embeddings\n self.crb_rows = self.num_embeddings // self.n_V\n maxval = self.weight.view(self.n_V, self.crb_rows,self.n_H,self.crb_cols).abs().amax([1,3],keepdim=True)\n self.maxval = maxval\n self.interval=(2**self.exponent_bit - torch.log2(maxval) + math.log2(2 - 2 ** (-self.mantissa_bit)) - 1)\n self.intervals = []\n if self.bit == 8: ## need to constrain the exponent as too big exponent bits will result in overflow\n # E7M0, E6M1, E5M2, E4M3, E3M4, E2M5, E1M6, start with E5M2 as E7M0 and E6M1 usually performs quite bad and results in overflow\n for i in range(self.bit-3):\n M = i + 2\n E = self.bit - 1 - M\n self.intervals.append(2**E - torch.log2(self.maxval) + math.log2(2 - 2 ** (-M)) - 1)\n\n else:\n for i in range(self.bit-1):\n M = i\n E = self.bit - 1 - M\n self.intervals.append(2**E - torch.log2(self.maxval) + math.log2(2 - 2 ** (-M)) - 1)\n\n def _get_similarity(self, tensor_raw, tensor_sim, metric=None):\n \"\"\"\n tensor_raw: *, features\n tensor_sim: *, features\n similarity: *\n It's your job to calculate mean on * dims!\n \"\"\"\n if metric == \"cosine\":\n similarity = F.cosine_similarity(tensor_raw, tensor_sim, dim=-1)\n else:\n if metric == \"L1_norm\":\n similarity = -torch.abs(tensor_raw - tensor_sim)\n elif metric == \"L2_norm\":\n similarity = -(tensor_raw - tensor_sim) ** 2\n elif metric == \"linear_weighted_L2_norm\":\n similarity = -tensor_raw.abs() * (tensor_raw - tensor_sim) ** 2\n elif metric == \"square_weighted_L2_norm\":\n similarity = -(tensor_raw * (tensor_raw - tensor_sim)) ** 2\n else:\n raise NotImplementedError(f\"metric {metric} not implemented!\")\n similarity = torch.mean(similarity, dim=-1)\n return similarity\n\n def _search_best_interval(self, interval_candidates):\n \n # print(f\"interval_candidates shape {interval_candidates.shape}\")\n for man in range(interval_candidates.shape[0]):\n tmp_interval = self.intervals[man].unsqueeze(0) # shape: 1,n_V,1,n_H,1\n for h in range(self.n_H):\n similarities = []\n for p_st in range(0,self.eq_n,self.parallel_eq_n):\n p_ed = min(self.eq_n, p_st+self.parallel_eq_n)\n cur_w_interval = tmp_interval.repeat(p_ed-p_st,1,1,1,1)\n cur_w_interval[:,:,:,h:h+1,:] = interval_candidates[man][p_st:p_ed,:,:,h:h+1,:]\n # quantize weight and bias \n w_sim = self.weight.view(self.n_V,self.crb_rows,self.n_H,self.crb_cols).unsqueeze(0) # shape: 1,n_V,crb_rows,n_H,crb_cols\n \n if self.bit >= 
8:\n w, cur_w_scale = self.get_scale(w_sim, bits = self.bit, mantissa_bit= man+2, bias= cur_w_interval)\n else:\n w, cur_w_scale = self.get_scale(w_sim, bits = self.bit, mantissa_bit= man, bias= cur_w_interval)\n\n w_sim = (w/cur_w_scale).round_().mul_(cur_w_scale) # shape: parallel_eq_n,n_V,crb_rows,n_H,crb_cols\n w_sim = w_sim.view(-1,self.num_embeddings,self.embedding_dim) # shape: parallel_eq_n*oc,ic\n \n\n similarity = self._get_similarity(self.weight.unsqueeze(0), w_sim, self.metric) # shape: B,*,parallel_eq_n,n_V\n if self.n_V == 1:\n similarity = similarity.sum(dim=1, keepdim=True)\n \n similarities.append(similarity)\n # store best weight interval of h into tmp_interval\n similarities = torch.cat(similarities, dim=0) # shape: eq_n, n_V\n h_best_index = similarities.argmax(dim=0).reshape(1,-1,1,1,1) # shape: 1,n_V,1,1,1\n tmp_interval[:,:,:,h:h+1,:] = torch.gather(interval_candidates[man][:,:,:,h:h+1,:],dim=0,index=h_best_index)\n self.intervals[man] = tmp_interval.squeeze(dim=0)\n\n def _search_best_format(self):\n \n # print(f\"before search linear weight E{self.w_exponent_bit}M{self.w_mantissa_bit}\")\n \n # format candidate\n if self.bit >= 8:\n mantissa_bits_candidate = [i for i in range(self.bit-3)]\n else:\n mantissa_bits_candidate = [i for i in range(self.bit-1)]\n \n similarities = []\n for mantissa_bit in mantissa_bits_candidate:\n if self.bit >= 8:\n shift_mantissa_bit = mantissa_bit + 2\n else:\n shift_mantissa_bit = mantissa_bit\n \n w_sim = self.weight.view(self.n_V,self.crb_rows,self.n_H,self.crb_cols)\n w, cur_w_scale = self.get_scale(w_sim, bits = self.bit, mantissa_bit= shift_mantissa_bit, bias= self.intervals[mantissa_bit])\n \n w_sim = (w/cur_w_scale)\n \n w_sim = w_sim.round_().mul_(cur_w_scale)\n\n \n w_sim = w_sim.view(-1,self.num_embeddings,self.embedding_dim)\n\n similarity = self._get_similarity(self.weight.unsqueeze(0), w_sim, self.metric) #B,*,oc\n similarity = torch.mean(similarity) # shape: 1\n similarities.append(similarity)\n similarities = torch.tensor(similarities)\n best_mantissa_bit = similarities.argmax(dim=0).item()\n \n if self.bit >= 8:\n self.mantissa_bit = torch.tensor(best_mantissa_bit + 2).to(self.weight.device)\n self.exponent_bit = torch.tensor(self.bit - 1 - best_mantissa_bit).to(self.weight.device) \n \n else:\n self.mantissa_bit = torch.tensor(best_mantissa_bit).to(self.weight.device) \n self.exponent_bit = torch.tensor(self.bit - 1 - best_mantissa_bit).to(self.weight.device) \n \n self.interval = self.intervals[best_mantissa_bit]\n\n def calibration_step2(self):\n\n self._initialize_intervals()\n\n # prepare intervals and similarities\n interval_candidates = []\n if self.bit >=8:\n for m in range(self.bit-3): #m 2 ~ 6\n interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(-1,1,1,1,1) * self.intervals[m].unsqueeze(0)\n interval_candidates.append(interval_candidate.unsqueeze(0)) # shape: num_man_options,eq_n,n_V,1,n_H,1\n \n else:\n for m in range(self.bit-1): #m 0 ~ 6\n interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(-1,1,1,1,1) * self.intervals[m].unsqueeze(0)\n interval_candidates.append(interval_candidate.unsqueeze(0)) # shape: num_man_options,eq_n,n_V,1,n_H,1\n interval_candidates = torch.vstack(interval_candidates)\n\n for e in range(self.search_round):\n # search for best weight interval\n 
self._search_best_interval(interval_candidates)\n # search for best weight format\n self._search_best_format()\n\n print(f\"search format E{self.exponent_bit}M{self.mantissa_bit}\")\n\n self.calibrated = True\n return None"
}
] | from quant_layers.fp_linear import FPPTQSLBatchingQuantLinear_fpq_baseline
from quant_layers.fp_embed import FPPTQSLQuantEmbedding_fpq_baseline | 9,607 |
bit = 8
exp_bit = 4
embed_name_list = ["qembedding"]
fc_name_list = [ "qlinear_query", "qlinear_key", "qlinear_value", "qlinear_o","qlinear_gate","qlinear_down","qlinear_up","qlinear_score"]
matmul_name_list = [ "qmatmul_qk", "qmatmul_scorev"]
w_bit = {name: bit for name in fc_name_list}
a_bit = {name: bit for name in fc_name_list}
embed_bit = {name: bit for name in embed_name_list}
A_bit = {name: bit for name in matmul_name_list}
B_bit = {name: bit for name in matmul_name_list}
w_exp_bit = {name: exp_bit for name in fc_name_list}
a_exp_bit = {name: exp_bit for name in fc_name_list}
embed_exp_bit = {name: exp_bit for name in embed_name_list}
A_exp_bit = {name: exp_bit for name in matmul_name_list}
B_exp_bit = {name: exp_bit for name in matmul_name_list}

ptqsl_embedding_kwargs = {
"metric": "L2_norm",
"eq_alpha": 0.01,
"eq_beta": 1.2,
"eq_n": 100,
'search_round': 3,
"n_V": 1,
"n_H": 1
}

ptqsl_linear_kwargs = {
"metric": "L2_norm",
"eq_alpha": 0.01,
"eq_beta": 1.2,
"eq_n": 100,
'search_round': 3,
"n_V": 1,
"n_H": 1,
"n_a": 1,
"bias_correction":True # Conventionally I'll not add an actual bias correction in linear
}

def get_module(module_type, *args, **kwargs):
if "embedding" in module_type:
kwargs.update(ptqsl_embedding_kwargs)
|
bit = 8
exp_bit = 4
embed_name_list = ["qembedding"]
fc_name_list = [ "qlinear_query", "qlinear_key", "qlinear_value", "qlinear_o","qlinear_gate","qlinear_down","qlinear_up","qlinear_score"]
matmul_name_list = [ "qmatmul_qk", "qmatmul_scorev"]
w_bit = {name: bit for name in fc_name_list}
a_bit = {name: bit for name in fc_name_list}
embed_bit = {name: bit for name in embed_name_list}
A_bit = {name: bit for name in matmul_name_list}
B_bit = {name: bit for name in matmul_name_list}
w_exp_bit = {name: exp_bit for name in fc_name_list}
a_exp_bit = {name: exp_bit for name in fc_name_list}
embed_exp_bit = {name: exp_bit for name in embed_name_list}
A_exp_bit = {name: exp_bit for name in matmul_name_list}
B_exp_bit = {name: exp_bit for name in matmul_name_list}

ptqsl_embedding_kwargs = {
"metric": "L2_norm",
"eq_alpha": 0.01,
"eq_beta": 1.2,
"eq_n": 100,
'search_round': 3,
"n_V": 1,
"n_H": 1
}

ptqsl_linear_kwargs = {
"metric": "L2_norm",
"eq_alpha": 0.01,
"eq_beta": 1.2,
"eq_n": 100,
'search_round': 3,
"n_V": 1,
"n_H": 1,
"n_a": 1,
"bias_correction":True # Conventionally I'll not add an actual bias correction in linear
}

def get_module(module_type, *args, **kwargs):
if "embedding" in module_type:
kwargs.update(ptqsl_embedding_kwargs) | module= FPPTQSLQuantEmbedding_fpq_baseline(*args,**kwargs,bit= embed_bit[module_type], exponent_bit=embed_exp_bit[module_type], padding_idx=0) | 1 | 2023-10-15 06:05:13+00:00 | 12k |
bcmi/libcom | libcom/shadow_generation/source/PostProcessModel.py | [
{
"identifier": "ControlLDM",
"path": "libcom/shadow_generation/source/cldm/cldm.py",
"snippet": "class ControlLDM(LatentDiffusion):\n\n def __init__(self, control_stage_config, control_key, only_mid_control, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.control_model = instantiate_from_config(control_stage_config)\n self.control_key = control_key\n self.shadow_mask = \"mask\"\n self.only_mid_control = only_mid_control\n self.control_scales = [1.0] * 13\n self.LGP = latent_guidance_predictor(output_chan=1, input_chan=2240, num_encodings=9)\n\n @torch.no_grad()\n def get_input(self, batch, k, bs=None, *args, **kwargs):\n x, c = super().get_input(batch, self.first_stage_key, *args, **kwargs)\n control = batch[self.control_key]\n if bs is not None:\n control = control[:bs]\n control = control.to(self.device)\n control = einops.rearrange(control, 'b h w c -> b c h w')\n control = control.to(memory_format=torch.contiguous_format).float()\n mask = batch[self.shadow_mask]\n # mask = None\n return x, dict(c_crossattn=[c], c_concat=[control]), mask\n\n def apply_model(self, x_noisy, t, cond, *args, **kwargs):\n assert isinstance(cond, dict)\n diffusion_model = self.model.diffusion_model\n\n cond_txt = torch.cat(cond['c_crossattn'], 1)\n\n if cond['c_concat'] is None:\n eps = diffusion_model(x=x_noisy, timesteps=t, context=cond_txt, control=None, only_mid_control=self.only_mid_control)\n else:\n control = self.control_model(x=x_noisy, hint=torch.cat(cond['c_concat'], 1), timesteps=t, context=cond_txt)\n control = [c * scale for c, scale in zip(control, self.control_scales)]\n eps = diffusion_model(x=x_noisy, timesteps=t, context=cond_txt, control=control, only_mid_control=self.only_mid_control)\n\n return eps\n '''\n def training_step(self, batch, batch_idx):\n self.LGP.train()\n x, c, mask= self.get_input(batch, self.first_stage_key)\n features, encoded_edge_maps, noise_levels = [], [], []\n save_hook = save_out_hook\n blocks = [0,1,2,3]\n self.feature_blocks = []\n batch_size = batch['mask'].shape[0]\n if batch_size != 2:\n return None\n comp_img = batch['hint'][:, :, :, :3].permute(0,3,1,2)\n\n for idx, block in enumerate(self.model.diffusion_model.down_blocks):\n if idx in blocks:\n h=block.register_forward_hook(save_hook)\n self.feature_blocks.append([block,h]) \n \n # for idx, block in enumerate(self.model.diffusion_model.up_blocks):\n # if idx in blocks:\n # h=block.register_forward_hook(save_hook)\n # self.feature_blocks.append([block,h]) \n\n loss_noise1, _ = self(x, c, mask)\n\n loss_noise2, _, pred_x0 = self(x, c, mask, train_mask_only=True)\n\n activations = []\n\n for block,h in self.feature_blocks:\n activations.append(block.activations)\n block.activations = None\n h.remove()\n\n features = resize_and_concatenate(activations, x)\n\n gt_mask = batch[\"gt_mask\"].unsqueeze(1)\n\n predicted_mask = self.LGP(features)\n predicted_mask = predicted_mask.view(batch_size,1,64,64)\n\n loss_mask = nn.functional.mse_loss(predicted_mask, gt_mask, reduction='none').mean()\n\n predicted_mask = nn.functional.interpolate(\n predicted_mask.detach(), size=(512,512), mode=\"bilinear\"\n )\n predicted_mask = torch.greater_equal(predicted_mask, 0.6).int()\n\n cv2.imwrite(\"./pred_mask.png\", np.array(predicted_mask[0].squeeze(0).to('cpu') * 255))\n\n # pred_img = self.decode_first_stage_with_grad(pred_x0) * predicted_mask + (1-predicted_mask) * comp_img\n\n # loss_img = nn.functional.mse_loss(pred_img, batch['jpg'].permute(0,3,1,2), reduction='none').mean()\n\n loss = loss_noise1 + loss_noise2 + loss_mask\n\n return loss\n '''\n\n @torch.no_grad()\n def 
get_unconditional_conditioning(self, N):\n return self.get_learned_conditioning([\"\"] * N)\n\n @torch.no_grad()\n def log_images(self, batch, N=16, n_row=2, sample=False, ddim_steps=50, ddim_eta=0.0, return_keys=None,\n quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,\n plot_diffusion_rows=False, unconditional_guidance_scale=9.0, unconditional_guidance_label=None,\n use_ema_scope=True, mode='ddim', input=None, add_noise_strength=1, \n **kwargs):\n use_ddim = ddim_steps is not None\n\n log = dict()\n z, c, _= self.get_input(batch, self.first_stage_key, bs=N)\n c_cat, c = c[\"c_concat\"][0][:N], c[\"c_crossattn\"][0][:N]\n N = min(z.shape[0], N)\n n_row = min(z.shape[0], n_row)\n log[\"reconstruction\"] = self.decode_first_stage(z)\n log[\"control\"] = c_cat * 2.0 - 1.0\n log[\"conditioning\"] = log_txt_as_img((512, 512), batch[self.cond_stage_key], size=16)\n\n if plot_diffusion_rows:\n # get diffusion row\n diffusion_row = list()\n z_start = z[:n_row]\n for t in range(self.num_timesteps):\n if t % self.log_every_t == 0 or t == self.num_timesteps - 1:\n t = repeat(torch.tensor([t]), '1 -> b', b=n_row)\n t = t.to(self.device).long()\n noise = torch.randn_like(z_start)\n z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)\n diffusion_row.append(self.decode_first_stage(z_noisy))\n\n diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W\n diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')\n diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')\n diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])\n log[\"diffusion_row\"] = diffusion_grid\n\n if sample:\n # get denoise row\n samples, z_denoise_row = self.sample_log(cond={\"c_concat\": [c_cat], \"c_crossattn\": [c]},\n batch_size=N, mode=mode,\n ddim_steps=ddim_steps, eta=ddim_eta,\n input=input, add_noise_strength=add_noise_strength)\n x_samples = self.decode_first_stage(samples)\n log[\"samples\"] = x_samples\n if plot_denoise_rows:\n denoise_grid = self._get_denoise_row_from_list(z_denoise_row)\n log[\"denoise_row\"] = denoise_grid\n\n if unconditional_guidance_scale > 1.0:\n uc_cross = self.get_unconditional_conditioning(N)\n uc_cat = c_cat # torch.zeros_like(c_cat)\n uc_full = {\"c_concat\": [uc_cat], \"c_crossattn\": [uc_cross]}\n samples_cfg, _ = self.sample_log(cond={\"c_concat\": [c_cat], \"c_crossattn\": [c]},\n batch_size=N, mode=mode,ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=uc_full,\n input=input, add_noise_strength=add_noise_strength\n )\n x_samples_cfg = self.decode_first_stage(samples_cfg)\n log[f\"samples_cfg_scale_{unconditional_guidance_scale:.2f}\"] = x_samples_cfg\n\n return log\n\n @torch.no_grad()\n def sample_log(self, cond, batch_size, mode, ddim_steps, input, add_noise_strength, **kwargs):\n ddim_sampler = DDIMSampler(self)\n pndm_sampler = PNDMSampler(self)\n b, c, h, w = cond[\"c_concat\"][0].shape\n shape = (self.channels, h // 8, w // 8)\n if mode == 'ddim':\n samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs)\n elif mode == 'pndm':\n samples, intermediates = pndm_sampler.sample(ddim_steps, batch_size, shape, cond, \n verbose=False, input=input,\n strength=add_noise_strength, **kwargs)\n return samples, intermediates\n\n def configure_optimizers(self):\n lr = self.learning_rate\n params = list(self.control_model.parameters())\n if not 
self.sd_locked:\n params += list(self.model.diffusion_model.output_blocks.parameters())\n params += list(self.model.diffusion_model.out.parameters())\n opt = torch.optim.AdamW(params, lr=lr)\n return opt"
},
{
"identifier": "create_model",
"path": "libcom/shadow_generation/source/cldm/model.py",
"snippet": "def create_model(config_path):\n config = OmegaConf.load(config_path) if isinstance(config_path, str) else config_path\n model = instantiate_from_config(config.model).cpu()\n return model"
},
{
"identifier": "load_state_dict",
"path": "libcom/shadow_generation/source/cldm/model.py",
"snippet": "def load_state_dict(ckpt_path, location='cpu'):\n _, extension = os.path.splitext(ckpt_path)\n if extension.lower() == \".safetensors\":\n import safetensors.torch\n state_dict = safetensors.torch.load_file(ckpt_path, device=location)\n else:\n state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location)))\n state_dict = get_state_dict(state_dict)\n return state_dict"
},
{
"identifier": "PostProcessLogger",
"path": "libcom/shadow_generation/source/cldm/logger.py",
"snippet": "class PostProcessLogger(Callback):\n def __init__(self, batch_frequency=2000, max_images=4, log_num=1, clamp=True, increase_log_steps=True,\n rescale=True, disabled=False, log_on_batch_idx=False, log_first_step=False,\n log_images_kwargs=None):\n super().__init__()\n self.rescale = rescale\n self.batch_freq = batch_frequency\n self.max_images = max_images\n self.log_num = log_num\n if not increase_log_steps:\n self.log_steps = [self.batch_freq]\n self.clamp = clamp\n self.disabled = disabled\n self.log_on_batch_idx = log_on_batch_idx\n self.log_images_kwargs = log_images_kwargs if log_images_kwargs else {}\n self.log_first_step = log_first_step\n\n @rank_zero_only\n def log_img(self, pl_module, batch, batch_idx):\n check_idx = batch_idx # if self.log_on_batch_idx else pl_module.global_step\n if (self.check_frequency(check_idx) and # batch_idx % self.batch_freq == 0\n hasattr(pl_module, \"get_log\")):\n log_info = pl_module.get_log(batch, batch_idx, self.log_num)\n img_size = 256\n width = len(log_info) * img_size\n height = img_size\n\n for i in range(self.log_num):\n x_offset = 0\n img_to_save = Image.new(\"RGB\", size = (width, height))\n draw = ImageDraw.Draw(img_to_save)\n font = ImageFont.truetype(\"/usr/share/fonts/truetype/dejavu/DejaVuMathTeXGyre.ttf\", size=40)\n for title, imgs in log_info.items():\n img = Image.fromarray(np.array(imgs[i], dtype=np.uint8))\n img_to_save.paste(img, (x_offset, 0))\n draw.text((x_offset ,0), title, fill=\"red\", font=font)\n x_offset += img_size\n filename = \"gs-{:06}_e-{:06}_b-{:06}_{}.png\".format(pl_module.global_step, pl_module.current_epoch, batch_idx, i)\n root = os.path.join(pl_module.logger.save_dir, \"ppp_log\")\n os.makedirs(root, exist_ok=True)\n save_path = os.path.join(root, filename)\n img_to_save.save(save_path)\n\n def check_frequency(self, check_idx):\n return check_idx % self.batch_freq == 0\n\n def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):\n if not self.disabled:\n self.log_img(pl_module, batch, batch_idx)"
},
{
"identifier": "ResBlock",
"path": "libcom/shadow_generation/source/ldm/modules/diffusionmodules/openaimodel.py",
"snippet": "def convert_module_to_f16(x):\ndef convert_module_to_f32(x):\n def __init__(\n self,\n spacial_dim: int,\n embed_dim: int,\n num_heads_channels: int,\n output_dim: int = None,\n ):\n def forward(self, x):\n def forward(self, x, emb):\n def forward(self, x, emb, context=None):\n def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):\n def forward(self, x):\n def __init__(self, channels, out_channels=None, ks=5):\n def forward(self,x):\n def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):\n def forward(self, x):\n def __init__(\n self,\n channels,\n emb_channels,\n dropout,\n out_channels=None,\n use_conv=False,\n use_scale_shift_norm=False,\n dims=2,\n use_checkpoint=False,\n up=False,\n down=False,\n ):\n def forward(self, x, emb):\n def _forward(self, x, emb):\n def __init__(\n self,\n channels,\n num_heads=1,\n num_head_channels=-1,\n use_checkpoint=False,\n use_new_attention_order=False,\n ):\n def forward(self, x):\n def _forward(self, x):\ndef count_flops_attn(model, _x, y):\n def __init__(self, n_heads):\n def forward(self, qkv):\n def count_flops(model, _x, y):\n def __init__(self, n_heads):\n def forward(self, qkv):\n def count_flops(model, _x, y):\n def __init__(\n self,\n image_size,\n in_channels,\n model_channels,\n out_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=0,\n channel_mult=(1, 2, 4, 8),\n conv_resample=True,\n dims=2,\n num_classes=None,\n use_checkpoint=False,\n use_fp16=False,\n num_heads=-1,\n num_head_channels=-1,\n num_heads_upsample=-1,\n use_scale_shift_norm=False,\n resblock_updown=False,\n use_new_attention_order=False,\n use_spatial_transformer=False, # custom transformer support\n transformer_depth=1, # custom transformer support\n context_dim=None, # custom transformer support\n n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model\n legacy=True,\n disable_self_attentions=None,\n num_attention_blocks=None,\n disable_middle_self_attn=False,\n use_linear_in_transformer=False,\n ):\n def convert_to_fp16(self):\n def convert_to_fp32(self):\n def forward(self, x, timesteps=None, context=None, y=None,**kwargs):\nclass AttentionPool2d(nn.Module):\nclass TimestepBlock(nn.Module):\nclass TimestepEmbedSequential(nn.Sequential, TimestepBlock):\nclass Upsample(nn.Module):\nclass TransposedUpsample(nn.Module):\nclass Downsample(nn.Module):\nclass ResBlock(TimestepBlock):\nclass AttentionBlock(nn.Module):\nclass QKVAttentionLegacy(nn.Module):\nclass QKVAttention(nn.Module):\nclass UNetModel(nn.Module):"
},
{
"identifier": "checkpoint",
"path": "libcom/shadow_generation/source/ldm/modules/diffusionmodules/util.py",
"snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n :param func: the function to evaluate.\n :param inputs: the argument sequence to pass to `func`.\n :param params: a sequence of parameters `func` depends on but does not\n explicitly take as arguments.\n :param flag: if False, disable gradient checkpointing.\n \"\"\"\n if flag:\n args = tuple(inputs) + tuple(params)\n return CheckpointFunction.apply(func, len(inputs), *args)\n else:\n return func(*inputs)"
},
{
"identifier": "conv_nd",
"path": "libcom/shadow_generation/source/ldm/modules/diffusionmodules/util.py",
"snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "linear",
"path": "libcom/shadow_generation/source/ldm/modules/diffusionmodules/util.py",
"snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)"
},
{
"identifier": "avg_pool_nd",
"path": "libcom/shadow_generation/source/ldm/modules/diffusionmodules/util.py",
"snippet": "def avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1d(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2d(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "zero_module",
"path": "libcom/shadow_generation/source/ldm/modules/diffusionmodules/util.py",
"snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module"
},
{
"identifier": "normalization",
"path": "libcom/shadow_generation/source/ldm/modules/diffusionmodules/util.py",
"snippet": "def normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)"
},
{
"identifier": "timestep_embedding",
"path": "libcom/shadow_generation/source/ldm/modules/diffusionmodules/util.py",
"snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding"
},
{
"identifier": "exists",
"path": "libcom/shadow_generation/source/ldm/util.py",
"snippet": "def exists(x):\n return x is not None"
}
] | from torch import nn
from .cldm.cldm import ControlLDM
from .cldm.model import create_model, load_state_dict
from torch.utils.data import DataLoader
from .cldm.logger import PostProcessLogger
from PIL import Image
from libcom.shadow_generation.source.ldm.modules.diffusionmodules.openaimodel import (ResBlock, TimestepEmbedSequential, AttentionBlock,
Upsample, SpatialTransformer, Downsample)
from libcom.shadow_generation.source.ldm.modules.diffusionmodules.util import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
from libcom.shadow_generation.source.ldm.util import exists
import torch
import pytorch_lightning as pl
import os
import cv2
import numpy as np | 7,271 |
if num_head_channels == -1:
dim_head = ch // num_heads
else:
num_heads = ch // num_head_channels
dim_head = num_head_channels
if legacy:
#num_heads = 1
dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
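        # Bottleneck at the coarsest resolution: ResBlock -> (self-)attention -> ResBlock.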
self.middle_block = TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=dim_head,
use_new_attention_order=use_new_attention_order,
) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn
ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
use_checkpoint=use_checkpoint
),
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
)
self._feature_size += ch
self.output_blocks1 = nn.ModuleList([])
self.output_blocks2 = nn.ModuleList([])
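        # Decoder (upsampling) path: two parallel branches of output blocks are built
        # (output_blocks1 / output_blocks2), both sized for the same skip-connection
        # channel counts popped from input_block_chans.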
for level, mult in list(enumerate(channel_mult))[::-1]:
for i in range(self.num_res_blocks[level] + 1):
ich = input_block_chans.pop()
layers1 = [
ResBlock(
ch + ich,
time_embed_dim,
dropout,
out_channels=model_channels * mult,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
layers2 = [
ResBlock(
ch + ich,
time_embed_dim,
dropout,
out_channels=model_channels * mult,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = model_channels * mult
if ds in attention_resolutions:
if num_head_channels == -1:
dim_head = ch // num_heads
else:
num_heads = ch // num_head_channels
dim_head = num_head_channels
if legacy:
#num_heads = 1
dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
if exists(disable_self_attentions):
disabled_sa = disable_self_attentions[level]
else:
disabled_sa = False
if not exists(num_attention_blocks) or i < num_attention_blocks[level]:
layers1.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads_upsample,
num_head_channels=dim_head,
use_new_attention_order=use_new_attention_order,
) if not use_spatial_transformer else SpatialTransformer(
ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
use_checkpoint=use_checkpoint
)
)
layers2.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads_upsample,
num_head_channels=dim_head,
use_new_attention_order=use_new_attention_order,
) if not use_spatial_transformer else SpatialTransformer(
ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
use_checkpoint=use_checkpoint
)
)
if level and i == self.num_res_blocks[level]:
out_ch = ch
layers1.append(ResBlock(ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
up=True,
) if resblock_updown else
| # from share import *
class Post_Process_Net(nn.Module):
def __init__(
self,
image_size,
in_channels,
model_channels,
out_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
num_classes=None,
use_checkpoint=False,
use_fp16=False,
num_heads=-1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
resblock_updown=False,
use_new_attention_order=False,
use_spatial_transformer=False, # custom transformer support
transformer_depth=1, # custom transformer support
context_dim=None, # custom transformer support
n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model
legacy=True,
disable_self_attentions=None,
num_attention_blocks=None,
disable_middle_self_attn=False,
use_linear_in_transformer=False,
):
super().__init__()
self.image_size = image_size
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
if isinstance(num_res_blocks, int):
self.num_res_blocks = len(channel_mult) * [num_res_blocks]
else:
if len(num_res_blocks) != len(channel_mult):
raise ValueError("provide num_res_blocks either as an int (globally constant) or "
"as a list/tuple (per-level) with the same length as channel_mult")
self.num_res_blocks = num_res_blocks
if disable_self_attentions is not None:
# should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
assert len(disable_self_attentions) == len(channel_mult)
if num_attention_blocks is not None:
assert len(num_attention_blocks) == len(self.num_res_blocks)
assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
f"attention will still not be set.")
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.num_classes = num_classes
self.use_checkpoint = use_checkpoint
self.dtype = torch.float16 if use_fp16 else torch.float32
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
self.predict_codebook_ids = n_embed is not None
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
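        # The timestep embedding (of width model_channels) is projected by this two-layer MLP
        # to time_embed_dim = 4 * model_channels and handed to every ResBlock. In the reference
        # ldm UNetModel it is used as emb = self.time_embed(timestep_embedding(timesteps, model_channels)).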
if self.num_classes is not None:
if isinstance(self.num_classes, int):
self.label_emb = nn.Embedding(num_classes, time_embed_dim)
elif self.num_classes == "continuous":
print("setting up linear c_adm embedding layer")
self.label_emb = nn.Linear(1, time_embed_dim)
else:
raise ValueError()
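        # Encoder (downsampling) path: the first input block is a plain 3x3 convolution lifting
        # the input from in_channels to model_channels; the loop below appends the remaining
        # ResBlock/attention stages level by level.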
self.input_blocks = nn.ModuleList(
[
TimestepEmbedSequential(
conv_nd(dims, in_channels, model_channels, 3, padding=1)
)
]
)
self._feature_size = model_channels
input_block_chans = [model_channels]
ch = model_channels
ds = 1
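        # Bookkeeping while building the encoder:
        #   ch                - channel count produced by the most recent block
        #   ds                - current downsampling factor (doubled after every downsampling block)
        #   input_block_chans - per-block channel counts, later popped for the decoder skip connections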
for level, mult in enumerate(channel_mult):
for nr in range(self.num_res_blocks[level]):
layers = [
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=mult * model_channels,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = mult * model_channels
if ds in attention_resolutions:
if num_head_channels == -1:
dim_head = ch // num_heads
else:
num_heads = ch // num_head_channels
dim_head = num_head_channels
if legacy:
#num_heads = 1
dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
if exists(disable_self_attentions):
disabled_sa = disable_self_attentions[level]
else:
disabled_sa = False
if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=dim_head,
use_new_attention_order=use_new_attention_order,
) if not use_spatial_transformer else SpatialTransformer(
ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
use_checkpoint=use_checkpoint
)
)
self.input_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
input_block_chans.append(ch)
if level != len(channel_mult) - 1:
out_ch = ch
if resblock_updown:
stage_last_block = ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
down=True,
)
else:
stage_last_block = Downsample(
ch, conv_resample, dims=dims, out_channels=out_ch
)
self.input_blocks.append(
TimestepEmbedSequential(stage_last_block)
)
ch = out_ch
input_block_chans.append(ch)
ds *= 2
self._feature_size += ch
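        # Encoder finished: ds is the total downsampling factor and input_block_chans holds the
        # skip-connection channel counts that the decoder pops in reverse order.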
if num_head_channels == -1:
dim_head = ch // num_heads
else:
num_heads = ch // num_head_channels
dim_head = num_head_channels
if legacy:
#num_heads = 1
dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
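        # Bottleneck at the coarsest resolution: ResBlock -> (self-)attention -> ResBlock.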
self.middle_block = TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=dim_head,
use_new_attention_order=use_new_attention_order,
) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn
ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
use_checkpoint=use_checkpoint
),
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
)
self._feature_size += ch
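        # Decoder (upsampling) path. Unlike the standard UNetModel, two parallel branches of
        # output blocks are built (output_blocks1 / output_blocks2); both are sized for the
        # same skip-connection channel counts popped from input_block_chans.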
self.output_blocks1 = nn.ModuleList([])
self.output_blocks2 = nn.ModuleList([])
for level, mult in list(enumerate(channel_mult))[::-1]:
for i in range(self.num_res_blocks[level] + 1):
ich = input_block_chans.pop()
layers1 = [
ResBlock(
ch + ich,
time_embed_dim,
dropout,
out_channels=model_channels * mult,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
layers2 = [
ResBlock(
ch + ich,
time_embed_dim,
dropout,
out_channels=model_channels * mult,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = model_channels * mult
if ds in attention_resolutions:
if num_head_channels == -1:
dim_head = ch // num_heads
else:
num_heads = ch // num_head_channels
dim_head = num_head_channels
if legacy:
#num_heads = 1
dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
if exists(disable_self_attentions):
disabled_sa = disable_self_attentions[level]
else:
disabled_sa = False
if not exists(num_attention_blocks) or i < num_attention_blocks[level]:
layers1.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads_upsample,
num_head_channels=dim_head,
use_new_attention_order=use_new_attention_order,
) if not use_spatial_transformer else SpatialTransformer(
ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
use_checkpoint=use_checkpoint
)
)
layers2.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads_upsample,
num_head_channels=dim_head,
use_new_attention_order=use_new_attention_order,
) if not use_spatial_transformer else SpatialTransformer(
ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
use_checkpoint=use_checkpoint
)
)
if level and i == self.num_res_blocks[level]:
out_ch = ch
layers1.append(ResBlock(ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
up=True,
) if resblock_updown else | Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)) | 4 | 2023-10-19 05:08:12+00:00 | 12k |
e4s2023/E4S2023 | swap_face_fine/defliker/src/stage1_neural_atlas.py | [
{
"identifier": "IMLP",
"path": "swap_face_fine/defliker/src/models/stage_1/implicit_neural_networks.py",
"snippet": "class IMLP(nn.Module):\n def __init__(\n self,\n input_dim,\n output_dim,\n hidden_dim=256,\n use_positional=True,\n positional_dim=10,\n skip_layers=[4, 6],\n num_layers=8, # includes the output layer\n verbose=True,use_tanh=True,apply_softmax=False):\n super(IMLP, self).__init__()\n self.verbose = verbose\n self.use_tanh = use_tanh\n self.apply_softmax = apply_softmax\n if apply_softmax:\n self.softmax= nn.Softmax()\n if use_positional:\n encoding_dimensions = 2 * input_dim * positional_dim\n self.b = torch.tensor([(2 ** j) * np.pi for j in range(positional_dim)],requires_grad = False)\n else:\n encoding_dimensions = input_dim\n\n self.hidden = nn.ModuleList()\n for i in range(num_layers):\n if i == 0:\n input_dims = encoding_dimensions\n elif i in skip_layers:\n input_dims = hidden_dim + encoding_dimensions\n else:\n input_dims = hidden_dim\n\n if i == num_layers - 1:\n # last layer\n self.hidden.append(nn.Linear(input_dims, output_dim, bias=True))\n else:\n self.hidden.append(nn.Linear(input_dims, hidden_dim, bias=True))\n\n self.skip_layers = skip_layers\n self.num_layers = num_layers\n\n self.positional_dim = positional_dim\n self.use_positional = use_positional\n\n if self.verbose:\n print(f'Model has {count_parameters(self)} params')\n\n def forward(self, x):\n if self.use_positional:\n if self.b.device!=x.device:\n self.b=self.b.to(x.device)\n pos = positionalEncoding_vec(x,self.b)\n x = pos\n\n input = x.detach().clone()\n for i, layer in enumerate(self.hidden):\n if i > 0:\n x = F.relu(x)\n if i in self.skip_layers:\n x = torch.cat((x, input), 1)\n x = layer(x)\n if self.use_tanh:\n x = torch.tanh(x)\n\n if self.apply_softmax:\n x = self.softmax(x)\n return x"
},
{
"identifier": "evaluate_model_single",
"path": "swap_face_fine/defliker/src/models/stage_1/evaluate.py",
"snippet": "def evaluate_model_single(model_F_atlas, resx, resy, number_of_frames, model_F_mapping1,video_frames,\n results_folder, iteration, mask_frames, optimizer_all, writer, vid_name, derivative_amount,\n uv_mapping_scale, optical_flows, optical_flows_mask, device,\n save_checkpoint=True, show_atlas_alpha=False): #\n\n os.makedirs(os.path.join(results_folder, '%06d' % iteration), exist_ok = True)\n os.makedirs(os.path.join(results_folder, \"output\"), exist_ok = True)\n evaluation_folder = os.path.join(results_folder, '%06d' % iteration)\n resx = np.int64(resx)\n resy = np.int64(resy)\n larger_dim = np.maximum(resx, resy)\n if save_checkpoint:\n torch.save({\n 'F_atlas_state_dict': model_F_atlas.state_dict(),\n 'iteration': iteration,\n 'model_F_mapping1_state_dict': model_F_mapping1.state_dict(),\n 'optimizer_all_state_dict': optimizer_all.state_dict()\n }, '%s/checkpoint' % (results_folder))\n \n minx = 0\n miny = 0\n edge_size = 1\n \n video_frames_reconstruction = np.zeros((resy, resx, 3, number_of_frames))\n \n flow_loss1_video = np.zeros((resy, resx, number_of_frames))\n\n rigidity_loss1_video = np.zeros((resy, resx, number_of_frames))\n rgb_error_video = np.zeros((resy, resx, number_of_frames))\n rgb_residual_video = np.zeros((resy, resx, 3, number_of_frames))\n\n uv1_frames_reconstruction = np.zeros((resy, resx, 3, number_of_frames))\n\n all_masks1 = np.zeros((1000, 1000, number_of_frames))\n\n with torch.no_grad():\n for f in range(number_of_frames):\n print(f)\n\n relis_i, reljs_i = torch.where(torch.ones(resy, resx) > 0)\n\n # split the coordinates of the entire image such that no more than 100k coordinates in each batch\n relisa = np.array_split(relis_i.numpy(), np.ceil(relis_i.shape[0] / 100000))\n reljsa = np.array_split(reljs_i.numpy(), np.ceil(relis_i.shape[0] / 100000))\n\n for i in range(len(relisa)):\n relis = torch.from_numpy(relisa[i]).unsqueeze(1) / (larger_dim / 2) - 1\n reljs = torch.from_numpy(reljsa[i]).unsqueeze(1) / (larger_dim / 2) - 1\n # Map video indices to uv coordinates using the two mapping networks:\n uv_temp1 = model_F_mapping1(\n torch.cat((reljs, relis,\n (f / (number_of_frames / 2.0) - 1) * torch.ones_like(relis)),\n dim=1).to(device))\n \n # Sample RGB values from the atlas:\n rgb_current1 = model_F_atlas(uv_temp1 * 0.5 + 0.5)\n rgb_current1 = (rgb_current1 + 1) * 0.5\n\n alpha = torch.ones(rgb_current1.shape[0], 1).to(device)\n\n # pixels reconstruction from the MLPs:\n rgb_current = rgb_current1\n \n jif_foreground = torch.cat((torch.from_numpy(reljsa[i]).unsqueeze(-1),\n torch.from_numpy(relisa[i]).unsqueeze(-1),\n torch.ones_like(torch.from_numpy(relisa[i]).unsqueeze(-1)) * f),\n dim=1).T.unsqueeze(-1)\n\n # reconstruct rigidity losses for visualization:\n rigidity_loss1 = get_rigidity_loss(\n jif_foreground,\n derivative_amount,\n larger_dim,\n number_of_frames,\n model_F_mapping1,\n uv_temp1, device,\n uv_mapping_scale=uv_mapping_scale, return_all=True)\n \n # Reconstruct flow losses for visualization:\n if f < (number_of_frames - 1):\n flow_loss1 = get_optical_flow_loss_all(\n jif_foreground, uv_temp1, larger_dim,\n number_of_frames, model_F_mapping1, optical_flows, optical_flows_mask, uv_mapping_scale, device,\n alpha=alpha)\n \n else: # for not calculating the optical flow between the last frame and the next non-existing frame\n flow_loss1 = torch.zeros_like(relis).squeeze()\n\n \n # Same uv values from each frame for visualization:\n uv_temp1 = uv_temp1.detach().cpu()\n\n uv1_frames_reconstruction[relisa[i], reljsa[i], 0, f] = 
uv_temp1[:, 0]\n uv1_frames_reconstruction[relisa[i], reljsa[i], 1, f] = uv_temp1[:, 1]\n\n video_frames_reconstruction[relisa[i], reljsa[i], :, f] = rgb_current.detach().cpu(\n ).numpy()\n \n flow_loss1_video[relisa[i], reljsa[i], f] = flow_loss1.cpu().numpy()\n rigidity_loss1_video[relisa[i], reljsa[i], f] = rigidity_loss1.cpu().numpy()\n rgb_error_video[relisa[i], reljsa[i], f] = (\n (video_frames[relisa[i], reljsa[i], :, f] - rgb_current.cpu()).norm(dim=1) ** 2).numpy()\n rgb_residual_video[relisa[i], reljsa[i], :, f] = (\n (video_frames[relisa[i], reljsa[i], :, f] - rgb_current.cpu())).numpy()\n\n uv1_frames_reconstruction = normalize_uv_images(uv1_frames_reconstruction, 0.5, edge_size, minx, miny)\n\n Path(evaluation_folder).mkdir(parents=True, exist_ok=True)\n\n writer_im_rec = imageio.get_writer(\n \"%s/reconstruction_%s.mp4\" % (evaluation_folder, vid_name), fps=10)\n\n writer_residuals = imageio.get_writer(\n \"%s/residuals_%s.mp4\" % (evaluation_folder, vid_name), fps=10)\n\n writer_uv_1 = imageio.get_writer(\n \"%s/uv_1_%s.mp4\" % (evaluation_folder, vid_name),\n fps=10)\n\n writer_global_info = imageio.get_writer(\n \"%s/global_info_%s.mp4\" % (evaluation_folder, vid_name), fps=10)\n\n pnsrs = np.zeros((number_of_frames, 1))\n # save evaluation videos:\n for i in range(number_of_frames):\n print(i)\n # save image\n svae_image_path = os.path.join(results_folder, 'output', '%05d.png' % i)\n imageio.imwrite(svae_image_path, (video_frames_reconstruction[:, :, :, i] * (255)).astype(np.uint8))\n \n writer_im_rec.append_data((video_frames_reconstruction[:, :, :, i] * (255)).astype(np.uint8))\n writer_residuals.append_data(((rgb_residual_video[:, :, :, i] + 0.5) * 255).astype(np.uint8))\n\n writer_uv_1.append_data((uv1_frames_reconstruction[:, :, :, i] * (255)).astype(np.uint8))\n\n pnsrs[i] = skimage.metrics.peak_signal_noise_ratio(\n video_frames[:, :, :, i].numpy(),\n video_frames_reconstruction[:, :, :, i],\n data_range=1)\n\n fig = plt.figure(figsize=(20, 10))\n plt.subplot(3, 4, 3)\n plt.imshow(rgb_error_video[:, :, i], vmin=0.0, vmax=0.2)\n plt.colorbar()\n plt.title(\"RGB error\")\n\n plt.subplot(3, 4, 12)\n plt.imshow(rigidity_loss1_video[:, :, i], vmin=2.8, vmax=50.0)\n plt.colorbar()\n plt.title(\"rigidity_loss1\")\n\n plt.subplot(3, 4, 9)\n plt.imshow(flow_loss1_video[:, :, i], vmin=0.0, vmax=2.0)\n plt.colorbar()\n plt.title(\"flow_loss1\")\n\n plt.subplot(3, 4, 1)\n plt.imshow(video_frames_reconstruction[:, :, :, i], vmin=0.0, vmax=1.0)\n plt.colorbar()\n plt.title(\"video_reconstruction\")\n\n plt.subplot(3, 4, 2)\n plt.imshow(video_frames[:, :, :, i].numpy(), vmin=0.0, vmax=1.0)\n plt.colorbar()\n plt.title(\"original_video\")\n\n imm = get_img_from_fig(fig)\n writer_global_info.append_data(imm)\n plt.close(fig)\n\n print(pnsrs.mean())\n writer_im_rec.close()\n writer_global_info.close()\n writer_residuals.close()\n writer_uv_1.close()\n\n # save the psnr result as the name of a dummy file\n file1 = open('%s/%06d/PSNR_%f' % (results_folder, iteration, pnsrs.mean()), \"a\")\n file1.close()\n if save_checkpoint:\n writer.add_image(\n \"Train/recon_frame_0\",\n video_frames_reconstruction[:, :, :, 0],\n iteration,\n dataformats='HWC')\n writer.add_image(\n \"Train/recon_frame_end\",\n video_frames_reconstruction[:, :, :, -1],\n iteration,\n dataformats='HWC')"
},
{
"identifier": "get_gradient_loss_single",
"path": "swap_face_fine/defliker/src/models/stage_1/loss_utils.py",
"snippet": "def get_gradient_loss_single(video_frames_dx, video_frames_dy, jif_current,\n model_F_mapping1, model_F_atlas,\n rgb_output_foreground, device,resx,number_of_frames):\n xplus1yt_foreground = torch.cat(\n ((jif_current[0, :] + 1) / (resx / 2) - 1, jif_current[1, :] / (resx / 2) - 1,\n jif_current[2, :] / (number_of_frames / 2.0) - 1),\n dim=1).to(device)\n\n xyplus1t_foreground = torch.cat(\n ((jif_current[0, :]) / (resx / 2) - 1, (jif_current[1, :] + 1) / (resx / 2) - 1,\n jif_current[2, :] / (number_of_frames / 2.0) - 1),\n dim=1).to(device)\n\n # precomputed discrete derivative with respect to x,y direction\n rgb_dx_gt = video_frames_dx[jif_current[1, :], jif_current[0, :], :,\n jif_current[2, :]].squeeze(1).to(device)\n rgb_dy_gt = video_frames_dy[jif_current[1, :], jif_current[0, :], :,\n jif_current[2, :]].squeeze(1).to(device)\n\n # uv coordinates for locations with offsets of 1 pixel\n uv_foreground1_xyplus1t = model_F_mapping1(xyplus1t_foreground)\n uv_foreground1_xplus1yt = model_F_mapping1(xplus1yt_foreground)\n\n # The RGB values (from the 2 layers) for locations with offsets of 1 pixel\n rgb_output1_xyplus1t = (model_F_atlas(uv_foreground1_xyplus1t * 0.5 + 0.5) + 1.0) * 0.5\n rgb_output1_xplus1yt = (model_F_atlas(uv_foreground1_xplus1yt * 0.5 + 0.5) + 1.0) * 0.5\n\n # Reconstructed RGB values:\n rgb_output_foreground_xyplus1t = rgb_output1_xyplus1t\n rgb_output_foreground_xplus1yt = rgb_output1_xplus1yt\n\n # Use reconstructed RGB values for computing derivatives:\n rgb_dx_output = rgb_output_foreground_xplus1yt - rgb_output_foreground\n rgb_dy_output = rgb_output_foreground_xyplus1t - rgb_output_foreground\n gradient_loss = torch.mean(\n (rgb_dx_gt - rgb_dx_output).norm(dim=1) ** 2 + (rgb_dy_gt - rgb_dy_output).norm(dim=1) ** 2)\n return gradient_loss"
},
{
"identifier": "get_rigidity_loss",
"path": "swap_face_fine/defliker/src/models/stage_1/loss_utils.py",
"snippet": "def get_rigidity_loss(jif_foreground, derivative_amount, resx, number_of_frames, model_F_mapping, uv_foreground, device,\n uv_mapping_scale=1.0, return_all=False):\n # concatenating (x,y-derivative_amount,t) and (x-derivative_amount,y,t) to get xyt_p:\n is_patch = torch.cat((jif_foreground[1, :] - derivative_amount, jif_foreground[1, :])) / (resx / 2) - 1\n js_patch = torch.cat((jif_foreground[0, :], jif_foreground[0, :] - derivative_amount)) / (resx / 2) - 1\n fs_patch = torch.cat((jif_foreground[2, :], jif_foreground[2, :])) / (number_of_frames / 2.0) - 1\n xyt_p = torch.cat((js_patch, is_patch, fs_patch), dim=1).to(device)\n\n uv_p = model_F_mapping(xyt_p)\n u_p = uv_p[:, 0].view(2, -1) # u_p[0,:]= u(x,y-derivative_amount,t). u_p[1,:]= u(x-derivative_amount,y,t)\n v_p = uv_p[:, 1].view(2, -1) # v_p[0,:]= u(x,y-derivative_amount,t). v_p[1,:]= v(x-derivative_amount,y,t)\n\n u_p_d_ = uv_foreground[:, 0].unsqueeze(\n 0) - u_p # u_p_d_[0,:]=u(x,y,t)-u(x,y-derivative_amount,t) u_p_d_[1,:]= u(x,y,t)-u(x-derivative_amount,y,t).\n v_p_d_ = uv_foreground[:, 1].unsqueeze(\n 0) - v_p # v_p_d_[0,:]=u(x,y,t)-v(x,y-derivative_amount,t). v_p_d_[1,:]= u(x,y,t)-v(x-derivative_amount,y,t).\n\n # to match units: 1 in uv coordinates is resx/2 in image space.\n du_dx = u_p_d_[1, :] * resx / 2\n du_dy = u_p_d_[0, :] * resx / 2\n dv_dy = v_p_d_[0, :] * resx / 2\n dv_dx = v_p_d_[1, :] * resx / 2\n\n jacobians = torch.cat((torch.cat((du_dx.unsqueeze(-1).unsqueeze(-1), du_dy.unsqueeze(-1).unsqueeze(-1)), dim=2),\n torch.cat((dv_dx.unsqueeze(-1).unsqueeze(-1), dv_dy.unsqueeze(-1).unsqueeze(-1)),\n dim=2)),\n dim=1)\n jacobians = jacobians / uv_mapping_scale\n jacobians = jacobians / derivative_amount\n\n # Apply a loss to constrain the Jacobian to be a rotation matrix as much as possible\n JtJ = torch.matmul(jacobians.transpose(1, 2), jacobians)\n\n a = JtJ[:, 0, 0] + 0.001\n b = JtJ[:, 0, 1]\n c = JtJ[:, 1, 0]\n d = JtJ[:, 1, 1] + 0.001\n\n JTJinv = torch.zeros_like(jacobians).to(device)\n JTJinv[:, 0, 0] = d\n JTJinv[:, 0, 1] = -b\n JTJinv[:, 1, 0] = -c\n JTJinv[:, 1, 1] = a\n JTJinv = JTJinv / ((a * d - b * c).unsqueeze(-1).unsqueeze(-1))\n\n # See Equation (9) in the paper:\n rigidity_loss = (JtJ ** 2).sum(1).sum(1).sqrt() + (JTJinv ** 2).sum(1).sum(1).sqrt()\n\n if return_all:\n return rigidity_loss\n else:\n return rigidity_loss.mean()"
},
{
"identifier": "get_optical_flow_loss",
"path": "swap_face_fine/defliker/src/models/stage_1/loss_utils.py",
"snippet": "def get_optical_flow_loss(jif_foreground, uv_foreground, optical_flows_reverse, optical_flows_reverse_mask, resx,\n number_of_frames, model_F_mapping, optical_flows, optical_flows_mask, uv_mapping_scale,\n device, use_alpha=False, alpha=1.0):\n # Forward flow:\n uv_foreground_forward_relevant, xyt_foreground_forward_should_match, relevant_batch_indices_forward = get_corresponding_flow_matches(\n jif_foreground, optical_flows_mask, optical_flows, resx, number_of_frames, True, uv_foreground)\n uv_foreground_forward_should_match = model_F_mapping(xyt_foreground_forward_should_match.to(device))\n loss_flow_next = (uv_foreground_forward_should_match - uv_foreground_forward_relevant).norm(dim=1) * resx / (\n 2 * uv_mapping_scale)\n\n # Backward flow:\n uv_foreground_backward_relevant, xyt_foreground_backward_should_match, relevant_batch_indices_backward = get_corresponding_flow_matches(\n jif_foreground, optical_flows_reverse_mask, optical_flows_reverse, resx, number_of_frames, False, uv_foreground)\n uv_foreground_backward_should_match = model_F_mapping(xyt_foreground_backward_should_match.to(device))\n loss_flow_prev = (uv_foreground_backward_should_match - uv_foreground_backward_relevant).norm(dim=1) * resx / (\n 2 * uv_mapping_scale)\n\n if use_alpha:\n flow_loss = (loss_flow_prev * alpha[relevant_batch_indices_backward].squeeze()).mean() * 0.5 + (\n loss_flow_next * alpha[relevant_batch_indices_forward].squeeze()).mean() * 0.5\n else:\n flow_loss = (loss_flow_prev).mean() * 0.5 + (loss_flow_next).mean() * 0.5\n\n return flow_loss"
},
{
"identifier": "get_tuples",
"path": "swap_face_fine/defliker/src/models/stage_1/unwrap_utils.py",
"snippet": "def get_tuples(number_of_frames, video_frames):\n # video_frames shape: (resy, resx, 3, num_frames), mask_frames shape: (resy, resx, num_frames)\n jif_all = []\n for f in range(number_of_frames):\n mask = (video_frames[:, :, :, f] > -1).any(dim=2)\n relis, reljs = torch.where(mask > 0.5)\n jif_all.append(torch.stack((reljs, relis, f * torch.ones_like(reljs))))\n return torch.cat(jif_all, dim=1)"
},
{
"identifier": "pre_train_mapping",
"path": "swap_face_fine/defliker/src/models/stage_1/unwrap_utils.py",
"snippet": "def pre_train_mapping(model_F_mapping, frames_num, uv_mapping_scale, resx, resy, larger_dim, device,\n pretrain_iters=100):\n optimizer_mapping = optim.Adam(model_F_mapping.parameters(), lr=0.0001)\n print(\"pre-training\")\n for i in tqdm(range(pretrain_iters)):\n for f in range(frames_num):\n i_s_int = torch.randint(resy, (np.int64(10000), 1))\n j_s_int = torch.randint(resx, (np.int64(10000), 1))\n\n i_s = i_s_int / (larger_dim / 2) - 1\n j_s = j_s_int / (larger_dim / 2) - 1\n\n xyt = torch.cat((j_s, i_s, (f / (frames_num / 2.0) - 1) * torch.ones_like(i_s)),\n dim=1).to(device)\n uv_temp = model_F_mapping(xyt)\n\n model_F_mapping.zero_grad()\n\n loss = (xyt[:, :2] * uv_mapping_scale - uv_temp).norm(dim=1).mean()\n # print(f\"pre-train loss: {loss.item()}\")\n loss.backward()\n optimizer_mapping.step()\n return model_F_mapping"
},
{
"identifier": "load_input_data_single",
"path": "swap_face_fine/defliker/src/models/stage_1/unwrap_utils.py",
"snippet": "def load_input_data_single(resy, resx, maximum_number_of_frames, data_folder, use_mask_rcnn_bootstrapping, filter_optical_flow,\n vid_root, vid_name):\n out_flow_dir = vid_root / f'{vid_name}_flow'\n maskrcnn_dir = vid_root / f'{vid_name}_maskrcnn'\n\n input_files = sorted(list(data_folder.glob('*.jpg')) + list(data_folder.glob('*.png')))\n\n number_of_frames=np.minimum(maximum_number_of_frames,len(input_files))\n video_frames = torch.zeros((resy, resx, 3, number_of_frames))\n video_frames_dx = torch.zeros((resy, resx, 3, number_of_frames))\n video_frames_dy = torch.zeros((resy, resx, 3, number_of_frames))\n\n mask_frames = torch.zeros((resy, resx, number_of_frames))\n\n optical_flows = torch.zeros((resy, resx, 2, number_of_frames, 1))\n optical_flows_mask = torch.zeros((resy, resx, number_of_frames, 1))\n optical_flows_reverse = torch.zeros((resy, resx, 2, number_of_frames, 1))\n optical_flows_reverse_mask = torch.zeros((resy, resx, number_of_frames, 1))\n\n\n for i in range(number_of_frames):\n file1 = input_files[i]\n im = np.array(Image.open(str(file1))).astype(np.float64) / 255.\n # xuanchi add\n if len(im.shape) == 2:\n im = np.tile(im[:,:,np.newaxis], [1,1,3])\n video_frames[:, :, :, i] = torch.from_numpy(cv2.resize(im[:, :, :3], (resx, resy)))\n video_frames_dy[:-1, :, :, i] = video_frames[1:, :, :, i] - video_frames[:-1, :, :, i]\n video_frames_dx[:, :-1, :, i] = video_frames[:, 1:, :, i] - video_frames[:, :-1, :, i]\n\n for i in range(number_of_frames - 1):\n file1 = input_files[i]\n j = i + 1\n file2 = input_files[j]\n\n fn1 = file1.name\n fn2 = file2.name\n\n flow12_fn = out_flow_dir / f'{fn1}_{fn2}.npy'\n flow21_fn = out_flow_dir / f'{fn2}_{fn1}.npy'\n flow12 = np.load(flow12_fn)\n flow21 = np.load(flow21_fn)\n\n if flow12.shape[0] != resy or flow12.shape[1] != resx:\n flow12 = resize_flow(flow12, newh=resy, neww=resx)\n flow21 = resize_flow(flow21, newh=resy, neww=resx)\n mask_flow = compute_consistency(flow12, flow21) < 1.0\n mask_flow_reverse = compute_consistency(flow21, flow12) < 1.0\n\n optical_flows[:, :, :, i, 0] = torch.from_numpy(flow12)\n optical_flows_reverse[:, :, :, j, 0] = torch.from_numpy(flow21)\n\n if filter_optical_flow:\n optical_flows_mask[:, :, i, 0] = torch.from_numpy(mask_flow)\n optical_flows_reverse_mask[:, :, j, 0] = torch.from_numpy(mask_flow_reverse)\n else:\n optical_flows_mask[:, :, i, 0] = torch.ones_like(mask_flow)\n optical_flows_reverse_mask[:, :, j, 0] = torch.ones_like(mask_flow_reverse)\n return optical_flows_mask, video_frames, optical_flows_reverse_mask, mask_frames, video_frames_dx, video_frames_dy, optical_flows_reverse, optical_flows"
},
{
"identifier": "save_mask_flow",
"path": "swap_face_fine/defliker/src/models/stage_1/unwrap_utils.py",
"snippet": "def save_mask_flow(optical_flows_mask, video_frames, results_folder):\n for j in range(optical_flows_mask.shape[3]):\n\n filter_flow_0 = imageio.get_writer(\n \"%s/filter_flow_%d.mp4\" % (results_folder, j), fps=10)\n for i in range(video_frames.shape[3]):\n if torch.where(optical_flows_mask[:, :, i, j] == 1)[0].shape[0] == 0:\n continue\n cur_frame = video_frames[:, :, :, i].clone()\n # Put red color where mask=0.\n cur_frame[\n torch.where(optical_flows_mask[:, :, i, j] == 0)[0], torch.where(optical_flows_mask[:, :, i, j] == 0)[\n 1], 0] = 1\n cur_frame[\n torch.where(optical_flows_mask[:, :, i, j] == 0)[0], torch.where(optical_flows_mask[:, :, i, j] == 0)[\n 1], 1] = 0\n cur_frame[\n torch.where(optical_flows_mask[:, :, i, j] == 0)[0], torch.where(optical_flows_mask[:, :, i, j] == 0)[\n 1], 2] = 0\n\n filter_flow_0.append_data((cur_frame.numpy() * 255).astype(np.uint8))\n\n filter_flow_0.close()\n # save the video in the working resolution\n input_video = imageio.get_writer(\n \"%s/input_video.mp4\" % (results_folder), fps=10)\n for i in range(video_frames.shape[3]):\n cur_frame = video_frames[:, :, :, i].clone()\n\n input_video.append_data((cur_frame.numpy() * 255).astype(np.uint8))\n\n input_video.close()"
}
] | import sys
import torch
import torch.optim as optim
import numpy as np
import argparse
import cv2
import glob
import json
import os
import subprocess
from tqdm import tqdm
from swap_face_fine.defliker.src.models.stage_1.implicit_neural_networks import IMLP
from swap_face_fine.defliker.src.models.stage_1.evaluate import evaluate_model_single
from swap_face_fine.defliker.src.models.stage_1.loss_utils import get_gradient_loss_single, get_rigidity_loss, get_optical_flow_loss
from swap_face_fine.defliker.src.models.stage_1.unwrap_utils import get_tuples, pre_train_mapping, load_input_data_single, save_mask_flow
from pathlib import Path
from datetime import datetime
from torch.utils.tensorboard import SummaryWriter | 8,012 |
# set gpu
select_gpu = "0" # default use 0
os.environ["CUDA_VISIBLE_DEVICES"] = select_gpu
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(config, args):
maximum_number_of_frames = config["maximum_number_of_frames"]
# read the first frame of vid path and get its resolution
frames_list = sorted(glob.glob(os.path.join(args.vid_path, "*g")))
frame_temp = cv2.imread(frames_list[0])
resx = frame_temp.shape[1]
resy = frame_temp.shape[0]
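    # Optionally reduce the working resolution by the factor args.down.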
if args.down is not None:
resx = int(resx / args.down)
resy = int(resy / args.down)
iters_num = config["iters_num"]
#batch size:
samples = config["samples_batch"]
    # evaluation frequency (in number of iterations)
evaluate_every = np.int64(config["evaluate_every"])
# optionally it is possible to load a checkpoint
load_checkpoint = config["load_checkpoint"] # set to true to continue from a checkpoint
checkpoint_path = config["checkpoint_path"]
# a data folder that contains folders named "[video_name]","[video_name]_flow","[video_name]_maskrcnn" (optionally)
data_folder = Path(args.vid_path)
    # results_folder_name = config["results_folder_name"] # the folder (under the code's folder) where the experiments will be saved.
results_folder_name = "results"
# add_to_experiment_folder_name = config["add_to_experiment_folder_name"] # for each experiment folder (saved inside "results_folder_name") add this string
# boolean variables for determining if a pretraining is used:
pretrain_mapping1 = config["pretrain_mapping1"]
pretrain_iter_number = config["pretrain_iter_number"]
# the scale of the atlas uv coordinates relative to frame's xy coordinates
uv_mapping_scale = config["uv_mapping_scale"]
# M_f's hyper parameters
use_positional_encoding_mapping1 = config["use_positional_encoding_mapping1"]
number_of_positional_encoding_mapping1 = config["number_of_positional_encoding_mapping1"]
number_of_layers_mapping1 = config["number_of_layers_mapping1"]
number_of_channels_mapping1 = config["number_of_channels_mapping1"]
# Atlas MLP's hyper parameters
number_of_channels_atlas = config["number_of_channels_atlas"]
number_of_layers_atlas = config["number_of_layers_atlas"]
positional_encoding_num_atlas = config[
"positional_encoding_num_atlas"]
# coefficients for the different loss terms
rgb_coeff = config["rgb_coeff"] # coefficient for rgb loss term:
# optical flow loss term coefficient (beta_f in the paper):
optical_flow_coeff = config["optical_flow_coeff"]
use_gradient_loss = config["use_gradient_loss"]
gradient_loss_coeff = config["gradient_loss_coeff"]
rigidity_coeff = config["rigidity_coeff"] # coefficient for the rigidity loss term
derivative_amount = config["derivative_amount"] # For finite differences gradient computation:
# for using global (in addition to the current local) rigidity loss:
include_global_rigidity_loss = config["include_global_rigidity_loss"]
# Finite differences parameters for the global rigidity terms:
global_rigidity_derivative_amount_fg = config["global_rigidity_derivative_amount_fg"]
global_rigidity_coeff_fg = config["global_rigidity_coeff_fg"]
stop_global_rigidity = config["stop_global_rigidity"]
use_optical_flow = True
vid_name = data_folder.name
vid_root = data_folder.parent
results_folder = Path(
f'./{results_folder_name}/{vid_name}/stage_1')
results_folder.mkdir(parents=True, exist_ok=True)
with open('%s/config.json' % results_folder, 'w') as json_file:
json.dump(config, json_file, indent=4)
writer = SummaryWriter(log_dir=str(results_folder))
optical_flows_mask, video_frames, optical_flows_reverse_mask, mask_frames, video_frames_dx, video_frames_dy, optical_flows_reverse, optical_flows = load_input_data_single(
resy, resx, maximum_number_of_frames, data_folder, True, True, vid_root, vid_name)
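    # video_frames has shape (resy, resx, 3, num_frames); the forward/backward flows and their
    # consistency masks feed the optical-flow loss, while video_frames_dx/dy feed the gradient loss.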
number_of_frames=video_frames.shape[3]
    # save a video showing the masked part of the forward optical flow
|
# set gpu
select_gpu = "0" # default use 0
os.environ["CUDA_VISIBLE_DEVICES"] = select_gpu
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(config, args):
maximum_number_of_frames = config["maximum_number_of_frames"]
# read the first frame of vid path and get its resolution
frames_list = sorted(glob.glob(os.path.join(args.vid_path, "*g")))
frame_temp = cv2.imread(frames_list[0])
resx = frame_temp.shape[1]
resy = frame_temp.shape[0]
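    # Optionally reduce the working resolution by the factor args.down.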
if args.down is not None:
resx = int(resx / args.down)
resy = int(resy / args.down)
iters_num = config["iters_num"]
#batch size:
samples = config["samples_batch"]
    # evaluation frequency (in number of iterations)
evaluate_every = np.int64(config["evaluate_every"])
# optionally it is possible to load a checkpoint
load_checkpoint = config["load_checkpoint"] # set to true to continue from a checkpoint
checkpoint_path = config["checkpoint_path"]
# a data folder that contains folders named "[video_name]","[video_name]_flow","[video_name]_maskrcnn" (optionally)
data_folder = Path(args.vid_path)
    # results_folder_name = config["results_folder_name"] # the folder (under the code's folder) where the experiments will be saved.
results_folder_name = "results"
# add_to_experiment_folder_name = config["add_to_experiment_folder_name"] # for each experiment folder (saved inside "results_folder_name") add this string
# boolean variables for determining if a pretraining is used:
pretrain_mapping1 = config["pretrain_mapping1"]
pretrain_iter_number = config["pretrain_iter_number"]
# the scale of the atlas uv coordinates relative to frame's xy coordinates
uv_mapping_scale = config["uv_mapping_scale"]
# M_f's hyper parameters
use_positional_encoding_mapping1 = config["use_positional_encoding_mapping1"]
number_of_positional_encoding_mapping1 = config["number_of_positional_encoding_mapping1"]
number_of_layers_mapping1 = config["number_of_layers_mapping1"]
number_of_channels_mapping1 = config["number_of_channels_mapping1"]
# Atlas MLP's hyper parameters
number_of_channels_atlas = config["number_of_channels_atlas"]
number_of_layers_atlas = config["number_of_layers_atlas"]
positional_encoding_num_atlas = config[
"positional_encoding_num_atlas"]
# coefficients for the different loss terms
rgb_coeff = config["rgb_coeff"] # coefficient for rgb loss term:
# optical flow loss term coefficient (beta_f in the paper):
optical_flow_coeff = config["optical_flow_coeff"]
use_gradient_loss = config["use_gradient_loss"]
gradient_loss_coeff = config["gradient_loss_coeff"]
rigidity_coeff = config["rigidity_coeff"] # coefficient for the rigidity loss term
derivative_amount = config["derivative_amount"] # For finite differences gradient computation:
# for using global (in addition to the current local) rigidity loss:
include_global_rigidity_loss = config["include_global_rigidity_loss"]
# Finite differences parameters for the global rigidity terms:
global_rigidity_derivative_amount_fg = config["global_rigidity_derivative_amount_fg"]
global_rigidity_coeff_fg = config["global_rigidity_coeff_fg"]
stop_global_rigidity = config["stop_global_rigidity"]
use_optical_flow = True
vid_name = data_folder.name
vid_root = data_folder.parent
results_folder = Path(
f'./{results_folder_name}/{vid_name}/stage_1')
results_folder.mkdir(parents=True, exist_ok=True)
with open('%s/config.json' % results_folder, 'w') as json_file:
json.dump(config, json_file, indent=4)
writer = SummaryWriter(log_dir=str(results_folder))
optical_flows_mask, video_frames, optical_flows_reverse_mask, mask_frames, video_frames_dx, video_frames_dy, optical_flows_reverse, optical_flows = load_input_data_single(
resy, resx, maximum_number_of_frames, data_folder, True, True, vid_root, vid_name)
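    # video_frames has shape (resy, resx, 3, num_frames); the forward/backward flows and their
    # consistency masks feed the optical-flow loss, while video_frames_dx/dy feed the gradient loss.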
number_of_frames=video_frames.shape[3]
    # save a video showing the masked part of the forward optical flow | save_mask_flow(optical_flows_mask, video_frames, results_folder) | 8 | 2023-10-15 12:15:01+00:00 | 12k
sotopia-lab/sotopia | sotopia-chat/chat_server.py | [
{
"identifier": "redis_agent",
"path": "sotopia/agents/redis_agent.py",
"snippet": "class RedisAgent(BaseAgent[Observation, AgentAction]):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n session_id: str | None = None,\n agent_profile: AgentProfile | None = None,\n ) -> None:\n def act(\n self,\n obs: Observation,\n ) -> AgentAction:\n async def aact(\n self,\n obs: Observation,\n ) -> AgentAction:\n def reset(\n self,\n reset_reason: str = \"\",\n ) -> None:"
},
{
"identifier": "LLMAgent",
"path": "sotopia/agents/llm_agent.py",
"snippet": "class LLMAgent(BaseAgent[Observation, AgentAction]):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n agent_profile: AgentProfile | None = None,\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n script_like: bool = False,\n ) -> None:\n super().__init__(\n agent_name=agent_name,\n uuid_str=uuid_str,\n agent_profile=agent_profile,\n )\n self.model_name = model_name\n self.script_like = script_like\n\n @property\n def goal(self) -> str:\n if self._goal is not None:\n return self._goal\n assert (\n len(self.inbox) > 0\n ), \"attribute goal has to be called after at least one step\"\n goal = generate_goal(\n self.model_name,\n background=self.inbox[0][\n 1\n ].to_natural_language(), # Only consider the first message for now\n )\n return goal\n\n @goal.setter\n def goal(self, goal: str) -> None:\n self._goal = goal\n\n def act(\n self,\n obs: Observation,\n gen_func: Callable[..., AgentAction] = generate_action,\n ) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n if len(obs.available_actions) == 1 and \"none\" in obs.available_actions:\n return AgentAction(action_type=\"none\", argument=\"\")\n else:\n action = gen_func(\n self.model_name,\n history=\"\\n\".join(\n f\"{y.to_natural_language()}\" for x, y in self.inbox\n ),\n turn_number=obs.turn_number,\n action_types=obs.available_actions,\n agent=self.agent_name,\n goal=self.goal,\n )\n return action\n\n async def aact(self, obs: Observation) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n if len(obs.available_actions) == 1 and \"none\" in obs.available_actions:\n return AgentAction(action_type=\"none\", argument=\"\")\n else:\n action, prompt = await agenerate_action(\n self.model_name,\n history=\"\\n\".join(\n f\"{y.to_natural_language()}\" for x, y in self.inbox\n ),\n turn_number=obs.turn_number,\n action_types=obs.available_actions,\n agent=self.agent_name,\n goal=self.goal,\n script_like=self.script_like,\n )\n return action"
},
{
"identifier": "EnvAgentComboStorage",
"path": "sotopia/database/env_agent_combo_storage.py",
"snippet": "class EnvAgentComboStorage(JsonModel):\n env_id: str = Field(default_factory=lambda: \"\", index=True)\n agent_ids: list[str] = Field(default_factory=lambda: [], index=True)"
},
{
"identifier": "AgentProfile",
"path": "sotopia/database/persistent_profile.py",
"snippet": "class AgentProfile(JsonModel):\n first_name: str = Field(index=True)\n last_name: str = Field(index=True)\n age: int = Field(index=True, default_factory=lambda: 0)\n occupation: str = Field(index=True, default_factory=lambda: \"\")\n gender: str = Field(index=True, default_factory=lambda: \"\")\n gender_pronoun: str = Field(index=True, default_factory=lambda: \"\")\n public_info: str = Field(index=True, default_factory=lambda: \"\")\n big_five: str = Field(index=True, default_factory=lambda: \"\")\n moral_values: list[str] = Field(index=False, default_factory=lambda: [])\n schwartz_personal_values: list[str] = Field(\n index=False, default_factory=lambda: []\n )\n personality_and_values: str = Field(index=True, default_factory=lambda: \"\")\n decision_making_style: str = Field(index=True, default_factory=lambda: \"\")\n secret: str = Field(default_factory=lambda: \"\")\n model_id: str = Field(default_factory=lambda: \"\")"
},
{
"identifier": "EnvironmentList",
"path": "sotopia/database/persistent_profile.py",
"snippet": "class EnvironmentList(JsonModel):\n name: str = Field(index=True)\n environments: list[str] = Field(default_factory=lambda: [])\n agent_index: list[str] | None = Field(default_factory=lambda: None)\n\n # validate the length of agent_index should be same as environments\n @root_validator\n def the_length_agent_index_matches_environments(cls, values: Any) -> Any:\n environments, agent_index = (\n values.get(\"environments\"),\n values.get(\"agent_index\"),\n )\n if agent_index is None:\n return values\n assert len(environments) == len(\n agent_index\n ), f\"Number of environments {len(environments)} and agent_index {len(agent_index)} do not match\"\n return values"
},
{
"identifier": "EnvironmentProfile",
"path": "sotopia/database/persistent_profile.py",
"snippet": "class EnvironmentProfile(JsonModel):\n codename: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"The codename of the environment\",\n )\n source: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"The source of the environment\",\n )\n scenario: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"A concrete scenario of where the social interaction takes place, the scenario should have two agents (agent1 and agent2), and you should illustrate the relationship between the two agents, and for what purpose agent1 is interacting with agent2. Please avoid mentioning specific names and occupations in the scenario and keep all the mentions gender-neutral. Also avoid generating scenarios that requires childrend (below 18) or elderly (above 70) to be involved.\",\n )\n agent_goals: list[str] = Field(\n default_factory=lambda: [],\n description=\"The social goals of each agent, which could include <extra_info>...</extra_info>, <clarification_hint>...</clarification_hint>, and <strategy_hint>...</strategy_hint> to help the agent achieve the goal. Avoid providing too specific strategy hint, try to be as abstract as possible. For example, use 'you can provide financial benefits to achieve your goal' instead of 'you can buy him a boba tea to achieve your goal.'\",\n )\n relationship: RelationshipType = Field(\n index=True,\n default_factory=lambda: RelationshipType.stranger,\n description=\"The relationship between the two agents, choose from: stranger, know_by_name, acquaintance, friend, romantic_relationship, family_member. Do not make up a relationship, but choose from the list, 0 means stranger, 1 means know_by_name, 2 means acquaintance, 3 means friend, 4 means romantic_relationship, 5 means family_member\",\n )\n age_constraint: str | None = Field(\n default_factory=lambda: None,\n description=\"The age constraint of the environment, a list of tuples, each tuple is a range of age, e.g., '[(18, 25), (30, 40)]' means the environment is only available to agent one between 18 and 25, and agent two between 30 and 40\",\n )\n occupation_constraint: str | None = Field(\n default_factory=lambda: None,\n description=\"The occupation constraint of the environment, a list of lists, each list is a list of occupations, e.g., '[['student', 'teacher'], ['doctor', 'nurse']]' means the environment is only available to agent one if agent one is a student or a teacher, and agent two is a doctor or a nurse\",\n )\n agent_constraint: list[list[str]] | None = Field(\n default_factory=lambda: None,\n )"
},
{
"identifier": "ReachGoalLLMEvaluator",
"path": "sotopia/envs/evaluators.py",
"snippet": "class ReachGoalLLMEvaluator(Evaluator):\n @beartype\n def __init__(\n self, model_name: LLM_Name, response_format: str = \"basic\"\n ) -> None:\n self.model_name = model_name\n self.prompt = \"\"\n self.response_format = response_format\n\n def __call__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n raise NotImplementedError(\n \"ReachGoalLLMEvaluator is not implemented for synchronous evaluation\"\n )\n\n @gin.configurable\n @beartype\n async def __acall__(\n self,\n turn_number: int,\n messages: list[tuple[str, Message]] | None,\n history: str = \"\",\n temperature: float = 0.7,\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n # filter did nothing\n if not history and messages:\n messages_filtered = [\n (x, y)\n for x, y in messages\n if \"did nothing\" not in y.to_natural_language()\n ]\n history = \"\\n\".join(\n [\n f\"{x} {y.to_natural_language()}\"\n if x != \"Environment\"\n else y.to_natural_language()\n for x, y in messages_filtered\n ]\n )\n response_format_class = (\n EnvResponsePlus if self.response_format == \"plus\" else EnvResponse\n )\n\n try:\n response: EnvResponsePlus | EnvResponse # fix type error from langchain 0.0.264. we don't need this line for langchain 0.0.263\n response, prompt = await agenerate(\n model_name=self.model_name,\n template=\"\"\"{history},\n Based on previous interactions, evaluate how well participants achieve their goals.\n Please following the format:\n {format_instructions}\n \"\"\",\n input_values=dict(history=history),\n output_parser=PydanticOutputParser[\n EnvResponsePlus | EnvResponse\n ](pydantic_object=response_format_class),\n temperature=temperature,\n )\n self.prompt = prompt\n response_list = []\n # TODO: multiple agents\n for dimension in response.agent_1_evaluation.dict().keys():\n response_list.append(\n (\n \"agent_1\",\n (\n (\n dimension,\n response.agent_1_evaluation.dict()[dimension][\n 1\n ],\n ),\n response.agent_1_evaluation.dict()[dimension][0],\n ),\n )\n )\n response_list.append(\n (\n \"agent_2\",\n (\n (\n dimension,\n response.agent_2_evaluation.dict()[dimension][\n 1\n ],\n ),\n response.agent_2_evaluation.dict()[dimension][0],\n ),\n )\n )\n return response_list\n except Exception as e:\n log.debug(f\"[red] Failed to generate environment response. {e}\")\n return []"
},
{
"identifier": "RuleBasedTerminatedEvaluator",
"path": "sotopia/envs/evaluators.py",
"snippet": "class RuleBasedTerminatedEvaluator(Evaluator):\n def __init__(\n self, max_turn_number: int = 20, max_stale_turn: int = 2\n ) -> None:\n self.max_turn_number = max_turn_number\n self.max_stale_turn = max_stale_turn\n\n def __call__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n # Rule 1: If the conversation is too long, terminate the conversation\n conversation_too_long = turn_number > self.max_turn_number\n # Rule 2: If one of the players leaves, terminate the conversation\n p1_leaving = (\n len(messages) > 1\n and isinstance(messages[-2][1], AgentAction)\n and messages[-2][1].action_type == \"leave\"\n )\n p2_leaving = (\n bool(len(messages))\n and isinstance(messages[-1][1], AgentAction)\n and messages[-1][1].action_type == \"leave\"\n )\n # Rule 3: If the conversation is stale for too long, terminate the conversation\n stale_count = 0\n for message in messages[::-1]:\n if message[0] == \"Environment\":\n continue\n assert isinstance(message[1], AgentAction)\n if message[1].action_type == \"none\":\n stale_count += 1\n else:\n break\n if stale_count > self.max_stale_turn:\n break\n stale_too_long = stale_count > self.max_stale_turn\n terminated = (\n conversation_too_long or p1_leaving or p2_leaving or stale_too_long\n )\n reasons_for_termination = (\n f\"{'The conversation is too long; ' if conversation_too_long else ''}\"\n f\"{'Agent 1 is leaving; ' if p1_leaving else ''}\"\n f\"{'Agent 2 is leaving; ' if p2_leaving else ''}\"\n f\"{'The conversation stales for too long; ' if stale_too_long else ''}\"\n )\n return [\n (\n \"environment\",\n ((\"terminated\", terminated), reasons_for_termination),\n )\n ]\n\n async def __acall__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n return self(turn_number, messages)"
},
{
"identifier": "ParallelSotopiaEnv",
"path": "sotopia/envs/parallel.py",
"snippet": "class ParallelSotopiaEnv(\n ParallelEnv[str, Observation, AgentAction], MessengerMixin\n):\n def __init__(\n self,\n available_action_types: set[ActionType] = set(\n [\"none\", \"speak\", \"non-verbal communication\", \"action\", \"leave\"]\n ),\n action_order: Literal[\n \"simutaneous\", \"round-robin\", \"random\"\n ] = \"simutaneous\",\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n evaluators: list[Evaluator] = [],\n terminal_evaluators: list[Evaluator] = [],\n uuid_str: str | None = None,\n env_profile: EnvironmentProfile | None = None,\n ) -> None:\n \"\"\"A sotopia environment for parallel agents.\n\n Args:\n available_action_types (set[ActionType], optional): The action types that are available to the agents. Defaults to set([\"none\", \"speak\", \"non-verbal communication\", \"action\"]).\n action_order (Literal[\"simutaneous\", \"round-robin\", \"random\"], optional): The order in which the agents take actions. Defaults to \"simutaneous\".\n model_name (LLM_Name, optional): The name of the language model to use. Defaults to \"gpt-3.5-turbo\".\n \"\"\"\n super().__init__()\n self.model_name = model_name\n self.background = ScriptBackground(\n scenario=\"\",\n p1_background=\"\",\n p2_background=\"\",\n p1_goal=\"\",\n p2_goal=\"\",\n p1_name=\"\",\n p2_name=\"\",\n )\n\n self.agents = []\n self.action_spaces = {}\n self.available_action_types = list(available_action_types)\n self.action_order = action_order\n self.action_mask: list[bool] = []\n self.evaluators = evaluators\n self.terminal_evaluators = terminal_evaluators\n\n # if an environment profile is provided, use it\n assert (\n env_profile or uuid_str\n ), \"Either env_profile or uuid_str must be provided\"\n if env_profile is not None:\n self.profile = env_profile\n # if a uuid is provided, try to load the environment profile from the database\n elif uuid_str is not None:\n # try retrieving profile from database\n try:\n self.profile = EnvironmentProfile.get(pk=uuid_str)\n except NotFoundError:\n raise ValueError(\n f\"Agent with uuid {uuid_str} not found in database\"\n )\n\n @configurable\n def reset(\n self,\n seed: int | None = None,\n options: dict[str, str] | None = None,\n agents: Agents | None = None,\n omniscient: bool = False,\n lite: bool = False,\n ) -> dict[str, Observation]:\n \"\"\"Starting a new episode. Must be called before step().\n\n Args:\n seed (int, optional): Seed for the environment. Defaults to None. Not used right now.\n options (dict, optional): Options for the environment. Defaults to None.\n \"partial_background_file\" (str): Path to a json file which need to contain a ScriptBackground object. The backgound can be incompleted (\"unknown\" for missing parts), and the missing parts will be filled in by the environment.\n \"full_background_file\" (str): Path to a json file which need to contain a ScriptBackground object. The backgound must be completed (no \"unknown\" for missing parts).\n omniscient (bool, optional): Whether the agents know the other agent's goal. 
Defaults to False.\n \"\"\"\n super().__init__()\n MessengerMixin.reset_inbox(self)\n assert (\n not options\n or not (\"partial_background_file\" in options)\n and not (\"full_background_file\" in options)\n ), \"partial_background_file and full_background_file are not supported anymore\"\n if agents is not None:\n assert agents, \"agents must be provided\"\n assert len(agents) == 2, \"Only supporting two agents right now\"\n agent_names = list(agents.keys())\n agent_goals = self.profile.agent_goals\n assert (\n len(agent_goals) == 2\n ), \"Only supporting two agents right now\"\n\n raw_background = ScriptBackground(\n scenario=self.profile.scenario,\n p1_background=get_bio(\n self.profile.relationship,\n agents[agent_names[0]].profile,\n agent_id=0,\n ),\n p2_background=get_bio(\n self.profile.relationship,\n agents[agent_names[1]].profile,\n agent_id=1,\n ),\n p1_goal=f\"<root viewer='agent_0'>{agent_goals[0]}</root>\",\n p2_goal=f\"<root viewer='agent_1'>{agent_goals[1]}</root>\",\n p1_name=agent_names[0],\n p2_name=agent_names[1],\n )\n\n if lite:\n raw_background.p1_background = \"\"\n raw_background.p2_background = \"\"\n\n self.background = ScriptBackground(\n scenario=render_text_for_environment(raw_background.scenario),\n p1_background=render_text_for_environment(\n raw_background.p1_background\n ),\n p2_background=render_text_for_environment(\n raw_background.p2_background\n ),\n p1_goal=render_text_for_environment(raw_background.p1_goal),\n p2_goal=render_text_for_environment(raw_background.p2_goal),\n p1_name=raw_background.p1_name,\n p2_name=raw_background.p2_name,\n )\n else:\n raise ValueError(\"agents must be provided\")\n\n self.agents = [self.background.p1_name, self.background.p2_name]\n agent_backgrounds: list[ScriptBackground] = []\n if omniscient:\n for i in range(self.num_agents):\n agent_backgrounds.append(copy.deepcopy(self.background))\n else:\n for i in range(self.num_agents):\n agent_backgrounds.append(\n ScriptBackground(\n scenario=render_text_for_agent(\n raw_background.scenario, i\n ),\n p1_background=render_text_for_agent(\n raw_background.p1_background, i\n ),\n p2_background=render_text_for_agent(\n raw_background.p2_background, i\n ),\n p1_goal=render_text_for_agent(\n raw_background.p1_goal, i\n ),\n p2_goal=render_text_for_agent(\n raw_background.p2_goal, i\n ),\n p1_name=raw_background.p1_name,\n p2_name=raw_background.p2_name,\n )\n )\n background_for_a = agent_backgrounds[0]\n background_for_b = agent_backgrounds[1]\n\n print(\"Is the agent omniscient?\", omniscient)\n if not omniscient:\n background_for_a.p2_goal = \"Unknown\"\n background_for_b.p1_goal = \"Unknown\"\n\n self.action_spaces = {\n agent: Dict(\n dict(\n action_type=Discrete(len(self.available_action_types)),\n argument=Text(256),\n )\n )\n for agent in self.agents\n }\n self.turn_number = 0\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[0] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n\n self.recv_message(\"Environment\", self.background)\n\n return {\n self.background.p1_name: Observation(\n last_turn=background_for_a.to_natural_language(),\n turn_number=0,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=background_for_b.to_natural_language(),\n turn_number=0,\n 
available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n }\n\n @beartype\n def step(\n self, actions: dict[str, AgentAction] | dict[str, dict[str, int | str]]\n ) -> tuple[\n dict[str, Observation],\n dict[str, float],\n dict[str, bool],\n dict[str, bool],\n dict[str, dict[Any, Any]],\n ]:\n # Time step ++\n self.turn_number += 1\n\n # For action sampled from action space, it needs to be converted into AgentAction\n complied_actions: dict[str, AgentAction] = {}\n for key in actions.keys():\n action = actions[key]\n if isinstance(action, AgentAction):\n complied_actions[key] = action\n else:\n action[\"action_type\"] = self.available_action_types[\n int(action[\"action_type\"])\n ]\n complied_actions[key] = AgentAction.parse_obj(action)\n\n # Masking actions from agent that are in turn\n for idx, agent in enumerate(self.agents):\n if not self.action_mask[idx]:\n complied_actions[agent] = AgentAction(\n action_type=\"none\", argument=\"\"\n )\n\n self.recv_message(\n \"Environment\", SimpleMessage(message=f\"Turn #{self.turn_number}\")\n )\n for agent, action in complied_actions.items():\n self.recv_message(agent, action)\n\n response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *(\n evaluator(\n turn_number=self.turn_number, messages=self.inbox\n )\n for evaluator in self.evaluators\n )\n )\n )\n )\n\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[self.turn_number % len(self.action_mask)] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n obs = _actions_to_natural_language(complied_actions)\n return (\n {\n self.background.p1_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=0),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=1),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n },\n {\n self.background.p1_name: (\n response.p1_rate\n if isinstance(response.p1_rate, float)\n else response.p1_rate[0]\n )\n if response.p1_rate\n else 0,\n self.background.p2_name: (\n response.p2_rate\n if isinstance(response.p2_rate, float)\n else response.p2_rate[0]\n )\n if response.p2_rate\n else 0,\n },\n {\n self.background.p1_name: response.terminated,\n self.background.p2_name: response.terminated,\n },\n {\n self.background.p1_name: False,\n self.background.p2_name: False,\n },\n {\n self.background.p1_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p1_rate or 0,\n },\n self.background.p2_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p2_rate or 0,\n },\n },\n )\n\n @beartype\n async def astep(\n self, actions: dict[str, AgentAction] | dict[str, dict[str, int | str]]\n ) -> tuple[\n dict[str, Observation],\n dict[str, float],\n dict[str, bool],\n dict[str, bool],\n dict[str, dict[Any, Any]],\n ]:\n # Time step ++\n self.turn_number += 1\n\n # For action sampled from action space, it needs to be converted into AgentAction\n complied_actions: dict[str, AgentAction] = {}\n for key in actions.keys():\n action = actions[key]\n if isinstance(action, AgentAction):\n complied_actions[key] = action\n else:\n 
action[\"action_type\"] = self.available_action_types[\n int(action[\"action_type\"])\n ]\n complied_actions[key] = AgentAction.parse_obj(action)\n\n # Masking actions from agent that are in turn\n for idx, agent in enumerate(self.agents):\n if not self.action_mask[idx]:\n complied_actions[agent] = AgentAction(\n action_type=\"none\", argument=\"\"\n )\n\n self.recv_message(\n \"Environment\", SimpleMessage(message=f\"Turn #{self.turn_number}\")\n )\n for agent, action in complied_actions.items():\n self.recv_message(agent, action)\n\n response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *await asyncio.gather(\n *[\n evaluator.__acall__(\n turn_number=self.turn_number,\n messages=self.inbox,\n )\n for evaluator in self.evaluators\n ]\n )\n )\n )\n )\n\n if response.terminated:\n terminal_response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *await asyncio.gather(\n *[\n evaluator.__acall__(\n turn_number=self.turn_number,\n messages=self.inbox,\n )\n for evaluator in self.terminal_evaluators\n ]\n )\n )\n )\n )\n # incorporate terminal response into response\n response.p1_rate = response.p1_rate or terminal_response.p1_rate\n response.p2_rate = response.p2_rate or terminal_response.p2_rate\n if response.comments and terminal_response.comments:\n response.comments += terminal_response.comments\n elif terminal_response.comments:\n response.comments = terminal_response.comments\n\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[self.turn_number % len(self.action_mask)] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n obs = _actions_to_natural_language(complied_actions)\n info = {\n self.background.p1_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p1_rate or 0,\n },\n self.background.p2_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p2_rate or 0,\n },\n }\n if response.terminated:\n info[\"rewards_prompt\"] = {\"overall_prompt\": self.terminal_evaluators[0].prompt} # type: ignore\n\n return (\n {\n self.background.p1_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=0),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=1),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n },\n {\n self.background.p1_name: (\n response.p1_rate\n if isinstance(response.p1_rate, float)\n else response.p1_rate[0]\n )\n if response.p1_rate\n else 0,\n self.background.p2_name: (\n response.p2_rate\n if isinstance(response.p2_rate, float)\n else response.p2_rate[0]\n )\n if response.p2_rate\n else 0,\n },\n {\n self.background.p1_name: response.terminated,\n self.background.p2_name: response.terminated,\n },\n {\n self.background.p1_name: False,\n self.background.p2_name: False,\n },\n info,\n )\n\n def render(self, mode: str = \"human\") -> None:\n pass\n\n def close(self) -> None:\n pass"
},
{
"identifier": "arun_one_episode",
"path": "sotopia/server.py",
"snippet": "@gin.configurable\nasync def arun_one_episode(\n env: ParallelSotopiaEnv,\n agent_list: Sequence[BaseAgent[Observation, AgentAction]],\n model_dict: dict[str, LLM_Name],\n omniscient: bool = False,\n script_like: bool = False,\n json_in_script: bool = False,\n tag: str | None = None,\n push_to_db: bool = False,\n) -> list[tuple[str, str, Message]]:\n agents = Agents({agent.agent_name: agent for agent in agent_list})\n environment_messages = env.reset(agents=agents, omniscient=omniscient)\n agents_model_names = [model_dict[\"agent1\"], model_dict[\"agent2\"]]\n for agent_name, agent_model in zip(env.agents, agents_model_names):\n if agent_model == \"human\":\n agents[agent_name] = HumanAgent(agent_name)\n elif agent_model == \"redis\":\n agents[agent_name] = RedisAgent(agent_name)\n elif script_like and not json_in_script:\n agents[agent_name] = ScriptWritingAgent(\n agent_name,\n model_name=agent_model,\n background=env.background,\n agent_names=env.agents,\n )\n else:\n agents[agent_name] = LLMAgent(\n agent_name, model_name=agent_model, script_like=script_like\n )\n agents.reset()\n\n messages: list[list[tuple[str, str, Message]]] = []\n\n # Main Event Loop\n done = False\n messages.append(\n [\n (\"Environment\", agent_name, environment_messages[agent_name])\n for agent_name in env.agents\n ]\n )\n # set goal for agents\n for index, agent_name in enumerate(env.agents):\n agents[agent_name].goal = env.profile.agent_goals[index]\n rewards: list[list[float]] = []\n reasons: list[str] = []\n while not done:\n # gather agent messages\n agent_messages: dict[str, AgentAction] = dict()\n actions = await asyncio.gather(\n *[\n agents[agent_name].aact(environment_messages[agent_name])\n for agent_name in env.agents\n ]\n )\n if script_like:\n # manually mask one message\n agent_mask = env.action_mask\n for idx in range(len(agent_mask)):\n print(\"Current mask: \", agent_mask)\n if agent_mask[idx] == 0:\n print(\"Action not taken: \", actions[idx])\n actions[idx] = AgentAction(action_type=\"none\", argument=\"\")\n else:\n print(\"Current action taken: \", actions[idx])\n\n # actions = cast(list[AgentAction], actions)\n for idx, agent_name in enumerate(env.agents):\n agent_messages[agent_name] = actions[idx]\n\n messages[-1].append(\n (agent_name, \"Environment\", agent_messages[agent_name])\n )\n\n # send agent messages to environment\n (\n environment_messages,\n rewards_in_turn,\n terminated,\n ___,\n info,\n ) = await env.astep(agent_messages)\n messages.append(\n [\n (\"Environment\", agent_name, environment_messages[agent_name])\n for agent_name in env.agents\n ]\n )\n # print(\"Environment message: \", environment_messages)\n # exit(0)\n rewards.append(\n [rewards_in_turn[agent_name] for agent_name in env.agents]\n )\n reasons.append(\n \" \".join(info[agent_name][\"comments\"] for agent_name in env.agents)\n )\n done = all(terminated.values())\n\n # TODO: clean up this part\n epilog = EpisodeLog(\n environment=env.profile.pk,\n agents=[agent.profile.pk for agent in agent_list],\n tag=tag,\n models=[model_dict[\"env\"], model_dict[\"agent1\"], model_dict[\"agent2\"]],\n messages=[\n [\n (m[0], m[1], m[2].to_natural_language())\n for m in messages_in_turn\n ]\n for messages_in_turn in messages\n ],\n reasoning=info[env.agents[0]][\"comments\"],\n rewards=[\n info[agent_name][\"complete_rating\"] for agent_name in env.agents\n ],\n rewards_prompt=info[\"rewards_prompt\"][\"overall_prompt\"],\n )\n rich.print(epilog.rewards_prompt)\n agent_profiles, conversation = 
epilog.render_for_humans()\n for agent_profile in agent_profiles:\n rich.print(agent_profile)\n for message in conversation:\n rich.print(message)\n\n if push_to_db:\n try:\n epilog.save()\n except Exception as e:\n logging.error(f\"Failed to save episode log: {e}\")\n # flatten nested list messages\n return list(itertools.chain(*messages))"
}
] | import asyncio
import logging
import os
import random
import subprocess
import redis.asyncio as redis
import typer
from asyncio import gather
from asyncio import run as aiorun
from datetime import datetime
from logging import FileHandler
from typing import Literal, cast
from rich.logging import RichHandler
from sotopia.agents import redis_agent
from sotopia.agents.llm_agent import LLMAgent
from sotopia.database import EnvAgentComboStorage
from sotopia.database.persistent_profile import (
AgentProfile,
EnvironmentList,
EnvironmentProfile,
)
from sotopia.envs.evaluators import (
ReachGoalLLMEvaluator,
RuleBasedTerminatedEvaluator,
)
from sotopia.envs.parallel import ParallelSotopiaEnv
from sotopia.server import arun_one_episode | 9,170 |
process = subprocess.Popen(
["git", "rev-parse", "HEAD"], shell=False, stdout=subprocess.PIPE
)
git_head_hash = process.communicate()[0].strip()
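# Record the current git commit hash; it is baked into the log file name below so that
# every run can be traced back to the exact code revision that produced it.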
FORMAT = "%(asctime)s - %(levelname)s - %(name)s - %(message)s"
logging.basicConfig(
level=15,
format=FORMAT,
datefmt="[%X]",
handlers=[
RichHandler(),
FileHandler(
datetime.now().strftime(
f"./logs/%H_%M_%d_%m_%Y_{str(git_head_hash.decode('utf-8'))}.log"
)
),
],
)
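# Note: level=15 sits between logging.DEBUG (10) and logging.INFO (20), so messages
# logged at the custom level 15 are kept while plain DEBUG output is filtered out.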
app = typer.Typer()
async def _start_server_with_two_session_ids_and_agent_env_combo(
session_ids: list[str], agent_env_combo_pk: str
) -> None:
env_agent_combo_storage = EnvAgentComboStorage.get(agent_env_combo_pk)
env = ParallelSotopiaEnv(
env_profile=EnvironmentProfile.get(env_agent_combo_storage.env_id),
model_name="gpt-4",
action_order="round-robin",
evaluators=[
RuleBasedTerminatedEvaluator(max_turn_number=20, max_stale_turn=2),
],
terminal_evaluators=[
ReachGoalLLMEvaluator("gpt-4"),
],
)
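    # Per ParallelSotopiaEnv.astep, `evaluators` are queried every turn (here: rule-based
    # termination), while `terminal_evaluators` (the LLM goal scorer) only run once the
    # episode has terminated.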
random.shuffle(session_ids)
agents = [
redis_agent.RedisAgent(
agent_profile=AgentProfile.get(
env_agent_combo_storage.agent_ids[idx]
),
session_id=session_id,
)
for idx, session_id in enumerate(session_ids)
]
await arun_one_episode(
env,
agents,
{"env": "gpt-4", "agent1": "redis", "agent2": "redis"},
tag="human_human_v0.0.3_dryrun",
push_to_db=True,
)
async def _start_server_with_one_session_id_and_agent_env_combo(
session_id: str,
agent_env_combo_pk: str,
left_or_right: Literal["left", "right"],
) -> None:
env_agent_combo_storage = EnvAgentComboStorage.get(agent_env_combo_pk)
env = ParallelSotopiaEnv(
env_profile=EnvironmentProfile.get(env_agent_combo_storage.env_id),
model_name="gpt-4",
action_order="round-robin",
evaluators=[
RuleBasedTerminatedEvaluator(max_turn_number=20, max_stale_turn=2),
],
terminal_evaluators=[
ReachGoalLLMEvaluator("gpt-4"),
],
)
agents = (
[
redis_agent.RedisAgent(
agent_profile=AgentProfile.get(
env_agent_combo_storage.agent_ids[0]
),
session_id=session_id,
),
|
process = subprocess.Popen(
["git", "rev-parse", "HEAD"], shell=False, stdout=subprocess.PIPE
)
git_head_hash = process.communicate()[0].strip()
FORMAT = "%(asctime)s - %(levelname)s - %(name)s - %(message)s"
logging.basicConfig(
level=15,
format=FORMAT,
datefmt="[%X]",
handlers=[
RichHandler(),
FileHandler(
datetime.now().strftime(
f"./logs/%H_%M_%d_%m_%Y_{str(git_head_hash.decode('utf-8'))}.log"
)
),
],
)
app = typer.Typer()
async def _start_server_with_two_session_ids_and_agent_env_combo(
session_ids: list[str], agent_env_combo_pk: str
) -> None:
env_agent_combo_storage = EnvAgentComboStorage.get(agent_env_combo_pk)
env = ParallelSotopiaEnv(
env_profile=EnvironmentProfile.get(env_agent_combo_storage.env_id),
model_name="gpt-4",
action_order="round-robin",
evaluators=[
RuleBasedTerminatedEvaluator(max_turn_number=20, max_stale_turn=2),
],
terminal_evaluators=[
ReachGoalLLMEvaluator("gpt-4"),
],
)
random.shuffle(session_ids)
agents = [
redis_agent.RedisAgent(
agent_profile=AgentProfile.get(
env_agent_combo_storage.agent_ids[idx]
),
session_id=session_id,
)
for idx, session_id in enumerate(session_ids)
]
await arun_one_episode(
env,
agents,
{"env": "gpt-4", "agent1": "redis", "agent2": "redis"},
tag="human_human_v0.0.3_dryrun",
push_to_db=True,
)
async def _start_server_with_one_session_id_and_agent_env_combo(
session_id: str,
agent_env_combo_pk: str,
left_or_right: Literal["left", "right"],
) -> None:
env_agent_combo_storage = EnvAgentComboStorage.get(agent_env_combo_pk)
env = ParallelSotopiaEnv(
env_profile=EnvironmentProfile.get(env_agent_combo_storage.env_id),
model_name="gpt-4",
action_order="round-robin",
evaluators=[
RuleBasedTerminatedEvaluator(max_turn_number=20, max_stale_turn=2),
],
terminal_evaluators=[
ReachGoalLLMEvaluator("gpt-4"),
],
)
agents = (
[
redis_agent.RedisAgent(
agent_profile=AgentProfile.get(
env_agent_combo_storage.agent_ids[0]
),
session_id=session_id,
), | LLMAgent( | 1 | 2023-10-23 19:47:26+00:00 | 12k |
Qualcomm-AI-research/geometric-algebra-transformer | gatr/experiments/nbody/wrappers.py | [
{
"identifier": "GCAGNN",
"path": "gatr/baselines/gcan.py",
"snippet": "class GCAGNN(nn.Module):\n \"\"\"GCA-GNN model as described in D. Ruhe et al.\n\n The model was described in \"Geometric Clifford Algebra Networks\" by D.Ruhe et al.,\n and in private communication from D. Ruhe.\n\n Combines multiple GCAGNNLayers.\n\n This network uses the same projective geometric algebra representations as GATr, but is not\n E(3)-equivariant.\n\n References\n ----------\n D. Ruhe et al, \"Geometric Clifford Algebra Networks\", arXiv:2302.06594\n\n Parameters\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n node_channels : int\n Number of channels in the hidden representation for each node.\n message_channels : int\n Number of channels in the messages.\n mlp_hidden_channels : int\n Number of hidden channels in the MLPs.\n mlp_hidden_layers : int\n Number of hidden layers in the MLPs.\n message_passing_steps : int\n Number of message-passing steps / GCAGNNLayer blocks.\n \"\"\"\n\n def __init__(\n self,\n in_channels,\n out_channels,\n node_channels,\n message_channels,\n mlp_hidden_channels,\n mlp_hidden_layers,\n message_passing_steps,\n **kwargs,\n ):\n super().__init__()\n\n # Construct layers\n self.layers = nn.ModuleList([])\n shared_kwargs = dict(\n mlp_hidden_channels=mlp_hidden_channels,\n mlp_hidden_layers=mlp_hidden_layers,\n message_channels=message_channels,\n )\n\n # Initial step: in_channels to node_channels\n self.layers.append(GCAGNNLayer(in_channels, node_channels, **shared_kwargs))\n\n # Intermediate steps / layers\n for _ in range(message_passing_steps - 2):\n self.layers.append(GCAGNNLayer(node_channels, node_channels, **shared_kwargs))\n\n # Final step: node_channels to out_channels\n self.layers.append(GCAGNNLayer(node_channels, out_channels, **shared_kwargs))\n\n def forward(self, x, edge_index):\n \"\"\"Forward pass.\"\"\"\n for layer in self.layers:\n x = layer(x, edge_index=edge_index)\n return x"
},
{
"identifier": "BaselineAxialTransformer",
"path": "gatr/baselines/transformer.py",
"snippet": "class BaselineAxialTransformer(nn.Module):\n \"\"\"Baseline axial transformer for data with two token dimensions.\n\n Combines num_blocks transformer blocks, each consisting of multi-head self-attention layers, an\n MLP, residual connections, and normalization layers.\n\n Assumes input data with shape `(..., num_items_1, num_items_2, num_channels, [16])`.\n\n The first, third, fifth, ... block computes attention over the `items_2` axis. The other blocks\n compute attention over the `items_1` axis. Positional encoding can be specified separately for\n both axes.\n\n Parameters\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n hidden_channels : int\n Number of hidden channels.\n num_blocks : int\n Number of transformer blocks.\n num_heads : int\n Number of attention heads.\n pos_encodings : tuple of bool\n Whether to apply rotary positional embeddings along the item dimensions to the scalar keys\n and queries.\n pos_encoding_base : int\n Maximum frequency used in positional encodings. (The minimum frequency is always 1.)\n \"\"\"\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n hidden_channels: int,\n num_blocks: int = 20,\n num_heads: int = 8,\n pos_encodings: Tuple[bool, bool] = (False, False),\n pos_encoding_base: int = 4096,\n ) -> None:\n super().__init__()\n self.linear_in = nn.Linear(in_channels, hidden_channels)\n self.blocks = nn.ModuleList(\n [\n BaselineTransformerBlock(\n hidden_channels,\n num_heads=num_heads,\n pos_encoding=pos_encodings[(block + 1) % 2],\n pos_encoding_base=pos_encoding_base,\n )\n for block in range(num_blocks)\n ]\n )\n self.linear_out = nn.Linear(hidden_channels, out_channels)\n\n def forward(self, inputs: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward pass.\n\n Parameters\n ----------\n inputs : Tensor with shape (..., num_items1, num_items2, num_channels)\n Input data\n\n Returns\n -------\n outputs : Tensor with shape (..., num_items1, num_items2, num_channels)\n Outputs\n \"\"\"\n\n rearrange_pattern = \"... i j c -> ... j i c\"\n\n h = self.linear_in(inputs)\n\n for i, block in enumerate(self.blocks):\n # For first, third, ... block, we want to perform attention over the first token\n # dimension. We implement this by transposing the two item dimensions.\n if i % 2 == 1:\n h = rearrange(h, rearrange_pattern)\n\n h = block(h)\n\n # Transposing back to standard axis order\n if i % 2 == 1:\n h = rearrange(h, rearrange_pattern)\n\n outputs = self.linear_out(h)\n\n return outputs"
},
{
"identifier": "BaselineTransformer",
"path": "gatr/baselines/transformer.py",
"snippet": "class BaselineTransformer(nn.Module):\n \"\"\"Baseline transformer.\n\n Combines num_blocks transformer blocks, each consisting of multi-head self-attention layers, an\n MLP, residual connections, and normalization layers.\n\n Parameters\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n hidden_channels : int\n Number of hidden channels.\n num_blocks : int\n Number of transformer blocks.\n num_heads : int\n Number of attention heads.\n pos_encoding : bool\n Whether to apply rotary positional embeddings along the item dimension to the scalar keys\n and queries.\n pos_encoding_base : int\n Maximum frequency used in positional encodings. (The minimum frequency is always 1.)\n increase_hidden_channels : int\n Factor by which the key, query, and value size is increased over the default value of\n hidden_channels / num_heads.\n multi_query : bool\n Use multi-query attention instead of multi-head attention.\n \"\"\"\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n hidden_channels: int,\n num_blocks: int = 10,\n num_heads: int = 8,\n pos_encoding: bool = False,\n pos_encoding_base: int = 4096,\n checkpoint_blocks: bool = False,\n increase_hidden_channels=1,\n multi_query: bool = False,\n ) -> None:\n super().__init__()\n self.checkpoint_blocks = checkpoint_blocks\n self.linear_in = nn.Linear(in_channels, hidden_channels)\n self.blocks = nn.ModuleList(\n [\n BaselineTransformerBlock(\n hidden_channels,\n num_heads=num_heads,\n pos_encoding=pos_encoding,\n pos_encoding_base=pos_encoding_base,\n increase_hidden_channels=increase_hidden_channels,\n multi_query=multi_query,\n )\n for _ in range(num_blocks)\n ]\n )\n self.linear_out = nn.Linear(hidden_channels, out_channels)\n\n def forward(self, inputs: torch.Tensor, attention_mask=None) -> torch.Tensor:\n \"\"\"Forward pass.\n\n Parameters\n ----------\n inputs : Tensor with shape (..., num_items, num_channels)\n Input data\n attention_mask : None or Tensor or xformers.ops.AttentionBias\n Optional attention mask\n\n Returns\n -------\n outputs : Tensor with shape (..., num_items, num_channels)\n Outputs\n \"\"\"\n h = self.linear_in(inputs)\n for block in self.blocks:\n if self.checkpoint_blocks:\n fn = partial(block, attention_mask=attention_mask)\n h = checkpoint(fn, h)\n else:\n h = block(h, attention_mask=attention_mask)\n outputs = self.linear_out(h)\n return outputs"
},
{
"identifier": "BaseWrapper",
"path": "gatr/experiments/base_wrapper.py",
"snippet": "class BaseWrapper(nn.Module):\n \"\"\"Base GATr wrapper.\n\n To be subclassed by experiment-specific wrapper classes.\n\n Parameters\n ----------\n net : torch.nn.Module\n GATr network.\n scalars : bool\n Whether the GATr model uses auxiliary scalars in its inputs and outputs. (In hidden\n representations, GATr uses auxiliary scalars always.)\n return_other : bool\n Whether the wrapper should return regularization terms in addition to the model predictions.\n \"\"\"\n\n def __init__(self, net, scalars=True, return_other=True):\n super().__init__()\n self.net = net\n self.scalars = scalars\n self.return_other = return_other\n\n def build_attention_mask(\n self, inputs, mv=None, s=None\n ): # pylint: disable=unused-argument,redundant-returns-doc\n \"\"\"Construct attention mask.\n\n Parameters\n ----------\n inputs : torch.Tensor\n Raw inputs to wrapped network.\n mv : torch.Tensor\n Multivector embedding of inputs.\n s : torch.Tensor\n Auxiliary scalar embedding of inputs.\n\n Returns\n -------\n attention_mask : None or torch.Tensor or xformers.ops.fmha.BlockDiagonalMask\n Attention mask.\n \"\"\"\n return None\n\n def forward(self, inputs: torch.Tensor):\n \"\"\"Wrapped forward pass pass.\n\n Parses inputs into GA + scalar representation, calls the forward pass of the wrapped net,\n and extracts the outputs from the GA + scalar representation again.\n\n Parameters\n ----------\n inputs : torch.Tensor\n Raw inputs, as given by dataset.\n\n Returns\n -------\n outputs : torch.Tensor\n Raw outputs, as expected in dataset.\n other : torch.Tensor\n Additional output data, e.g. required for regularization. Only returned if\n `self.return_other`.\n \"\"\"\n\n multivector, scalars = self.embed_into_ga(inputs)\n mask = self.build_attention_mask( # pylint: disable=assignment-from-none\n inputs, mv=multivector, s=scalars\n )\n multivector_outputs, scalar_outputs = self.net(\n multivector, scalars=scalars, attention_mask=mask\n )\n outputs, other = self.extract_from_ga(multivector_outputs, scalar_outputs)\n outputs = self.postprocess_results(inputs, outputs)\n\n if self.return_other:\n return outputs, other\n\n return outputs\n\n def embed_into_ga(self, inputs):\n \"\"\"Embeds raw inputs into the geometric algebra (+ scalar) representation.\n\n To be implemented by subclasses.\n\n Parameters\n ----------\n inputs : torch.Tensor\n Raw inputs, as given by dataset.\n\n Returns\n -------\n mv_inputs : torch.Tensor\n Multivector inputs, as expected by geometric network.\n scalar_inputs : torch.Tensor or None\n Scalar inputs, as expected by geometric network.\n \"\"\"\n raise NotImplementedError\n\n def extract_from_ga(self, multivector, scalars):\n \"\"\"Extracts raw outputs from the GATr multivector + scalar outputs.\n\n To be implemented by subclasses.\n\n Parameters\n ----------\n multivector : torch.Tensor\n Multivector outputs from GATr.\n scalars : torch.Tensor or None\n Scalar outputs from GATr.\n\n Returns\n -------\n outputs : torch.Tensor\n Raw outputs, as expected in dataset.\n other : torch.Tensor\n Additional output data, e.g. 
required for regularization.\n \"\"\"\n raise NotImplementedError\n\n def postprocess_results(self, inputs, outputs): # pylint: disable=unused-argument\n \"\"\"Postprocesses the outputs extracted from the GA representation.\n\n To be implemented by subclasses, optionally (by default, no postprocessing is applied).\n\n Parameters\n ----------\n inputs\n Raw inputs, pre embedding.\n outputs : torch.Tensor\n Raw outputs, pre postprocessing.\n\n Returns\n -------\n processed_outputs : torch.Tensor\n Raw outputs, after postprocessing.\n \"\"\"\n return outputs"
},
{
"identifier": "embed_point",
"path": "gatr/interface/point.py",
"snippet": "def embed_point(coordinates: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds 3D points in multivectors.\n\n We follow the convention used in the reference below and map points to tri-vectors.\n\n References\n ----------\n Leo Dorst, \"A Guided Tour to the Plane-Based Geometric Algebra PGA\",\n https://geometricalgebra.org/downloads/PGA4CS.pdf\n\n Parameters\n ----------\n coordinates : torch.Tensor with shape (..., 3)\n 3D coordinates\n\n Returns\n -------\n multivector : torch.Tensor with shape (..., 16)\n Embedding into multivector.\n \"\"\"\n\n # Create multivector tensor with same batch shape, same device, same dtype as input\n batch_shape = coordinates.shape[:-1]\n multivector = torch.zeros(*batch_shape, 16, dtype=coordinates.dtype, device=coordinates.device)\n\n # Embedding into trivectors\n # Homogeneous coordinates: unphysical component / embedding dim, x_123\n multivector[..., 14] = 1.0\n multivector[..., 13] = -coordinates[..., 0] # x-coordinate embedded in x_023\n multivector[..., 12] = coordinates[..., 1] # y-coordinate embedded in x_013\n multivector[..., 11] = -coordinates[..., 2] # z-coordinate embedded in x_012\n\n return multivector"
},
{
"identifier": "extract_point",
"path": "gatr/interface/point.py",
"snippet": "def extract_point(\n multivector: torch.Tensor, divide_by_embedding_dim: bool = True, threshold: float = 1e-3\n) -> torch.Tensor:\n \"\"\"Given a multivector, extract any potential 3D point from the trivector components.\n\n References\n ----------\n Leo Dorst, \"A Guided Tour to the Plane-Based Geometric Algebra PGA\",\n https://geometricalgebra.org/downloads/PGA4CS.pdf\n\n Parameters\n ----------\n multivector : torch.Tensor with shape (..., 16)\n Multivector.\n divide_by_embedding_dim : bool\n Whether to divice by the embedding dim. Proper PGA etiquette would have us do this, but it\n may not be good for NN training.\n threshold : float\n Minimum value of the additional, unphysical component. Necessary to avoid exploding values\n or NaNs when this unphysical component of the homogeneous coordinates becomes small.\n\n Returns\n -------\n coordinates : torch.Tensor with shape (..., 3)\n 3D coordinates corresponding to the trivector components of the multivector.\n \"\"\"\n\n coordinates = torch.cat(\n [-multivector[..., [13]], multivector[..., [12]], -multivector[..., [11]]], dim=-1\n )\n\n # Divide by embedding dim\n if divide_by_embedding_dim:\n embedding_dim = multivector[\n ..., [14]\n ] # Embedding dimension / scale of homogeneous coordinates\n embedding_dim = torch.where(torch.abs(embedding_dim) > threshold, embedding_dim, threshold)\n coordinates = coordinates / embedding_dim\n\n return coordinates"
},
{
"identifier": "extract_point_embedding_reg",
"path": "gatr/interface/point.py",
"snippet": "def extract_point_embedding_reg(multivector: torch.Tensor) -> torch.Tensor:\n \"\"\"Given a multivector x, returns |x_{123}| - 1.\n\n Put differently, this is the deviation of the norm of a pseudoscalar component from 1.\n This can be used as a regularization term when predicting point positions, to avoid x_123 to\n be too close to 0.\n\n Parameters\n ----------\n multivector : torch.Tensor with shape (..., 16)\n Multivector.\n\n Returns\n -------\n regularization : torch.Tensor with shape (..., 1)\n |multivector_123| - 1.\n \"\"\"\n\n return torch.abs(multivector[..., [14]]) - 1.0"
},
{
"identifier": "embed_scalar",
"path": "gatr/interface/scalar.py",
"snippet": "def embed_scalar(scalars: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds a scalar tensor into multivectors.\n\n Parameters\n ----------\n scalars: torch.Tensor with shape (..., 1)\n Scalar inputs.\n\n Returns\n -------\n multivectors: torch.Tensor with shape (..., 16)\n Multivector outputs. `multivectors[..., [0]]` is the same as `scalars`. The other components\n are zero.\n \"\"\"\n\n non_scalar_shape = list(scalars.shape[:-1]) + [15]\n non_scalar_components = torch.zeros(\n non_scalar_shape, device=scalars.device, dtype=scalars.dtype\n )\n embedding = torch.cat((scalars, non_scalar_components), dim=-1)\n\n return embedding"
},
{
"identifier": "embed_translation",
"path": "gatr/interface/translation.py",
"snippet": "def embed_translation(translation_vector: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds a 3D translation in multivectors.\n\n In our convention, a translation vector is embedded into a combination of the scalar and\n bivector components.\n\n We have (in agreement with Eq. (82) of the reference below) that\n ```\n T(t) = 1 - e_0 / 2 (t_1 e_1 + t_2 e_2 + t_3 e_3) .\n ```\n\n References\n ----------\n Leo Dorst, \"A Guided Tour to the Plane-Based Geometric Algebra PGA\",\n https://geometricalgebra.org/downloads/PGA4CS.pdf\n\n Parameters\n ----------\n translation_vector : torch.Tensor with shape (..., 3)\n Vectorial amount of translation.\n\n Returns\n -------\n multivector : torch.Tensor with shape (..., 16)\n Embedding into multivector.\n \"\"\"\n\n # Create multivector tensor with same batch shape, same device, same dtype as input\n batch_shape = translation_vector.shape[:-1]\n multivector = torch.zeros(\n *batch_shape, 16, dtype=translation_vector.dtype, device=translation_vector.device\n )\n\n # Embedding into trivectors\n multivector[..., 0] = 1.0 # scalar\n multivector[..., 5:8] = (\n -0.5 * translation_vector[..., :]\n ) # Translation vector embedded in x_0i with i = 1, 2, 3\n\n return multivector"
},
{
"identifier": "make_full_edge_index",
"path": "gatr/utils/misc.py",
"snippet": "@lru_cache()\[email protected]_grad()\ndef make_full_edge_index(num_nodes, batchsize=1, self_loops=False, device=torch.device(\"cpu\")):\n \"\"\"Creates a PyG-style edge index for a fully connected graph of `num_nodes` nodes.\"\"\"\n\n # Construct fully connected edge index\n src, dst = [], []\n for i, j in product(range(num_nodes), repeat=2):\n if not self_loops and i == j:\n continue\n src.append(i)\n dst.append(j)\n\n edge_index_per_batch = torch.LongTensor([src, dst]).to(device)\n\n # Repeat for each batch element\n if batchsize > 1:\n edge_index_list = [edge_index_per_batch + k * num_nodes for k in range(batchsize)]\n edge_index = torch.cat(edge_index_list, dim=1)\n else:\n edge_index = edge_index_per_batch\n\n return edge_index"
}
] | import dgl
import numpy as np
import torch
from e3nn.o3 import Irreps, spherical_harmonics
from torch import nn
from torch_geometric.data import Data
from torch_geometric.nn import knn_graph
from torch_scatter import scatter
from gatr.baselines.gcan import GCAGNN
from gatr.baselines.transformer import BaselineAxialTransformer, BaselineTransformer
from gatr.experiments.base_wrapper import BaseWrapper
from gatr.interface import (
embed_point,
embed_scalar,
embed_translation,
extract_point,
extract_point_embedding_reg,
)
from gatr.utils.misc import make_full_edge_index | 7,294 | SE3-Transformer model.
"""
def __init__(self, net, canonicalize_to_com=True, canonicalize_mode="com"):
super().__init__()
self.net = net
self.canonicalize_to_com = canonicalize_to_com
self.canonicalize_mode = canonicalize_mode
self.supports_variable_items = True
def forward(self, inputs):
"""Wrapped forward pass.
Parameters
----------
inputs : torch.Tensor
Raw inputs, as given by dataset.
Returns
-------
outputs : torch.Tensor
Raw outputs, as expected in dataset.
other : torch.Tensor
Dummy term, since the baselines do not require regularization.
Raises
------
ValueError
If `self.canonicalize_mode` is invalid.
"""
batchsize, num_objects, _ = inputs.shape
# Separate into scalars and vectors
masses = inputs[:, :, [0]] # (batchsize, objects, 1)
locations = inputs[:, :, 1:4] # (batchsize, objects, 3)
velocities = inputs[:, :, 4:7] # (batchsize, objects, 3)
# Canonicalize to center-of-mass frame if requested
if self.canonicalize_to_com:
if self.canonicalize_mode == "com":
weights = masses
elif self.canonicalize_mode == "heaviest":
weights = torch.exp(2.0 * masses.double())
else:
raise ValueError(f"Unknown canonicalization mode {self.canonicalize_mode}")
com = torch.sum(
weights / torch.sum(weights, dim=-2, keepdim=True) * locations.double(),
dim=-2,
keepdim=True,
).float()
locations = locations - com
else:
com = torch.zeros_like(locations)
# Represent as graph
graphs = self._build_graphs(locations, velocities, masses)
# Push through model
predictions = self.net(graphs)
predictions = predictions[:, 0, :] # Only positions, not velocities
predictions = predictions.view(batchsize, num_objects, 3)
predictions = (
locations + predictions
) # Model predicts positions relative to initial pos, make it absolute
# Undo canonicalization
if self.canonicalize_to_com:
predictions = predictions + com
return predictions, torch.zeros(batchsize, device=inputs.device)
def _build_graphs(self, locations, velocities, masses):
"""Builds graph for a full batch."""
graphs = [
self._build_graph(loc, vel, m) for loc, vel, m in zip(locations, velocities, masses)
]
graphs = dgl.batch(graphs)
return graphs
def _build_graph(self, locations, velocities, masses):
"""Builds graph for a single sample."""
n_points = len(locations)
indices_src, indices_dst = self._fully_connected_idx(n_points)
graph = dgl.DGLGraph((indices_src, indices_dst)).to(locations.device)
graph.ndata["x"] = torch.unsqueeze(locations, dim=1) # [N, 1, 3]
graph.ndata["v"] = torch.unsqueeze(velocities, dim=1) # [N, 1, 3]
graph.ndata["c"] = torch.unsqueeze(masses, dim=1) # [N, 1, 1]
graph.edata["d"] = locations[indices_dst] - locations[indices_src] # relative postions
graph.edata["w"] = masses[indices_dst] * masses[indices_src]
return graph
@staticmethod
def _fully_connected_idx(num_atoms):
"""Creates source and destination indices for a fully connected graph."""
src = []
dst = []
for i in range(num_atoms):
for j in range(num_atoms):
if i != j:
src.append(i)
dst.append(j)
return np.array(src), np.array(dst)
class NBodyGCANWrapper(nn.Module):
"""Wraps around GCA-MLP and GCA-GNN baselines for the n-body experiment.
Parameters
----------
net : torch.nn.Module
        GCAN model that accepts multivector inputs with 1 channel and
returns multivector outputs with 1 channel.
"""
def __init__(self, net, geometric_batching=False):
super().__init__()
self.net = net
self._geometric_batching = geometric_batching
| # Copyright (c) 2023 Qualcomm Technologies, Inc.
# All rights reserved.
def embed_nbody_data_in_pga(inputs):
"""Represent the n-body initial state in PGA multivectors.
Masses are represented as scalars, positions as trivectors, and velocities as bivectors
(like translations). All three are summed (this is equivalent to concatenation, as an equi
linear layer can easily separate the grades again).
This function is used both by the GATr and by the GCAN wrappers.
Parameters
----------
inputs : torch.Tensor with shape (batchsize, objects, 7)
n-body initial state: a concatenation of masses, initial positions, and initial
velocities along the feature dimension.
Returns
-------
multivector : torch.Tensor with shape (batchsize, objects, 1, 16)
GA embedding.
"""
# Build one multivector holding masses, points, and velocities for each object
masses = inputs[:, :, [0]] # (batchsize, objects, 1)
masses = embed_scalar(masses) # (batchsize, objects, 16)
points = inputs[:, :, 1:4] # (batchsize, objects, 3)
points = embed_point(points) # (batchsize, objects, 16)
velocities = inputs[:, :, 4:7] # (batchsize, objects, 3)
velocities = embed_translation(velocities) # (batchsize, objects, 16)
multivector = masses + points + velocities # (batchsize, objects, 16)
# Insert channel dimension
multivector = multivector.unsqueeze(2) # (batchsize, objects, 1, 16)
return multivector
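# Shape sketch (illustration only): for inputs of shape (batchsize, objects, 7), i.e.
# [mass, x, y, z, vx, vy, vz] per object,
#     mv = embed_nbody_data_in_pga(torch.randn(8, 5, 7))   # -> mv.shape == (8, 5, 1, 16)
# one multivector channel per object carrying the mass (scalar), position (trivector)
# and velocity (bivector) components.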
class NBodyGATrWrapper(BaseWrapper):
"""Wraps around GATr for the n-body prediction experiment.
Parameters
----------
net : torch.nn.Module
GATr model that accepts inputs with 1 multivector channel and 1 scalar channel, and
returns outputs with 1 multivector channel and 1 scalar channel.
"""
def __init__(self, net):
super().__init__(net, scalars=True, return_other=True)
self.supports_variable_items = True
def embed_into_ga(self, inputs):
"""Embeds raw inputs into the geometric algebra (+ scalar) representation.
Parameters
----------
inputs : torch.Tensor with shape (batchsize, objects, 7)
n-body initial state: a concatenation of masses, initial positions, and initial
velocities along the feature dimension.
Returns
-------
mv_inputs : torch.Tensor
Multivector representation of masses, positions, and velocities.
scalar_inputs : torch.Tensor or None
Dummy auxiliary scalars, containing no information.
"""
batchsize, num_objects, _ = inputs.shape
# Build one multivector holding masses, positions, and velocities for each object
multivector = embed_nbody_data_in_pga(inputs)
# Scalar inputs are not really needed here
scalars = torch.zeros((batchsize, num_objects, 1), device=inputs.device)
return multivector, scalars
def extract_from_ga(self, multivector, scalars):
"""Extracts raw outputs from the GATr multivector + scalar outputs.
We parameterize the predicted final positions as points.
Parameters
----------
multivector : torch.Tensor
Multivector outputs from GATr.
scalars : torch.Tensor or None
Scalar outputs from GATr.
Returns
-------
outputs : torch.Tensor
Predicted final-state positions.
other : torch.Tensor
Regularization terms.
"""
# Check channels of inputs. Batchsize and object numbers are free.
assert multivector.shape[2:] == (1, 16)
assert scalars.shape[2:] == (1,)
# Extract position
points = extract_point(multivector[:, :, 0, :])
# Extract non-point components and compute regularization
other = extract_point_embedding_reg(multivector[:, :, 0, :])
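        # extract_point_embedding_reg returns |x_123| - 1, so this penalty keeps the
        # homogeneous trivector component away from zero and extract_point numerically stable.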
reg = torch.sum(other**2, dim=[1, 2])
if self.scalars:
reg = reg + torch.sum(scalars**2, dim=[1, 2])
return points, reg
class NBodyBaselineWrapper(nn.Module):
"""Wraps around simple baselines (MLP or Transformer) for the n-body prediction experiment.
Parameters
----------
net : torch.nn.Module
Model that accepts inputs with 7 channels and returns outputs with 3 channels.
"""
def __init__(self, net):
super().__init__()
self.net = net
self.supports_variable_items = isinstance(
net, (BaselineTransformer, BaselineAxialTransformer)
)
def forward(self, inputs):
"""Wrapped forward pass.
Parameters
----------
inputs : torch.Tensor
Raw inputs, as given by dataset.
Returns
-------
outputs : torch.Tensor
Raw outputs, as expected in dataset.
other : torch.Tensor
Dummy term, since the baselines do not require regularization.
"""
batchsize = inputs.shape[0]
return self.net(inputs), torch.zeros(batchsize, device=inputs.device)
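# Usage sketch (hypothetical hidden size): a plain transformer baseline maps the raw
# 7 input channels directly to 3 predicted coordinates per object, e.g.
#     net = BaselineTransformer(in_channels=7, out_channels=3, hidden_channels=64)
#     outputs, reg = NBodyBaselineWrapper(net)(torch.randn(8, 5, 7))   # outputs: (8, 5, 3), reg: (8,)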
class NBodySEGNNWrapper(nn.Module):
"""Wraps around the SEGNN baseline for the n-body prediction experiment.
Parameters
----------
net : torch.nn.Module
        SEGNN model that accepts inputs with 2 vector channels and 1 scalar channel,
and returns outputs with 1 vector channel.
"""
def __init__(self, net, neighbors, lmax_attr, canonicalize_mode="com"):
super().__init__()
self.net = net
self.canonicalize_mode = canonicalize_mode
self.neighbors = neighbors
self.transform_attr_irreps = Irreps.spherical_harmonics(lmax_attr)
self.supports_variable_items = True
def forward(self, inputs):
"""Wrapped forward pass.
Parameters
----------
inputs : torch.Tensor
Raw inputs, as given by dataset.
Returns
-------
outputs : torch.Tensor
Raw outputs, as expected in dataset.
other : torch.Tensor
Dummy term, since the baselines do not require regularization.
Raises
------
ValueError
If `self.canonicalize_mode` is invalid.
"""
batchsize, num_objects, _ = inputs.shape
# Separate into scalars and vectors
masses = inputs[:, :, [0]] # (batchsize, objects, 1)
locations = inputs[:, :, 1:4] # (batchsize, objects, 3)
velocities = inputs[:, :, 4:7] # (batchsize, objects, 3)
# Canonicalize
if self.canonicalize_mode == "com":
weights = masses
elif self.canonicalize_mode == "heaviest":
weights = torch.exp(2.0 * masses.double())
elif self.canonicalize_mode == "even":
weights = torch.ones_like(masses)
else:
raise ValueError(f"Unknown canonicalization mode {self.canonicalize_mode}")
com = torch.sum(
weights / torch.sum(weights, dim=-2, keepdim=True) * locations.double(),
dim=-2,
keepdim=True,
).float()
locations = locations - com
# Represent as graph
graph = Data(pos=locations.view(-1, 3), vel=velocities.view(-1, 3), mass=masses.view(-1, 1))
batch = torch.arange(0, batchsize, device=inputs.device)
graph.batch = batch.repeat_interleave(num_objects).to(inputs.device, torch.long)
graph.edge_index = knn_graph(locations.view(-1, 3), self.neighbors, graph.batch)
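        # knn_graph connects each object to its `neighbors` nearest neighbours; passing
        # graph.batch restricts edges to objects belonging to the same sample.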
graph = self._augment_gravity_graph(graph) # Add O3 attributes
# Push through model
pred_shift = self.net(graph)
pred_shift = pred_shift.view(batchsize, num_objects, 3)
predictions = (
locations + pred_shift
) # The model predicts the shift, not the final positions
# Undo canonicalization
predictions = predictions + com
return predictions, torch.zeros(batchsize, device=inputs.device)
def _augment_gravity_graph(self, graph):
"""SEGNN feature engineering for n-body experiments.
Constructs node features (position relative to mean position, velocity embedding, absolute
velocity) and edge features (pairwise distances, product of charges / masses).
"""
pos = graph.pos
vel = graph.vel
mass = graph.mass
prod_mass = mass[graph.edge_index[0]] * mass[graph.edge_index[1]]
rel_pos = pos[graph.edge_index[0]] - pos[graph.edge_index[1]]
edge_dist = torch.sqrt(rel_pos.pow(2).sum(1, keepdims=True))
graph.edge_attr = spherical_harmonics(
self.transform_attr_irreps, rel_pos, normalize=True, normalization="integral"
)
vel_embedding = spherical_harmonics(
self.transform_attr_irreps, vel, normalize=True, normalization="integral"
)
graph.node_attr = (
scatter(graph.edge_attr, graph.edge_index[1], dim=0, reduce="mean") + vel_embedding
)
vel_abs = torch.sqrt(vel.pow(2).sum(1, keepdims=True))
graph.x = torch.cat((pos, vel, vel_abs), 1) # Note that pos is here already canonicalized
graph.additional_message_features = torch.cat((edge_dist, prod_mass), dim=-1)
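        # In total: 7 scalar node features (position, velocity, speed), spherical-harmonic
        # node/edge attributes, and 2 extra edge features (distance, mass product).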
return graph
class NBodySE3TransformerWrapper(nn.Module):
"""Wraps around the SE3-Transformer baseline for the n-body prediction experiment.
Parameters
----------
net : torch.nn.Module
SE3-Transformer model.
"""
def __init__(self, net, canonicalize_to_com=True, canonicalize_mode="com"):
super().__init__()
self.net = net
self.canonicalize_to_com = canonicalize_to_com
self.canonicalize_mode = canonicalize_mode
self.supports_variable_items = True
def forward(self, inputs):
"""Wrapped forward pass.
Parameters
----------
inputs : torch.Tensor
Raw inputs, as given by dataset.
Returns
-------
outputs : torch.Tensor
Raw outputs, as expected in dataset.
other : torch.Tensor
Dummy term, since the baselines do not require regularization.
Raises
------
ValueError
If `self.canonicalize_mode` is invalid.
"""
batchsize, num_objects, _ = inputs.shape
# Separate into scalars and vectors
masses = inputs[:, :, [0]] # (batchsize, objects, 1)
locations = inputs[:, :, 1:4] # (batchsize, objects, 3)
velocities = inputs[:, :, 4:7] # (batchsize, objects, 3)
# Canonicalize to center-of-mass frame if requested
if self.canonicalize_to_com:
if self.canonicalize_mode == "com":
weights = masses
elif self.canonicalize_mode == "heaviest":
weights = torch.exp(2.0 * masses.double())
else:
raise ValueError(f"Unknown canonicalization mode {self.canonicalize_mode}")
com = torch.sum(
weights / torch.sum(weights, dim=-2, keepdim=True) * locations.double(),
dim=-2,
keepdim=True,
).float()
locations = locations - com
else:
com = torch.zeros_like(locations)
# Represent as graph
graphs = self._build_graphs(locations, velocities, masses)
# Push through model
predictions = self.net(graphs)
predictions = predictions[:, 0, :] # Only positions, not velocities
predictions = predictions.view(batchsize, num_objects, 3)
predictions = (
locations + predictions
) # Model predicts positions relative to initial pos, make it absolute
# Undo canonicalization
if self.canonicalize_to_com:
predictions = predictions + com
return predictions, torch.zeros(batchsize, device=inputs.device)
def _build_graphs(self, locations, velocities, masses):
"""Builds graph for a full batch."""
graphs = [
self._build_graph(loc, vel, m) for loc, vel, m in zip(locations, velocities, masses)
]
graphs = dgl.batch(graphs)
return graphs
def _build_graph(self, locations, velocities, masses):
"""Builds graph for a single sample."""
n_points = len(locations)
indices_src, indices_dst = self._fully_connected_idx(n_points)
graph = dgl.DGLGraph((indices_src, indices_dst)).to(locations.device)
graph.ndata["x"] = torch.unsqueeze(locations, dim=1) # [N, 1, 3]
graph.ndata["v"] = torch.unsqueeze(velocities, dim=1) # [N, 1, 3]
graph.ndata["c"] = torch.unsqueeze(masses, dim=1) # [N, 1, 1]
graph.edata["d"] = locations[indices_dst] - locations[indices_src] # relative postions
graph.edata["w"] = masses[indices_dst] * masses[indices_src]
return graph
@staticmethod
def _fully_connected_idx(num_atoms):
"""Creates source and destination indices for a fully connected graph."""
src = []
dst = []
for i in range(num_atoms):
for j in range(num_atoms):
if i != j:
src.append(i)
dst.append(j)
return np.array(src), np.array(dst)
class NBodyGCANWrapper(nn.Module):
"""Wraps around GCA-MLP and GCA-GNN baselines for the n-body experiment.
Parameters
----------
net : torch.nn.Module
        GCAN model that accepts multivector inputs with 1 channel and
returns multivector outputs with 1 channel.
"""
def __init__(self, net, geometric_batching=False):
super().__init__()
self.net = net
self._geometric_batching = geometric_batching | self.supports_variable_items = isinstance(net, GCAGNN) | 0 | 2023-10-23 15:58:36+00:00 | 12k |
tomguluson92/cloth2tex | phase1_inference.py | [
{
"identifier": "ClothRenderer",
"path": "renderer/cloth_renderer.py",
"snippet": "class ClothRenderer(object):\n \n def __init__(self, objfile, resolution=512, focal_distance=1.6, scale_factor=1):\n self.device = torch.device(\"cuda:0\")\n\n self.img_size = resolution\n self.render_size = resolution\n self.renderer, self.renderer_silhouette = self.__get_renderer(self.render_size, focal_distance)\n \n print(\"[Cloth2Tex]\", objfile)\n obj_filename = os.path.join(objfile)\n verts, faces, aux = load_obj(\n obj_filename,\n device=self.device,\n load_textures=True)\n self.faces = faces.verts_idx\n self.verts = verts\n self.aux = aux\n \n self.verts = self.normalize_vertex(verts.clone()) * scale_factor\n \n self.center = verts.mean(0)\n self.scale = max((verts - self.center).abs().max(0)[0])\n self.landmark_cam = OrthogonalCamera(rotation=self.cameras.R.cuda(), translation=self.cameras.T.cuda()).to(self.device)\n \n _keys = []\n if len(aux.texture_images.keys()) > 0:\n for _ in aux.texture_images.keys():\n _keys.append(_)\n self.tex_lst = [aux.texture_images[i] for i in _keys]\n texture_image = self.tex_lst[0]\n \n \n self.verts_uvs = aux.verts_uvs[None, ...] # (1, V, 2)\n faces_uvs = faces.textures_idx[None, ...] # (1, F, 3)\n tex_maps = aux.texture_images\n\n # Canonical Mesh\n texture_image = texture_image[None, ...].to(self.device) # (1, H, W, 3)\n self.texture = TexturesUV(maps=texture_image, faces_uvs=self.faces[None], verts_uvs=self.verts_uvs)\n self.canonical_mesh = Meshes([self.verts], [self.faces], self.texture)\n \n def normalize_vertex(self, verts):\n # Normalizing\n N = verts.shape[0]\n center = verts.mean(0)\n scale = max((verts - center).abs().max(0)[0])\n \n verts = verts - center\n verts = verts * (1/float(scale))\n \n return verts\n \n def denormalize_vertex(self, verts):\n \n out = self.scale*verts + self.center\n \n return out\n \n def render_silhouette(self, verts, side='back', landmark=True, vertex_number=[[], []]):\n vert_lst_front = vertex_number[0]\n vert_lst_back = vertex_number[1]\n \n tmp_verts = verts.clone()\n mesh = Meshes([tmp_verts], [self.faces], self.texture)\n meshes = mesh.extend(2)\n \n # Get a batch(2) of viewing angles. 
\n elev = torch.linspace(180, -180, 2)\n azim = torch.linspace(0, 0, 2)\n \n focal_length = torch.linspace(-1, 1, 2)\n R, T = look_at_view_transform(dist=focal_length, elev=elev, azim=azim)\n cameras = FoVOrthographicCameras(device=self.device, R=R, T=T)\n \n target_images, fragments = self.renderer_silhouette(meshes, cameras=cameras)\n \n if landmark is True:\n # project normalized vertex to image space(fix vertex)\n specific_verts_2d_front = self.landmark_cam(verts[vert_lst_front].unsqueeze(0))[0]\n # conversion from OpenGL coordinate to OpenCV coordinate\n specific_verts_2d_front[:,] = -specific_verts_2d_front[:,]\n # conversion from [-1,1] to [0,512]\n specific_verts_2d_front = (specific_verts_2d_front+1)/2*self.render_size\n \n # project normalized vertex to image space(fix vertex)\n specific_verts_2d_back = self.landmark_cam(verts[vert_lst_back].unsqueeze(0))[0]\n # conversion from OpenGL coordinate to OpenCV coordinate\n specific_verts_2d_back[:,] = -specific_verts_2d_back[:,]\n # conversion from [-1,1] to [0,512]\n specific_verts_2d_back = (specific_verts_2d_back+1)/2*self.render_size\n \n if side == 'front':\n return target_images[0], [specific_verts_2d_front]\n elif side == 'back':\n return target_images[1], [specific_verts_2d_back]\n else:\n return target_images, [specific_verts_2d_front, specific_verts_2d_back]\n \n return target_images, fragments\n \n def render_image(self, texture_image):\n texture = TexturesUV(maps=texture_image, faces_uvs=self.faces[None], verts_uvs=self.verts_uvs)\n \n tmp_verts = self.verts.clone()\n mesh = Meshes([tmp_verts], [self.faces.clone()], texture)\n meshes = mesh.extend(2)\n \n # Get a batch(2) of viewing angles. \n elev = torch.linspace(180, -180, 2)\n azim = torch.linspace(0, 0, 2)\n \n focal_length = torch.linspace(-1, 1, 2)\n R, T = look_at_view_transform(dist=focal_length, elev=elev, azim=azim)\n cameras = FoVOrthographicCameras(device=self.device, R=R, T=T)\n \n target_images = self.renderer(meshes, cameras=cameras)\n target_masks, _ = self.renderer_silhouette(meshes, cameras=cameras)\n \n return target_images, target_masks\n \n \n def __get_renderer(self, render_size, focal_distance=2):\n \n lights = PointLights(device=self.device, location=[[0.0, 0.0, -3.0]],\n ambient_color=((1,1,1),),diffuse_color=((0,0,0),),specular_color=((0,0,0),))\n \n self.focal_distance = focal_distance\n R, T = look_at_view_transform(focal_distance, -180, 0) # 180 -> -180\n cameras = FoVPerspectiveCameras(device=self.device, R=R, T=T) # silhouette only!\n # cameras = FoVOrthographicCameras(device=self.device, R=R, T=T)\n \n self.cameras = cameras\n \n raster_settings = RasterizationSettings(\n image_size=render_size, \n blur_radius=0.0, \n faces_per_pixel=1, \n )\n sigma = 1e-4\n gamma = 1e-4\n blend_params = BlendParams(sigma=sigma, gamma=gamma, background_color=(255, 255, 255))\n \n renderer = MeshRenderer(\n rasterizer=MeshRasterizer(\n cameras=cameras,\n raster_settings=raster_settings\n ),\n shader = SoftPhongShader(\n device=self.device, \n cameras=cameras,\n lights=lights,\n # blend_params=blend_params\n )\n )\n \n # ref: https://github.com/facebookresearch/pytorch3d/issues/470\n sigma = 1e-8\n gamma = 1e-8\n blend_params = BlendParams(sigma=sigma, gamma=gamma, background_color=(0, 0, 0))\n raster_settings = RasterizationSettings(\n image_size=render_size, \n blur_radius=np.log(1. / 1e-8 - 1.)*sigma, # blur_radius=np.log(1. 
/ 1e-8 - 1.)*sigma, \n faces_per_pixel=10, \n bin_size=None, \n max_faces_per_bin=None\n )\n \n renderer_silhouette = MeshRendererWithFragments(\n rasterizer=MeshRasterizer(\n cameras=cameras, \n raster_settings=raster_settings\n ),\n shader=SoftSilhouetteShader(blend_params=blend_params)\n # shader=SoftSilhouetteShader(blend_params=blend_params)\n )\n\n return renderer, renderer_silhouette"
},
{
"identifier": "extract_ampl_phase",
"path": "utils/frequency.py",
"snippet": "def extract_ampl_phase(input_img):\n \n fft_img = torch.fft.rfftn(input_img.clone())\n fft_im = torch.stack((fft_img.real, fft_img.imag), -1)\n \n # fft_im: size should be bx3xhxwx2\n fft_amp = fft_im[:,:,:,:,0]**2 + fft_im[:,:,:,:,1]**2\n fft_amp = torch.sqrt(fft_amp) # amplitude\n fft_pha = torch.atan2( fft_im[:,:,:,:,1], fft_im[:,:,:,:,0]) # phase\n return fft_amp, fft_pha"
},
{
"identifier": "Binarize",
"path": "utils/binary_function.py",
"snippet": "class Binarize(Function):\n clip_value = 1\n\n @staticmethod\n def forward(ctx, inp):\n ctx.save_for_backward(inp)\n\n output = inp.sign()\n\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n inp: Tensor = ctx.saved_tensors[0]\n\n clipped = inp.abs() <= Binarize.clip_value\n\n output = torch.zeros(inp.size()).to(grad_output.device)\n output[clipped] = 1\n output[~clipped] = 0\n\n return output * grad_output"
},
{
"identifier": "TVLoss",
"path": "utils/tvl_loss.py",
"snippet": "class TVLoss(nn.Module):\n def __init__(self, weight=1):\n super(TVLoss,self).__init__()\n self.TVLoss_weight = weight\n\n def forward(self, x):\n batch_size = x.size()[0]\n h_x = x.size()[2]\n w_x = x.size()[3]\n count_h = self._tensor_size(x[:,:,1:,:])\n count_w = self._tensor_size(x[:,:,:,1:])\n h_tv = torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:]),2).sum()\n w_tv = torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1]),2).sum()\n \n # 2023.03.29 +2nearest\n # h_tv = torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:]),2).sum() + torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:])[:, :, ::2, :],2).sum()\n # w_tv = torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1]),2).sum() + torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1])[:, :, :, ::2],2).sum()\n \n return self.TVLoss_weight*2*(h_tv/count_h+w_tv/count_w)/batch_size\n\n def _tensor_size(self,t):\n return t.size()[1]*t.size()[2]*t.size()[3]"
},
{
"identifier": "TVMaskLoss",
"path": "utils/tvl_loss.py",
"snippet": "class TVMaskLoss(nn.Module):\n def __init__(self, weight=1):\n super(TVMaskLoss,self).__init__()\n self.TVMaskLoss_weight = weight\n self.non_idx = None\n\n def forward(self, mask, x):\n if self.non_idx is None:\n non_idx = mask.nonzero()\n self.non_idx = non_idx.split(1, dim=1)\n \n tmp_mask = torch.ones(1,3,512,512).cuda()\n tmp_mask[self.non_idx] = 0 # 排除非UV区域.\n \n batch_size = x.size()[0]\n h_x = x.size()[2]\n w_x = x.size()[3]\n \n x = x * tmp_mask\n \n count_h = self._tensor_size(x[:,:,1:,:])\n count_w = self._tensor_size(x[:,:,:,1:])\n # h_tv = torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:]),2).sum()\n # w_tv = torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1]),2).sum()\n \n # 2023.03.29 +2nearest\n h_tv = torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:]),2).sum() + torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:])[:, :, ::2, :],2).sum()\n w_tv = torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1]),2).sum() + torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1])[:, :, :, ::2],2).sum()\n return self.TVMaskLoss_weight*2*(h_tv/count_h+w_tv/count_w)/batch_size\n\n def _tensor_size(self,t):\n return t.size()[1]*t.size()[2]*t.size()[3]"
},
{
"identifier": "DeformationGraph",
"path": "lib/deformation_graph.py",
"snippet": "class DeformationGraph(nn.Module):\n \n def __init__(self, vert_number=9648, radius=0.015, k=9, sampling_strategy='qslim'): \n super().__init__()\n \n self.radius = radius\n self.k = k\n self.max_neigh_num = 40\n self.sampling_strategy = sampling_strategy\n self.one_ring_neigh = []\n self.nodes_idx = None\n self.weights = None\n self.influence_nodes_idx = []\n self.dists = []\n \n self.vert_number = vert_number\n\n def construct_graph(self, category_name, vertices=None, faces=None):\n \n transform_fp = \"transform_{}.pkl\".format(category_name)\n if self.sampling_strategy == 'qslim':\n m = Mesh(v=vertices, f=faces)\n if os.path.exists(transform_fp):\n with open(transform_fp, 'rb') as f:\n tmp = pickle.load(f, encoding='latin1')\n M, A, D = tmp['M'], tmp['A'], tmp['D']\n else:\n M, A, D = generate_transform_matrices(m, [20, 20])\n tmp = {'M': M, 'A': A, 'D': D}\n with open(transform_fp, 'wb') as fp:\n pickle.dump(tmp, fp)\n # import pdb; pdb.set_trace()\n nodes_v = M[1].v\n self.nodes_idx = D[0].nonzero()[1]\n adj_mat = A[1].toarray()\n \n for i in range(adj_mat.shape[0]):\n self.one_ring_neigh.append(adj_mat[i].nonzero()[0].tolist() + [i]*(self.max_neigh_num-len(adj_mat[i].nonzero()[0])))\n self.one_ring_neigh = torch.tensor(self.one_ring_neigh).cuda() \n\n # construct kd tree\n kdtree = KDTree(nodes_v)\n \n for vert in vertices:\n dist, idx = kdtree.query(vert, k=self.k)\n self.dists.append(dist)\n self.influence_nodes_idx.append(idx)\n \n self.weights = -np.log(np.array(self.dists)+eps)\n \n # weights normalization\n self.weights = torch.tensor(self.weights/col(self.weights.sum(1))).cuda()\n self.influence_nodes_idx = torch.tensor(self.influence_nodes_idx).cuda()\n \n def forward(self, vertices, opt_d_rotations, opt_d_translations):\n \n opt_d_rotmat = batch_rodrigues(opt_d_rotations[0]).unsqueeze(0) # 1 * N_c * 3 * 3\n nodes = vertices[self.nodes_idx, ...]\n \n opt_d_rotmat = opt_d_rotmat.cuda()\n opt_d_translations = opt_d_translations.cuda()\n\n influence_nodes_v = nodes[self.influence_nodes_idx.reshape((-1,))]# .reshape((28944(self.k * 9648),3,3))\n opt_d_r = opt_d_rotmat[0, self.influence_nodes_idx.reshape((-1,)), ...]# .reshape((28944,3,3,3)) \n opt_d_t = opt_d_translations[0, self.influence_nodes_idx.reshape((-1,)), ...]# .reshape((28944,3,3))\n \n warpped_vertices = (torch.einsum('bij, bkj->bki', opt_d_r.cuda(), (vertices.repeat_interleave(self.k, dim=0) - influence_nodes_v).unsqueeze(1)).squeeze(1) \\\n + influence_nodes_v + opt_d_t.cuda()).reshape((self.vert_number, self.k, 3)) * (self.weights.unsqueeze(-1))\n warpped_vertices = warpped_vertices.sum(axis=1).float()\n\n diff_term = (nodes + opt_d_translations[0].cuda()).repeat_interleave(self.max_neigh_num, dim=0) - \\\n (nodes[self.one_ring_neigh.reshape((-1,))] + opt_d_translations[0][self.one_ring_neigh.reshape((-1,))].cuda()) - \\\n torch.einsum('bij, bkj->bki', opt_d_rotmat[0].repeat_interleave(self.max_neigh_num, dim=0).cuda(), \\\n (nodes.repeat_interleave(self.max_neigh_num, dim=0) - nodes[self.one_ring_neigh.reshape((-1,))]).unsqueeze(1)).squeeze(1)\n arap_loss = torch.sum(diff_term ** 2) / self.nodes_idx.shape[0]\n \n return warpped_vertices.unsqueeze(0), arap_loss"
},
{
"identifier": "generate_transform_matrices_coma",
"path": "lib/mesh_sampling.py",
"snippet": "def generate_transform_matrices_coma(mesh, factors):\n \"\"\"Generates len(factors) meshes, each of them is scaled by factors[i] and\n computes the transformations between them.\n Returns:\n M: a set of meshes downsampled from mesh by a factor specified in factors.\n A: Adjacency matrix for each of the meshes\n D: csc_matrix Downsampling transforms between each of the meshes\n U: Upsampling transforms between each of the meshes\n F: a list of faces\n \"\"\"\n\n factors = map(lambda x: 1.0 / x, factors)\n M, A, D, U, F = [], [], [], [], []\n F.append(mesh.f) # F[0]\n A.append(get_vert_connectivity(mesh.v, mesh.f).astype('float32')) # A[0]\n M.append(mesh) # M[0]\n\n for factor in factors:\n ds_f, ds_D = qslim_decimator_transformer(M[-1], factor=factor)\n D.append(ds_D.astype('float32'))\n new_mesh_v = ds_D.dot(M[-1].v)\n new_mesh = Mesh(v=new_mesh_v, f=ds_f)\n F.append(new_mesh.f)\n M.append(new_mesh)\n A.append(\n get_vert_connectivity(new_mesh.v, new_mesh.f).tocoo())\n U.append(setup_deformation_transfer(M[-1], M[-2]).astype('float32'))\n\n return M, A, D, U, F"
},
{
"identifier": "to_edge_index",
"path": "lib/utils_dg.py",
"snippet": "def to_edge_index(mat):\n return torch.LongTensor(np.vstack(mat.nonzero()))"
},
{
"identifier": "to_sparse",
"path": "lib/utils_dg.py",
"snippet": "def to_sparse(spmat):\n return torch.sparse.FloatTensor(\n torch.LongTensor([spmat.tocoo().row,\n spmat.tocoo().col]),\n torch.FloatTensor(spmat.tocoo().data), torch.Size(spmat.tocoo().shape))"
},
{
"identifier": "get_vert_connectivity",
"path": "lib/utils_dg.py",
"snippet": "def get_vert_connectivity(mesh_v, mesh_f):\n \"\"\"Returns a sparse matrix (of size #verts x #verts) where each nonzero\n element indicates a neighborhood relation. For example, if there is a\n nonzero element in position (15,12), that means vertex 15 is connected\n by an edge to vertex 12.\"\"\"\n\n vpv = sp.csc_matrix((len(mesh_v),len(mesh_v)))\n\n # for each column in the faces...\n for i in range(3):\n IS = mesh_f[:,i]\n JS = mesh_f[:,(i+1)%3]\n data = np.ones(len(IS))\n ij = np.vstack((row(IS.flatten()), row(JS.flatten())))\n mtx = sp.csc_matrix((data, ij), shape=vpv.shape)\n vpv = vpv + mtx + mtx.T\n\n return vpv"
},
{
"identifier": "scipy_to_torch_sparse",
"path": "lib/utils_dg.py",
"snippet": "def scipy_to_torch_sparse(scp_matrix):\n values = scp_matrix.data\n indices = np.vstack((scp_matrix.row, scp_matrix.col))\n i = torch.LongTensor(indices)\n v = torch.FloatTensor(values)\n shape = scp_matrix.shape\n\n sparse_tensor = torch.sparse.FloatTensor(i, v, torch.Size(shape))\n return sparse_tensor"
},
{
"identifier": "DeformGraphModel",
"path": "models/deform_model.py",
"snippet": "class DeformGraphModel(torch.nn.Module):\n def __init__(self, deform_graph, renderer, binarization, canonical_mesh, std_lst, lr_rate=5e-4, savedir=\"1017\"):\n super(DeformGraphModel, self).__init__()\n \n self.device = torch.device(\"cuda:0\")\n \n self.deform_graph = deform_graph\n self.cloth_renderer = renderer\n self.binarization = binarization\n self.canonical_mesh = canonical_mesh\n \n self.step_size = lr_rate\n \n self.device = torch.device(\"cuda:0\")\n self.std_lst = std_lst[0]\n self.savedir = savedir\n # self.std_lst_b = std_lst[1]\n \n def iterative_deformgraph(self,\n batch_id,\n vertex_number,\n inputs,\n contours,\n verts,\n opt_d_rotations,\n opt_d_translations,\n times=101):\n \n verts_for_dg = verts.detach()\n verts_for_dg.requires_grad = False\n \n surface_optimizer = torch.optim.Adam([\n {'params': [opt_d_rotations]},\n {'params': [opt_d_translations]}\n ], lr=self.step_size)\n \n w_dg = 50\n w_kp = 0.001\n w_lap = 100\n w_norm = 10\n w_arap = 50\n w_edge = 1\n \n min_loss = 10000\n loop = tqdm(range(times))\n \n inputs_front, inputs_back = inputs[0].to(self.device).float(), inputs[1].to(self.device).float()\n landmark_front, landmark_back = contours[0].to(self.device).float(), contours[1].to(self.device).float() # landmark (2023.02.15)\n \n \n for i in loop:\n surface_optimizer.zero_grad()\n \n # arap: as rigid as possible\n warpped_vertices, loss_arap = self.deform_graph(verts_for_dg, opt_d_rotations, opt_d_translations)\n warpped_vertices = warpped_vertices.squeeze()\n \n src_mesh = Meshes([warpped_vertices], [self.cloth_renderer.faces], self.cloth_renderer.texture)\n \n # front&back\n masks = torch.stack([inputs_front[0], inputs_back[0]]).squeeze()\n \n # mn\n if landmark_back.shape[1] < landmark_front.shape[1]:\n _cc = [landmark_back, torch.zeros(1,1,1,2).cuda()] # original\n # _cc = [landmark_back, torch.zeros(1,1,2).cuda()] # blender\n landmark_back = torch.cat(_cc, 1)\n \n landmarks_canon = torch.stack([landmark_front.squeeze(), landmark_back.squeeze()])\n \n render_mask, specific_verts_2d = self.cloth_renderer.render_silhouette(warpped_vertices, side='both', landmark=True, vertex_number=vertex_number)\n \n # mn\n if specific_verts_2d[0].shape[0] != specific_verts_2d[1].shape[0]:\n _dd = [specific_verts_2d[1], torch.zeros(1,2).cuda()]\n specific_verts_2d[1] = torch.cat(_dd, 0)\n \n render_mask = render_mask[..., 3]\n render_mask_out = self.binarization(render_mask)\n \n loss_dg = nn.MSELoss()(render_mask_out, masks) + 0.3 * mask_iou(render_mask_out, masks) # [2, 512, 512] [2, 512, 512]\n loss_kp = nn.MSELoss()(torch.stack(specific_verts_2d), landmarks_canon)\n edge_mask = edge_extraction(masks)[:, 0].float()\n edge_render_mask = edge_extraction(render_mask_out)[:, 0].float()\n \n loss_edge = nn.L1Loss()(edge_render_mask*render_mask_out, edge_mask)\n \n loss_lap = mesh_laplacian_smoothing(src_mesh, method=\"uniform\")\n loss_norm = mesh_normal_consistency(src_mesh)\n \n # loss = w_dg*loss_dg + w_kp*loss_kp + w_norm*loss_norm + w_arap*loss_arap + w_edge*loss_edge\n loss = w_dg*loss_dg + w_kp*loss_kp + w_norm*loss_norm + w_arap*loss_arap + w_edge*loss_edge # + w_lap*loss_lap + w_norm*loss_norm\n \n loss.backward()\n surface_optimizer.step()\n \n with torch.no_grad():\n render_mask, specific_verts_2d = self.cloth_renderer.render_silhouette(warpped_vertices, side='both', landmark=True, vertex_number=vertex_number)\n f_render_mask, b_render_mask = render_mask[0, ..., 3], render_mask[1, ..., 3]\n f_render_mask, b_render_mask = 
self.binarization(f_render_mask), self.binarization(b_render_mask)\n \n _f_2d, _b_2d = specific_verts_2d[0].cpu().numpy().copy(), specific_verts_2d[1].cpu().numpy().copy()\n \n loop.set_description('[Total]{0:.2f}[Mask]{1:.2f}[Nor]{2:.2f}[KP]{3:.2f}[ARAP]{4:.2f}[Edge]{5:.2f}'.format(loss, w_dg * loss_dg, w_norm*loss_norm, w_kp*loss_kp, w_arap*loss_arap, w_edge*loss_edge))\n \n if float(loss) < min_loss:\n min_loss = float(loss)\n \n aaa1 = f_render_mask.detach().cpu().numpy() * 255.\n aaa2 = b_render_mask.detach().cpu().numpy() * 255.\n \n bbb1 = inputs_front[0][0].unsqueeze(-1).cpu().numpy() * 255.\n bbb2 = inputs_back[0][0].unsqueeze(-1).cpu().numpy() * 255.\n \n if len(aaa1.shape) == 2:\n aaa1 = np.expand_dims(aaa1, -1)\n aaa2 = np.expand_dims(aaa2, -1)\n \n ccc1 = aaa1 * 0.4 + bbb1\n ccc2 = aaa2 * 0.4 + bbb2\n cv2.putText(ccc1, \"front\", (int(10), int(40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (193, 33, 240), 2, cv2.LINE_AA) \n cv2.putText(ccc2, \"back\", (int(10), int(40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (193, 33, 240), 2, cv2.LINE_AA) \n \n for iii, vvvv in enumerate(_f_2d):\n cv2.circle(ccc1, (int(vvvv[0]), int(vvvv[1])), 3, (193, 33, 240), -1)\n cv2.putText(ccc1, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (200, 100, 90), 2, cv2.LINE_AA) \n for iii, vvvv in enumerate(landmarks_canon[0]):\n cv2.circle(ccc1, (int(vvvv[0]), int(vvvv[1])), 3, (193, 33, 240), -1)\n cv2.putText(ccc1, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (80, 40, 200), 2, cv2.LINE_AA) \n \n for iii, vvvv in enumerate(_b_2d):\n if int(vvvv[0]) != 0:\n cv2.circle(ccc2, (int(vvvv[0]), int(vvvv[1])), 3, (193, 33, 240), -1)\n cv2.putText(ccc2, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (200, 100, 90), 2, cv2.LINE_AA) \n \n for iii, vvvv in enumerate(landmarks_canon[1]):\n if int(vvvv[0]) != 0:\n cv2.circle(ccc2, (int(vvvv[0]), int(vvvv[1])), 3, (193, 33, 240), -1)\n cv2.putText(ccc2, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (80, 40, 200), 2, cv2.LINE_AA) \n \n \n cv2.imwrite(\"experiments/{0}/{1}_step2_min.jpg\".format(self.savedir, batch_id), cv2.hconcat([(ccc1.astype(np.uint8)), ccc2.astype(np.uint8)]))\n \n \n ddd1, ddd2 = edge_render_mask[0].unsqueeze(-1).cpu().numpy() * 255., edge_render_mask[1].unsqueeze(-1).cpu().numpy() * 255.\n cv2.imwrite(\"experiments/{0}/{1}_step2_edge.jpg\".format(self.savedir, batch_id), cv2.hconcat([(ddd1.astype(np.uint8)), ddd2.astype(np.uint8)]))\n \n minimum_vertices = warpped_vertices.clone()\n best_opt_d_rot = opt_d_rotations.clone()\n best_opt_d_trans = opt_d_translations.clone()\n \n # if i >= 50:\n # if i % 50 == 0:\n # save_obj(\"experiments/batch_result/mesh/0505_{}.obj\".format(i), warpped_vertices.detach(), self.cloth_renderer.faces)\n # else:\n # if i % 5 == 0:\n # save_obj(\"experiments/batch_result/mesh/0505_{}.obj\".format(i), warpped_vertices.detach(), self.cloth_renderer.faces) \n\n if i % 500 == 0:\n aaa1 = f_render_mask.detach().cpu().numpy() * 255.\n aaa2 = b_render_mask.detach().cpu().numpy() * 255.\n \n bbb1 = inputs_front[0][0].unsqueeze(-1).cpu().numpy() * 255.\n bbb2 = inputs_back[0][0].unsqueeze(-1).cpu().numpy() * 255.\n \n if len(aaa1.shape) == 2:\n aaa1 = np.expand_dims(aaa1, -1)\n aaa2 = np.expand_dims(aaa2, -1)\n \n ccc1 = aaa1 * 0.4 + bbb1\n ccc2 = aaa2 * 0.4 + bbb2\n cv2.putText(ccc1, \"front\", (int(10), int(40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (193, 33, 240), 2, cv2.LINE_AA) \n cv2.putText(ccc2, \"back\", (int(10), int(40)), 
cv2.FONT_HERSHEY_SIMPLEX, 1, (193, 33, 240), 2, cv2.LINE_AA) \n \n for iii, vvvv in enumerate(_f_2d):\n cv2.circle(ccc1, (int(vvvv[0]), int(vvvv[1])), 3, (80, 40, 200), -1)\n cv2.putText(ccc1, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (200, 100, 90), 2, cv2.LINE_AA) \n for iii, vvvv in enumerate(landmarks_canon[0]):\n cv2.circle(ccc1, (int(vvvv[0]), int(vvvv[1])), 3, (80, 40, 200), -1)\n cv2.putText(ccc1, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (80, 40, 200), 2, cv2.LINE_AA) \n \n for iii, vvvv in enumerate(_b_2d):\n if int(vvvv[0]) != 0:\n cv2.circle(ccc2, (int(vvvv[0]), int(vvvv[1])), 3, (80, 40, 200), -1)\n cv2.putText(ccc2, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (200, 100, 90), 2, cv2.LINE_AA) \n \n for iii, vvvv in enumerate(landmarks_canon[1]):\n if int(vvvv[0]) != 0:\n cv2.circle(ccc2, (int(vvvv[0]), int(vvvv[1])), 3, (80, 40, 200), -1)\n cv2.putText(ccc2, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (80, 40, 200), 2, cv2.LINE_AA) \n \n cv2.imwrite(\"experiments/{0}/{1}_step2_{2}.jpg\".format(self.savedir, batch_id, i), cv2.hconcat([(ccc1.astype(np.uint8)), ccc2.astype(np.uint8)]))\n \n \n print(\"[cloth2tex] [deformation graph parameter]\", opt_d_rotations.shape, opt_d_translations.shape)\n return minimum_vertices, best_opt_d_rot, best_opt_d_trans\n \n def forward(self, x):\n out = self.linear(x)\n # out = self.sigmoid(out)\n return out"
}
] | import argparse
import datetime
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pickle
import os
import os.path as osp
import torchvision
import torchvision.transforms as transforms
import torch.nn.functional as F
import thinplate as tps
import time
import matplotlib.pyplot as plt
import importlib
import random
import json
import cv2
from torchvision.models.feature_extraction import create_feature_extractor, get_graph_node_names
from renderer.cloth_renderer import ClothRenderer
from PIL import Image
from utils.frequency import extract_ampl_phase
from utils.binary_function import Binarize
from utils.tvl_loss import TVLoss, TVMaskLoss
from tqdm import tqdm
from pytorch3d.io import load_obj, save_obj
from itertools import chain
from pytorch3d.structures import Meshes
from pytorch3d.transforms import RotateAxisAngle
from pytorch3d.loss import (
mesh_edge_loss,
mesh_laplacian_smoothing,
mesh_normal_consistency,
)
from lib.deformation_graph import DeformationGraph
from lib.mesh_sampling import generate_transform_matrices_coma
from lib.utils_dg import to_edge_index, to_sparse, get_vert_connectivity, scipy_to_torch_sparse
from models import DeformGraphModel
from torch_geometric.transforms import FaceToEdge
from torch_geometric.data import Data
from psbody.mesh import Mesh
from torch_geometric.io import read_ply | 8,630 | # -*- coding: utf-8 -*-
"""
@date: 2023.03.29-31 week13
@func: PhaseI inference code.
"""
class Trainer(object):
def __init__(self, objfile, savedir, resolution=512, focal_distance=2, verts_num=9648, scale_factor=1.0):
self.device = torch.device("cuda")
#set mesh and visualizer----------------------
| # -*- coding: utf-8 -*-
"""
@date: 2023.03.29-31 week13
@func: PhaseI inference code.
"""
class Trainer(object):
def __init__(self, objfile, savedir, resolution=512, focal_distance=2, verts_num=9648, scale_factor=1.0):
self.device = torch.device("cuda")
#set mesh and visualizer---------------------- | self.cloth_renderer = ClothRenderer(objfile, resolution, focal_distance, scale_factor) | 0 | 2023-10-17 11:30:53+00:00 | 12k |
uukuguy/multi_loras | multi_loras/slora/router/model_infer/model_rpc.py | [
{
"identifier": "sample",
"path": "multi_loras/slora/router/model_infer/post_process.py",
"snippet": "def sample(logits, batch:InferBatch):\n logits = logits.contiguous()\n presence_penalties, frequency_penalties, temperatures, top_ps, top_ks, p_token_ids, p_token_counts, p_cumsum_seq_len, p_max_len_in_batch = batch.get_post_sample_tensors()\n \n apply_penalty(logits, presence_penalties, frequency_penalties, p_token_ids, p_token_counts, p_cumsum_seq_len, p_max_len_in_batch) \n logits.div_(temperatures.view((-1, 1)))\n probs = torch.softmax(logits, dim=-1)\n probs_sort, probs_idx = _top_p_top_k(probs, top_ps, top_ks)\n sampled_index = torch.multinomial(probs_sort, num_samples=1, replacement=True)\n \n batch_next_token_ids = torch.gather(probs_idx, dim=1, index=sampled_index)\n batch_next_token_probs = torch.gather(probs_sort, dim=1, index=sampled_index)\n \n return batch_next_token_ids.view(-1), batch_next_token_probs.view(-1)"
},
{
"identifier": "InferBatch",
"path": "multi_loras/slora/router/model_infer/infer_batch.py",
"snippet": "class InferBatch:\n batch_id: int\n requests: List\n requests_idx_mapping: Dict[int, int]\n\n input_ids: torch.Tensor\n\n all_input_ids: List[List[int]]\n input_lengths: List[int]\n \n out_token_id_counts: List\n sampling_param_list : List[InferSamplingParams]\n\n input_ids: torch.Tensor\n\n nopad_total_token_num: int\n nopad_max_len_in_batch: int\n nopad_b_loc: torch.Tensor\n nopad_b_start_loc: torch.Tensor\n nopad_b_seq_len: torch.Tensor\n mem_manager: MemoryManager\n\n adapter_dirs: List[str]\n\n @classmethod\n @torch.no_grad()\n def init_batch(cls, batch_id, requests, dtype: torch.dtype, device: torch.device, mem_manager:MemoryManager, vocab_size: int):\n\n input_lengths = []\n all_input_ids = []\n requests_idx_mapping = {}\n \n out_token_id_counts = []\n sampling_param_list = []\n \n nopad_total_token_num = 0\n nopad_max_len_in_batch = 0\n nopad_b_loc = torch.empty((len(requests), setting['max_req_total_len'] + 12), dtype=torch.long, device='cuda')\n nopad_b_start_loc = torch.zeros(len(requests), dtype=torch.int32, device='cuda')\n\n # here sort the requests by adapter\n # requests.sort(key=lambda x: x[\"adapter_dir\"] if x[\"adapter_dir\"] is not None else \"\")\n\n adapter_dirs = []\n\n for i, r in enumerate(requests):\n # request id -> idx in list mapping\n requests_idx_mapping[r['request_id']] = i\n\n tokenized_input = r['input_id']\n\n input_length = len(tokenized_input)\n input_lengths.append(input_length)\n all_input_ids.append(tokenized_input)\n out_token_id_counts.append(collections.defaultdict(int))\n\n # postprocessor\n sampling_param = r[\"sampling_param\"]\n sampling_param[\"vocab_size\"] = vocab_size\n sampling_param_list.append(InferSamplingParams(**sampling_param))\n \n nopad_total_token_num += input_length\n nopad_max_len_in_batch = max(nopad_max_len_in_batch, input_length)\n\n adapter_dirs.append(r[\"adapter_dir\"])\n \n nopad_b_seq_len = torch.tensor(input_lengths, dtype=torch.int32, device=\"cuda\")\n nopad_b_start_loc[1:] = torch.cumsum(nopad_b_seq_len, dim=0, dtype=torch.int32)[0:-1]\n if len(requests) > 1:\n input_ids = np.concatenate(all_input_ids, dtype=np.int64)\n else:\n input_ids = all_input_ids[0]\n\n # Create tensors on device\n input_ids = torch.tensor(input_ids, dtype=torch.int64, device=device)\n\n return cls(\n batch_id=batch_id,\n requests=requests,\n requests_idx_mapping=requests_idx_mapping,\n input_ids=input_ids,\n input_lengths=input_lengths,\n all_input_ids=all_input_ids,\n nopad_total_token_num=nopad_total_token_num,\n nopad_max_len_in_batch=nopad_max_len_in_batch,\n nopad_b_loc=nopad_b_loc,\n nopad_b_start_loc=nopad_b_start_loc,\n nopad_b_seq_len=nopad_b_seq_len,\n out_token_id_counts=out_token_id_counts,\n sampling_param_list=sampling_param_list,\n mem_manager=mem_manager,\n adapter_dirs=adapter_dirs,\n )\n \n @torch.no_grad()\n def free_self(self):\n remove_index = []\n for idx in range(len(self)):\n remove_index.append(self.nopad_b_loc[idx, (self.nopad_max_len_in_batch - 1) - (self.nopad_b_seq_len[idx] - 1): (self.nopad_max_len_in_batch - 1)])\n remove_index = torch.cat(remove_index, dim=-1)\n self.mem_manager.free(remove_index)\n return\n \n # @calculate_time(show=True, min_cost_ms=0)\n @torch.no_grad()\n def filter(self, request_ids: List[int]):\n if len(request_ids) == 0:\n raise ValueError(\"Batch must have at least one request\")\n if len(request_ids) == len(self):\n return self\n requests_idx_mapping = {}\n indices = []\n requests = []\n all_input_ids = []\n input_lengths = []\n\n nopad_total_token_num = 0\n 
nopad_max_len_in_batch = 0\n nopad_b_loc = torch.empty((len(request_ids), setting['max_req_total_len'] + 12),\n dtype=torch.long, device='cuda')\n nopad_b_start_loc = torch.zeros(len(request_ids), dtype=torch.int32, device='cuda')\n nopad_b_seq_len = torch.zeros(len(request_ids), dtype=torch.int32, device='cuda')\n\n left_idx = []\n for i, request_id in enumerate(request_ids):\n idx = self.requests_idx_mapping[request_id]\n left_idx.append(idx)\n \n left_idx_set = set(left_idx)\n remove_index = []\n for idx in range(len(self)):\n if idx not in left_idx_set:\n remove_index.append(self.nopad_b_loc[idx, (self.nopad_max_len_in_batch - 1) - (self.nopad_b_seq_len[idx] - 1): (self.nopad_max_len_in_batch - 1)])\n remove_index = torch.cat(remove_index, dim=-1)\n \n # mark_start(\"filter free mem manager\")\n self.mem_manager.free(remove_index)\n # mark_end(\"filter free mem manager\")\n\n # ''' sort according to adapters '''\n # # Create a list of tuples containing request_id and its index\n # request_with_idx = [(self.requests[self.requests_idx_mapping[request_id]], request_id)\n # for request_id in request_ids]\n # # Sort the list based on the 'adapter' field of the request\n # request_with_idx.sort(key=lambda x: x[0][\"adapter_dir\"] if x[0][\"adapter_dir\"] is not None else \"\")\n\n # sorted_request_ids = [item[1] for item in request_with_idx]\n # request_ids = sorted_request_ids \n # ''' end '''\n\n nopad_max_len_in_batch = 0\n for i, request_id in enumerate(request_ids):\n idx = self.requests_idx_mapping[request_id]\n indices.append(idx)\n \n nopad_b_seq_len[:] = self.nopad_b_seq_len[indices]\n nopad_max_len_in_batch = torch.max(nopad_b_seq_len).item()\n nopad_b_start_loc[1:] = torch.cumsum(nopad_b_seq_len, dim=0, dtype=torch.int32)[0:-1]\n nopad_total_token_num = torch.sum(nopad_b_seq_len).item()\n \n nopad_b_loc[:, 0 : (nopad_max_len_in_batch - 1)] = self.nopad_b_loc[indices, (self.nopad_max_len_in_batch - 1) - (nopad_max_len_in_batch - 1): (self.nopad_max_len_in_batch - 1)]\n adapter_dirs = []\n for i, request_id in enumerate(request_ids):\n idx = self.requests_idx_mapping[request_id]\n requests_idx_mapping[request_id] = i\n requests.append(self.requests[idx])\n all_input_ids.append(self.all_input_ids[idx])\n input_lengths.append(self.input_lengths[idx])\n adapter_dirs.append(self.requests[idx][\"adapter_dir\"])\n \n input_ids = self.input_ids[indices]\n\n return InferBatch(\n batch_id=self.batch_id,\n requests=requests,\n requests_idx_mapping=requests_idx_mapping,\n input_ids=input_ids,\n input_lengths=input_lengths,\n all_input_ids=all_input_ids,\n nopad_total_token_num=nopad_total_token_num,\n nopad_max_len_in_batch=nopad_max_len_in_batch,\n nopad_b_loc=nopad_b_loc,\n nopad_b_start_loc=nopad_b_start_loc,\n nopad_b_seq_len=nopad_b_seq_len,\n out_token_id_counts=[self.out_token_id_counts[_i] for _i in indices],\n sampling_param_list=[self.sampling_param_list[_i] for _i in indices],\n mem_manager=self.mem_manager,\n adapter_dirs=adapter_dirs,\n )\n\n\n @classmethod\n @torch.no_grad()\n def merge(cls, batch1, batch2):\n requests = batch1.requests + batch2.requests\n requests_idx_mapping = {}\n new_batch_size = len(batch1) + len(batch2)\n\n input_ids = batch1.input_ids.new_empty(new_batch_size)\n all_input_ids = []\n input_lengths = []\n out_token_id_counts=[]\n sampling_param_list=[]\n\n cumulative_batch_size = 0\n nopad_total_token_num = batch1.nopad_total_token_num + batch2.nopad_total_token_num\n nopad_max_len_in_batch = max(batch1.nopad_max_len_in_batch, batch2 
.nopad_max_len_in_batch)\n \n nopad_b_loc = torch.empty((new_batch_size, setting['max_req_total_len'] + 12), dtype=torch.long, device='cuda')\n nopad_b_start_loc = torch.zeros(new_batch_size, dtype=torch.int32, device='cuda')\n nopad_b_seq_len = torch.zeros(new_batch_size, dtype=torch.int32, device='cuda')\n nopad_start_loc_len_temp = 0\n adapter_dirs = []\n batches = [batch1, batch2]\n for i, batch in enumerate(batches):\n if i == 0:\n requests_idx_mapping = batch.requests_idx_mapping\n else:\n for k, v in batch.requests_idx_mapping.items():\n requests_idx_mapping[k] = v + cumulative_batch_size\n start_index = cumulative_batch_size\n end_index = cumulative_batch_size + len(batch)\n input_ids[start_index:end_index] = batch.input_ids\n nopad_b_seq_len[start_index: end_index] = batch.nopad_b_seq_len\n nopad_b_start_loc[start_index: end_index] = batch.nopad_b_start_loc + nopad_start_loc_len_temp\n nopad_start_loc_len_temp = nopad_b_start_loc[end_index - 1] + nopad_b_seq_len[end_index - 1]\n nopad_b_loc[start_index: end_index, nopad_max_len_in_batch - batch.nopad_max_len_in_batch: nopad_max_len_in_batch -\n 1] = batch.nopad_b_loc[:, :batch.nopad_max_len_in_batch - 1]\n adapter_dirs += batch.adapter_dirs\n\n all_input_ids.extend(batch.all_input_ids)\n\n input_lengths.extend(batch.input_lengths)\n out_token_id_counts.extend(batch.out_token_id_counts)\n sampling_param_list.extend(batch.sampling_param_list)\n # Update\n cumulative_batch_size += len(batch)\n \n nopad_b_loc[:, nopad_max_len_in_batch - 1] = nopad_total_token_num - \\\n new_batch_size + torch.arange(0, new_batch_size, dtype=torch.int32, device='cuda')\n return InferBatch(\n batch_id=batches[0].batch_id,\n requests=requests,\n requests_idx_mapping=requests_idx_mapping,\n input_ids=input_ids,\n input_lengths=input_lengths,\n all_input_ids=all_input_ids,\n nopad_total_token_num=nopad_total_token_num,\n nopad_max_len_in_batch=nopad_max_len_in_batch,\n nopad_b_loc=nopad_b_loc,\n nopad_b_start_loc=nopad_b_start_loc,\n nopad_b_seq_len=nopad_b_seq_len,\n out_token_id_counts=out_token_id_counts,\n sampling_param_list=sampling_param_list,\n mem_manager=batches[0].mem_manager,\n adapter_dirs=adapter_dirs,\n )\n\n def __len__(self):\n return len(self.requests)\n \n \n def get_post_sample_tensors(self):\n presence_penalties: List[float] = []\n frequency_penalties: List[float] = []\n temperatures: List[float] = []\n top_ps: List[float] = []\n top_ks: List[int] = []\n p_token_ids: List[int] = []\n p_token_counts: List[int] = []\n p_seq_len: List[int] = [0,]\n p_max_len_in_batch: int = 0\n for i, id_to_count in enumerate(self.out_token_id_counts):\n sample_param = self.sampling_param_list[i]\n presence_penalties.append(sample_param.presence_penalty)\n frequency_penalties.append(sample_param.frequency_penalty)\n temperatures.append(sample_param.temperature)\n top_ps.append(sample_param.top_p)\n top_ks.append(sample_param.top_k)\n \n for token_id, count in id_to_count.items():\n p_token_ids.append(token_id)\n p_token_counts.append(count)\n p_seq_len.append(len(id_to_count))\n p_max_len_in_batch = max(p_max_len_in_batch, len(id_to_count))\n \n presence_penalties = torch.tensor(presence_penalties, dtype=torch.float, device=\"cuda\")\n frequency_penalties = torch.tensor(frequency_penalties, dtype=torch.float, device=\"cuda\")\n temperatures = torch.tensor(temperatures, dtype=torch.float, device=\"cuda\")\n top_ps = torch.tensor(top_ps, dtype=torch.float, device=\"cuda\")\n top_ks = torch.tensor(top_ks, dtype=torch.int32, device=\"cuda\")\n p_token_ids = 
torch.tensor(p_token_ids, dtype=torch.int32, device=\"cuda\")\n p_token_counts = torch.tensor(p_token_counts, dtype=torch.int32, device=\"cuda\")\n p_seq_len = torch.tensor(p_seq_len, dtype=torch.int32, device=\"cuda\")\n p_cumsum_seq_len = torch.cumsum(p_seq_len, dim=0, dtype=torch.int32)\n return presence_penalties, frequency_penalties, temperatures, top_ps, top_ks, p_token_ids, p_token_counts, p_cumsum_seq_len, p_max_len_in_batch"
},
{
"identifier": "InferAdapter",
"path": "multi_loras/slora/router/model_infer/infer_adapter.py",
"snippet": "class InferAdapter:\n adapter_dirs: List[str] # all adapters on the server\n a_loc: torch.Tensor # a_loc[i] is a list of indices occupied by adapter i\n a_start: torch.Tensor # a_start[i] is the start location of adapter i\n a_len: torch.Tensor # a_len[i] is the number of cells occupied by adapter i\n a_scaling: torch.Tensor # a_scaling[i] is the scaling factor of adapter i\n mem_manager: MemoryAllocator\n\n idx_map: Dict[str, int]\n prefetch_tag: Dict[str, int]\n cur_tag: int\n\n prefetch_stream: Any\n\n @classmethod\n def init(cls, mem_manager, prefetch_stream):\n return cls(\n adapter_dirs=[],\n a_loc=torch.empty(0, dtype=torch.long, device=\"cuda\"),\n a_start=torch.empty(0, dtype=torch.long, device=\"cuda\"),\n a_len=torch.empty(0, dtype=torch.long, device=\"cuda\"),\n a_scaling=torch.empty(0, dtype=torch.float16, device=\"cuda\"),\n mem_manager=mem_manager,\n idx_map={},\n prefetch_tag={},\n cur_tag=0,\n prefetch_stream=prefetch_stream,\n )\n\n\n # @calculate_time(show=True, min_cost_ms=0)\n def load_lora_A(self, adapter, loc, prefetch=False):\n r = adapter.r\n h = adapter.network_config[\"hidden_size\"]\n head_num = adapter.network_config[\"num_attention_heads\"]\n head_dim = h // head_num\n\n for i in range(adapter.network_config[\"num_hidden_layers\"]):\n adapter.layers[i].load_to_gpu(prefetch=prefetch)\n #self.mem_manager.key_buffer[i][loc[:r]] = adapter.layers[i].q_lora_A.transpose(0, 1).reshape(r, head_num, head_dim)\n #self.mem_manager.key_buffer[i][loc[r:r * 2]] = adapter.layers[i].k_lora_A.transpose(0, 1).reshape(r, head_num, head_dim)\n #self.mem_manager.key_buffer[i][loc[r * 2:r * 3]] = adapter.layers[i].v_lora_A.transpose(0, 1).reshape(r, head_num, head_dim)\n #self.mem_manager.key_buffer[i][loc[r * 3:r * 4]] = adapter.layers[i].o_lora_A.transpose(0, 1).reshape(r, head_num, head_dim)\n\n w_combined = adapter.layers[i].w_combined\n self.mem_manager.key_buffer[i][loc] = w_combined[0]\n\n #self.mem_manager.key_buffer[i][loc[:r]] = w_combined[0].T.reshape(r, head_num, head_dim)\n #self.mem_manager.key_buffer[i][loc[r:r * 2]] = w_combined[1].T.reshape(r, head_num, head_dim)\n #self.mem_manager.key_buffer[i][loc[r * 2:r * 3]] = w_combined[2].T.reshape(r, head_num, head_dim)\n #self.mem_manager.key_buffer[i][loc[r * 3:r * 4]] = w_combined[3].T.reshape(r, head_num, head_dim)\n\n adapter.layers[i].offload_from_gpu()\n\n\n # @calculate_time(show=True, min_cost_ms=0)\n def load_lora_B(self, adapter, loc, prefetch=False):\n r = adapter.r\n h = adapter.network_config[\"hidden_size\"]\n head_num = adapter.network_config[\"num_attention_heads\"]\n head_dim = h // head_num\n for i in range(adapter.network_config[\"num_hidden_layers\"]):\n adapter.layers[i].load_to_gpu(prefetch=prefetch)\n # this copy on gpu takes very few time, ~3ms for the following lines of copy\n #self.mem_manager.value_buffer[i][loc[:r]] = adapter.layers[i].q_lora_B.transpose(0, 1).reshape(r, head_num, head_dim)\n #self.mem_manager.value_buffer[i][loc[r:r * 2]] = adapter.layers[i].k_lora_B.transpose(0, 1).reshape(r, head_num, head_dim)\n #self.mem_manager.value_buffer[i][loc[r * 2:r * 3]] = adapter.layers[i].v_lora_B.transpose(0, 1).reshape(r, head_num, head_dim)\n #self.mem_manager.value_buffer[i][loc[r * 3:r * 4]] = adapter.layers[i].o_lora_B.transpose(0, 1).reshape(r, head_num, head_dim)\n\n w_combined = adapter.layers[i].w_combined\n self.mem_manager.value_buffer[i][loc] = w_combined[1]\n\n #self.mem_manager.value_buffer[i][loc[:r]] = w_combined[4].reshape(r, head_num, head_dim)\n 
#self.mem_manager.value_buffer[i][loc[r:r * 2]] = w_combined[5].reshape(r, head_num, head_dim)\n #self.mem_manager.value_buffer[i][loc[r * 2:r * 3]] = w_combined[6].reshape(r, head_num, head_dim)\n #self.mem_manager.value_buffer[i][loc[r * 3:r * 4]] = w_combined[7].reshape(r, head_num, head_dim)\n\n adapter.layers[i].offload_from_gpu()\n\n # @calculate_time(show=True, min_cost_ms=0)\n def load_adapters(self, adapters, prefetch=False):\n # func_name = \"realload\" if not prefetch else \"prefetch\"\n # mark_start(func_name)\n if len(adapters) == 0:\n print(f\"load 0 adapters, {len(self.adapter_dirs)} in total\")\n return\n\n if prefetch:\n self.cur_tag ^= 1\n capacity = self.mem_manager.can_use_mem_size\n new_adapters = []\n tot_size = 0\n # mark_start(\"load scan\")\n for adapter in adapters:\n self.prefetch_tag[adapter.lora_dir] = self.cur_tag\n if adapter is not None and adapter.lora_dir not in self.idx_map:\n if tot_size + adapter.r * 4 > capacity:\n break\n new_adapters.append(adapter)\n tot_size += adapter.r * 4\n # mark_end(\"load scan\")\n print(f\"prefetch {len(new_adapters)} adapters, \"\n f\"{len(self.adapter_dirs) + len(new_adapters)} in total\")\n else:\n new_adapters = []\n tot_size = 0\n # mark_start(\"load scan\")\n for adapter in adapters:\n if adapter is not None and adapter.lora_dir not in self.idx_map:\n new_adapters.append(adapter)\n tot_size += adapter.r * 4\n # mark_end(\"load scan\")\n print(f\"load {len(new_adapters)} adapters, {len(self.adapter_dirs) + len(new_adapters)} in total\")\n\n new_loc = self.mem_manager.alloc(tot_size)\n # assert len(new_loc) == tot_size\n start_offset = self.a_start.shape[0]\n self.a_start = torch.cat((self.a_start, torch.empty(len(new_adapters,), dtype=torch.long, device=\"cuda\")))\n len_offset = self.a_len.shape[0]\n self.a_len = torch.cat((self.a_len, torch.empty(len(new_adapters,), dtype=torch.long, device=\"cuda\")))\n loc_offset = self.a_loc.shape[0]\n self.a_loc = torch.cat((self.a_loc, torch.empty(tot_size, dtype=torch.long, device=\"cuda\")))\n\n cum_loc = 0\n cum_loc_list = []\n for i, new_adapter in enumerate(new_adapters):\n cum_loc_list.append(cum_loc)\n self.idx_map[new_adapter.lora_dir] = len(self.adapter_dirs)\n self.adapter_dirs.append(new_adapter.lora_dir)\n self.a_start[start_offset + i] = loc_offset + cum_loc\n self.a_len[len_offset + i] = new_adapter.r * 4\n self.a_loc[loc_offset + cum_loc: loc_offset + cum_loc + new_adapter.r * 4] = (\n new_loc[cum_loc: cum_loc + new_adapter.r * 4])\n cum_loc += new_adapter.r * 4\n self.a_scaling = torch.cat((self.a_scaling, torch.tensor([adapter.scaling for adapter in new_adapters], dtype=torch.float16, device=\"cuda\")))\n\n #if prefetch:\n # torch.cuda.synchronize()\n # tic1 = time.time()\n\n if prefetch:\n with torch.cuda.stream(self.prefetch_stream):\n new_loc = new_loc.clone()\n for i, new_adapter in enumerate(new_adapters):\n #self.idx_map[new_adapter.lora_dir] = len(self.adapter_dirs)\n #self.adapter_dirs.append(new_adapter.lora_dir)\n #self.a_start[start_offset + i] = loc_offset + cum_loc\n #self.a_len[len_offset + i] = new_adapter.r * 4\n\n cum_loc = cum_loc_list[i]\n self.load_lora_A(new_adapter, new_loc[cum_loc: cum_loc + new_adapter.r * 4], prefetch)\n self.load_lora_B(new_adapter, new_loc[cum_loc: cum_loc + new_adapter.r * 4], prefetch)\n\n #self.load_lora_A(new_adapter, None, prefetch)\n #self.load_lora_B(new_adapter, None, prefetch)\n else:\n for i, new_adapter in enumerate(new_adapters):\n cum_loc = cum_loc_list[i]\n self.load_lora_A(new_adapter, new_loc[cum_loc: 
cum_loc + new_adapter.r * 4], prefetch)\n self.load_lora_B(new_adapter, new_loc[cum_loc: cum_loc + new_adapter.r * 4], prefetch)\n\n #if prefetch:\n # tic2 = time.time()\n # torch.cuda.synchronize()\n # tic3 = time.time()\n # print(\"launch time\", tic2 - tic1, flush=True)\n # print(\"total time\", tic3 - tic1, flush=True)\n # mark_end(func_name)\n # print(f\"current adapters on batch (loaded {len(new_adapters)})\",\n # len(self.adapter_dirs), self.adapter_dirs)\n # print(self.mem_manager.can_use_mem_size_suffix // 4 / 32)\n \n\n # @calculate_time(show=True, min_cost_ms=0)\n def offload_adapters(self, reserve_adapter_dirs):\n if len(reserve_adapter_dirs) == len(self.adapter_dirs):\n print(f\"offload 0 adapters, {len(self.adapter_dirs)} remains\")\n return\n if len(reserve_adapter_dirs) == 0:\n print(f\"offload {len(self.adapter_dirs)} adapters, 0 remains\")\n self.mem_manager.free(self.a_loc)\n self.adapter_dirs=[]\n self.a_loc=torch.empty(0, dtype=torch.long, device=\"cuda\")\n self.a_start=torch.empty(0, dtype=torch.long, device=\"cuda\")\n self.a_len=torch.empty(0, dtype=torch.long, device=\"cuda\")\n self.a_scaling=torch.empty(0, dtype=torch.float16, device=\"cuda\")\n self.idx_map={}\n return\n\n # mark_start(\"offload scan\")\n remove_ind = []\n left_ind = []\n new_adapter_dirs = []\n self.idx_map = {}\n for i, adapter_dir in enumerate(self.adapter_dirs):\n if (adapter_dir not in reserve_adapter_dirs and\n (adapter_dir not in self.prefetch_tag or\n self.prefetch_tag[adapter_dir] != self.cur_tag)):\n remove_ind.append(self.a_loc[self.a_start[i]:self.a_start[i] + self.a_len[i]])\n else:\n left_ind.append(i)\n self.idx_map[adapter_dir] = len(new_adapter_dirs)\n new_adapter_dirs.append(adapter_dir)\n if len(remove_ind) == 0:\n return\n # mark_end(\"offload scan\")\n self.adapter_dirs = new_adapter_dirs\n tot_size = torch.sum(self.a_len[left_ind]).item()\n print(f\"offload {len(remove_ind)} adapters, {len(left_ind)} remains\")\n\n # mark_start(\"offload cat\")\n remove_ind = torch.cat(remove_ind)\n # mark_end(\"offload cat\")\n # release memory\n # mark_start(\"offload free mem manager\")\n self.mem_manager.free(remove_ind)\n # mark_end(\"offload free mem manager\")\n \n # reset indexing\n # mark_start(\"offload torch.empty\")\n new_a_len = torch.empty(len(left_ind), dtype=torch.long, device=\"cuda\")\n new_a_start = torch.empty(len(left_ind), dtype=torch.long, device=\"cuda\")\n new_a_scaling = torch.empty(len(left_ind), dtype=torch.float16, device=\"cuda\")\n new_a_loc = torch.empty(tot_size, dtype=torch.long, device=\"cuda\")\n # mark_end(\"offload torch.empty\")\n\n new_a_len[:] = self.a_len[left_ind]\n new_a_start[0] = 0\n new_a_start[1:] = torch.cumsum(new_a_len, dim=0)[:-1]\n new_a_scaling[:] = self.a_scaling[left_ind]\n # mark_start(\"offload a_loc update\")\n launch_var_len_copy_triton(self.a_start[left_ind], new_a_len,\n self.a_loc, new_a_start, new_a_loc)\n # mark_end(\"offload a_loc update\")\n\n self.a_start = new_a_start\n self.a_len = new_a_len\n self.a_loc = new_a_loc\n self.a_scaling = new_a_scaling\n\n # print(f\"current adapters on batch (offloaded {len(remove_ind)})\",\n # len(self.adapter_dirs), self.adapter_dirs)\n # print(self.mem_manager.can_use_mem_size_suffix // 4 / 32)"
},
{
"identifier": "NaiveInferAdapter",
"path": "multi_loras/slora/router/model_infer/naive_infer_adapter.py",
"snippet": "class NaiveInferAdapter:\n adapter_dirs: List[str] # all adapters on the server\n a_loc: torch.Tensor # a_loc[i] is a list of indices occupied by adapter i\n a_start: torch.Tensor # a_start[i] is the start location of adapter i\n a_len: torch.Tensor # a_len[i] is the number of cells occupied by adapter i\n a_scaling: torch.Tensor # a_scaling[i] is the scaling factor of adapter i\n idx_map: Dict[str, int]\n key_buffer: torch.Tensor\n value_buffer: torch.Tensor\n layer_num: int\n head_num: int\n head_dim: int\n\n @classmethod\n def init(cls, _layer_num, _head_num, _head_dim):\n return cls(\n adapter_dirs=[],\n a_loc=torch.empty(0, dtype=torch.long, device=\"cuda\"),\n a_start=torch.empty(0, dtype=torch.long, device=\"cuda\"),\n a_len=torch.empty(0, dtype=torch.long, device=\"cuda\"),\n a_scaling=torch.empty(0, dtype=torch.float16, device=\"cuda\"),\n idx_map={},\n key_buffer=[torch.empty(0, dtype=torch.float16, device=\"cuda\")\n for _ in range(_layer_num)],\n value_buffer=[torch.empty(0, dtype=torch.float16, device=\"cuda\")\n for _ in range(_layer_num)],\n layer_num=_layer_num,\n head_num=_head_num,\n head_dim=_head_dim,\n )\n\n\n # @calculate_time(show=True, min_cost_ms=0)\n def load_lora_A(self, adapter, start, end):\n r = adapter.r\n h = adapter.network_config[\"hidden_size\"]\n for i in range(adapter.network_config[\"num_hidden_layers\"]):\n adapter.layers[i].load_to_gpu()\n w_combined = adapter.layers[i].w_combined\n self.key_buffer[i][start:end] = w_combined[0]\n adapter.layers[i].offload_from_gpu()\n\n\n # @calculate_time(show=True, min_cost_ms=0)\n def load_lora_B(self, adapter, start, end):\n r = adapter.r\n h = adapter.network_config[\"hidden_size\"]\n for i in range(adapter.network_config[\"num_hidden_layers\"]):\n adapter.layers[i].load_to_gpu()\n w_combined = adapter.layers[i].w_combined\n self.value_buffer[i][start:end] = w_combined[1]\n adapter.layers[i].offload_from_gpu()\n\n\n # @calculate_time(show=True, min_cost_ms=0)\n def load_adapters(self, adapters, prefetch=False):\n assert prefetch is False\n if len(adapters) == 0:\n print(f\"load 0 adapters, {len(self.adapter_dirs)} in total\")\n return\n\n new_adapters = []\n rank_sum = 0\n for adapter in adapters:\n if adapter is not None and adapter.lora_dir not in self.idx_map:\n new_adapters.append(adapter)\n rank_sum += adapter.r * 4\n print(f\"load {len(new_adapters)} adapters, {len(self.adapter_dirs) + len(new_adapters)} in total\")\n\n if len(new_adapters) == 0:\n print(f\"load 0 adapters, {len(self.adapter_dirs)} in total\")\n return\n\n new_key_buffer = [torch.empty((rank_sum, self.head_num, self.head_dim), dtype=torch.float16, device=\"cuda\")\n for _ in range(self.layer_num)]\n new_value_buffer = [torch.empty((rank_sum, self.head_num, self.head_dim), dtype=torch.float16, device=\"cuda\")\n for _ in range(self.layer_num)]\n self.key_buffer = [torch.cat((self.key_buffer[i], new_key_buffer[i]))\n for i in range(self.layer_num)]\n self.value_buffer = [torch.cat((self.value_buffer[i], new_value_buffer[i]))\n for i in range(self.layer_num)]\n\n start_offset = self.a_start.shape[0]\n self.a_start = torch.cat((self.a_start, torch.empty(len(new_adapters,), dtype=torch.long, device=\"cuda\")))\n len_offset = self.a_len.shape[0]\n self.a_len = torch.cat((self.a_len, torch.empty(len(new_adapters,), dtype=torch.long, device=\"cuda\")))\n loc_offset = self.a_loc.shape[0]\n self.a_loc = torch.arange(0, self.a_loc.shape[0] + rank_sum, dtype=torch.long, device=\"cuda\")\n\n cum_loc = loc_offset\n cum_loc_list = []\n for i, 
new_adapter in enumerate(new_adapters):\n cum_loc_list.append(cum_loc)\n self.idx_map[new_adapter.lora_dir] = len(self.adapter_dirs)\n self.adapter_dirs.append(new_adapter.lora_dir)\n self.a_start[start_offset + i] = cum_loc\n self.a_len[len_offset + i] = new_adapter.r * 4\n cum_loc += new_adapter.r * 4\n self.a_scaling = torch.cat((self.a_scaling, torch.tensor([adapter.scaling for adapter in new_adapters], dtype=torch.float16, device=\"cuda\")))\n\n for i, new_adapter in enumerate(new_adapters):\n cum_loc = cum_loc_list[i]\n self.load_lora_A(new_adapter, cum_loc, cum_loc + new_adapter.r * 4)\n self.load_lora_B(new_adapter, cum_loc, cum_loc + new_adapter.r * 4)\n \n\n # @calculate_time(show=True, min_cost_ms=0)\n def offload_adapters(self, reserve_adapter_dirs):\n if len(reserve_adapter_dirs) == len(self.adapter_dirs):\n print(f\"offload 0 adapters, {len(self.adapter_dirs)} remains\")\n return\n if len(reserve_adapter_dirs) == 0:\n print(f\"offload {len(self.adapter_dirs)} adapters, 0 remains\")\n self.key_buffer=[torch.empty(0, dtype=torch.float16, device=\"cuda\")\n for _ in range(self.layer_num)]\n self.value_buffer=[torch.empty(0, dtype=torch.float16, device=\"cuda\")\n for _ in range(self.layer_num)]\n self.adapter_dirs=[]\n self.a_loc=torch.empty(0, dtype=torch.long, device=\"cuda\")\n self.a_start=torch.empty(0, dtype=torch.long, device=\"cuda\")\n self.a_len=torch.empty(0, dtype=torch.long, device=\"cuda\")\n self.a_scaling=torch.empty(0, dtype=torch.float16, device=\"cuda\")\n self.idx_map={}\n return\n\n left_ind = []\n self.idx_map = {}\n new_adapter_dirs = []\n for i, adapter_dir in enumerate(self.adapter_dirs):\n if adapter_dir in reserve_adapter_dirs:\n left_ind.append(i)\n self.idx_map[adapter_dir] = len(new_adapter_dirs)\n new_adapter_dirs.append(adapter_dir)\n if len(new_adapter_dirs) == len(self.adapter_dirs):\n return\n print(f\"offload {len(self.adapter_dirs) - len(left_ind)} adapters, \"\n f\"{len(left_ind)} remains\")\n # left_ind = torch.tensor(left_ind, dtype=torch.int32, device=\"cuda\")\n left_ind = torch.tensor(left_ind, dtype=torch.long, device=\"cuda\")\n self.adapter_dirs = new_adapter_dirs\n rank_sum = torch.sum(self.a_len[left_ind]).item()\n \n new_a_len = torch.empty(len(left_ind), dtype=torch.long, device=\"cuda\")\n new_a_start = torch.empty(len(left_ind), dtype=torch.long, device=\"cuda\")\n new_a_scaling = torch.empty(len(left_ind), dtype=torch.float16, device=\"cuda\")\n\n new_a_len[:] = self.a_len[left_ind]\n new_a_start[0] = 0\n new_a_start[1:] = torch.cumsum(new_a_len, dim=0)[:-1]\n new_a_scaling[:] = self.a_scaling[left_ind]\n\n # update self.key_buffer self.value_buffer\n new_key_buffer = [torch.empty((rank_sum, self.head_num, self.head_dim), dtype=torch.float16, device=\"cuda\")\n for _ in range(self.layer_num)]\n new_value_buffer = [torch.empty((rank_sum, self.head_num, self.head_dim), dtype=torch.float16, device=\"cuda\")\n for _ in range(self.layer_num)]\n copy_ind = torch.empty(rank_sum, dtype=torch.long, device=\"cuda\")\n launch_var_len_copy_triton(self.a_start[left_ind], new_a_len,\n self.a_loc, new_a_start, copy_ind)\n new_key_buffer = [self.key_buffer[i][copy_ind] for i in range(self.layer_num)]\n new_value_buffer = [self.value_buffer[i][copy_ind] for i in range(self.layer_num)]\n self.key_buffer = new_key_buffer\n self.value_buffer = new_value_buffer\n\n self.a_len = new_a_len\n self.a_start = new_a_start\n self.a_loc = torch.arange(0, rank_sum, dtype=torch.long, device=\"cuda\")\n self.a_scaling = new_a_scaling"
}
] | import asyncio
import numpy as np
import rpyc
import torch
import traceback
import time
import torch
import torch.distributed as dist
import torch
import torch.distributed as dist
import multiprocessing
from collections import defaultdict
from datetime import timedelta
from tqdm import tqdm
from typing import Dict, List, Tuple
from rpyc.utils.classic import obtain
from transformers.configuration_utils import PretrainedConfig
from .post_process import sample
from .infer_batch import InferBatch
from .infer_adapter import InferAdapter
from .naive_infer_adapter import NaiveInferAdapter
from slora.common.configs.config import setting
from slora.models.llama.model import LlamaTpPartModel
from slora.models.llama2.model import Llama2TpPartModel
from slora.models.peft.lora_adapter import LoraTpPartAdapter
from slora.models.peft.lora_unordered_batch_infer import LoraUnorderedBatchInfer
from slora.models.peft.lora_single_batch_infer import LoraPEFTBatchInfer
from slora.models.bmm.lora_bmm_infer import LoraBmmInfer
from slora.utils.infer_utils import set_random_seed
from slora.utils.infer_utils import calculate_time, mark_start, mark_end
from slora.utils.model_utils import get_model_config
from rpyc.utils.server import ThreadedServer | 9,874 |
class ModelRpcServer(rpyc.Service):
def exposed_init_model(self, rank_id, world_size, weight_dir, adapter_dirs,
max_total_token_num, load_way, mode, input_params,
prefetch_stream):
if world_size != 1:
trans_list = [obtain(e) for e in (rank_id, world_size, weight_dir, adapter_dirs,
max_total_token_num, load_way, mode)]
rank_id, world_size, weight_dir, adapter_dirs, max_total_token_num, load_way, mode = trans_list
self.tp_rank = rank_id
self.world_size = world_size
self.load_way = load_way
self.mode = mode
self.input_params = input_params
self.prefetch_stream = prefetch_stream
self.cache = {}
dist.init_process_group('nccl', init_method=f'tcp://127.0.0.1:{setting["nccl_port"]}', rank=rank_id, world_size=world_size)
torch.cuda.set_device(rank_id)
model_cfg = get_model_config(weight_dir, dummy=input_params.dummy)
try:
self.model_type = model_cfg["model_type"]
if self.model_type == "llama":
if "num_key_value_heads" in model_cfg.keys():
self.model = Llama2TpPartModel(rank_id, world_size, weight_dir,
max_total_token_num,
mem_adapter_size=input_params.pool_size_lora,
load_way=load_way, mode=mode,
dummy=input_params.dummy)
else:
self.model = LlamaTpPartModel(rank_id, world_size, weight_dir,
max_total_token_num,
mem_adapter_size=input_params.pool_size_lora,
load_way=load_way, mode=mode,
dummy=input_params.dummy)
else:
raise Exception(f"can not support {self.model_type} now")
except Exception as e:
print("#" * 16)
print("load model error:", str(e), e, type(e))
raise e
''' init adapters '''
# TODO support TP for adapters
# print("adapter_dirs", adapter_dirs)
self.adapters = []
self.adapter_id = {}
for adapter_dir in tqdm(adapter_dirs, desc="load adapters"):
self.adapter_id[adapter_dir] = len(self.adapters)
self.adapters.append(LoraTpPartAdapter(rank_id, world_size, adapter_dir, model_cfg,
swap=input_params.swap, dummy=input_params.dummy,
no_lora_swap=input_params.no_lora_swap,
prefetch_stream=prefetch_stream))
self.adapter_id[None] = len(self.adapters)
self.adapters.append(None)
if input_params.no_mem_pool:
head_num = self.model.config["num_attention_heads"]
self.infer_adapter = NaiveInferAdapter.init(self.model.config["num_hidden_layers"],
head_num,
self.model.config["hidden_size"] // head_num)
else:
|
class ModelRpcServer(rpyc.Service):
def exposed_init_model(self, rank_id, world_size, weight_dir, adapter_dirs,
max_total_token_num, load_way, mode, input_params,
prefetch_stream):
if world_size != 1:
trans_list = [obtain(e) for e in (rank_id, world_size, weight_dir, adapter_dirs,
max_total_token_num, load_way, mode)]
rank_id, world_size, weight_dir, adapter_dirs, max_total_token_num, load_way, mode = trans_list
self.tp_rank = rank_id
self.world_size = world_size
self.load_way = load_way
self.mode = mode
self.input_params = input_params
self.prefetch_stream = prefetch_stream
self.cache = {}
dist.init_process_group('nccl', init_method=f'tcp://127.0.0.1:{setting["nccl_port"]}', rank=rank_id, world_size=world_size)
torch.cuda.set_device(rank_id)
model_cfg = get_model_config(weight_dir, dummy=input_params.dummy)
try:
self.model_type = model_cfg["model_type"]
if self.model_type == "llama":
if "num_key_value_heads" in model_cfg.keys():
self.model = Llama2TpPartModel(rank_id, world_size, weight_dir,
max_total_token_num,
mem_adapter_size=input_params.pool_size_lora,
load_way=load_way, mode=mode,
dummy=input_params.dummy)
else:
self.model = LlamaTpPartModel(rank_id, world_size, weight_dir,
max_total_token_num,
mem_adapter_size=input_params.pool_size_lora,
load_way=load_way, mode=mode,
dummy=input_params.dummy)
else:
raise Exception(f"can not support {self.model_type} now")
except Exception as e:
print("#" * 16)
print("load model error:", str(e), e, type(e))
raise e
''' init adapters '''
# TODO support TP for adapters
# print("adapter_dirs", adapter_dirs)
self.adapters = []
self.adapter_id = {}
for adapter_dir in tqdm(adapter_dirs, desc="load adapters"):
self.adapter_id[adapter_dir] = len(self.adapters)
self.adapters.append(LoraTpPartAdapter(rank_id, world_size, adapter_dir, model_cfg,
swap=input_params.swap, dummy=input_params.dummy,
no_lora_swap=input_params.no_lora_swap,
prefetch_stream=prefetch_stream))
self.adapter_id[None] = len(self.adapters)
self.adapters.append(None)
if input_params.no_mem_pool:
head_num = self.model.config["num_attention_heads"]
self.infer_adapter = NaiveInferAdapter.init(self.model.config["num_hidden_layers"],
head_num,
self.model.config["hidden_size"] // head_num)
else: | self.infer_adapter = InferAdapter.init(self.model.mem_manager, | 2 | 2023-10-16 02:39:47+00:00 | 12k |
MobileLLM/AutoDroid | droidbot/input_policy.py | [
{
"identifier": "UTG",
"path": "droidbot/utg.py",
"snippet": "class UTG(object):\n \"\"\"\n UI transition graph\n \"\"\"\n\n def __init__(self, device, app, random_input):\n self.logger = logging.getLogger(self.__class__.__name__)\n self.device = device\n self.app = app\n self.random_input = random_input\n\n self.G = nx.DiGraph()\n self.G2 = nx.DiGraph() # graph with same-structure states clustered\n\n self.transitions = []\n self.effective_event_strs = set()\n self.ineffective_event_strs = set()\n self.explored_state_strs = set()\n self.reached_state_strs = set()\n self.reached_activities = set()\n\n self.first_state = None\n self.last_state = None\n\n self.start_time = datetime.datetime.now()\n\n @property\n def first_state_str(self):\n return self.first_state.state_str if self.first_state else None\n\n @property\n def last_state_str(self):\n return self.last_state.state_str if self.last_state else None\n\n @property\n def effective_event_count(self):\n return len(self.effective_event_strs)\n\n @property\n def num_transitions(self):\n return len(self.transitions)\n\n def add_transition(self, event, old_state, new_state):\n self.add_node(old_state)\n self.add_node(new_state)\n\n # make sure the states are not None\n if not old_state or not new_state:\n return\n\n event_str = event.get_event_str(old_state)\n self.transitions.append((old_state, event, new_state))\n\n if old_state.state_str == new_state.state_str:\n self.ineffective_event_strs.add(event_str)\n # delete the transitions including the event from utg\n for new_state_str in self.G[old_state.state_str]:\n if event_str in self.G[old_state.state_str][new_state_str][\"events\"]:\n self.G[old_state.state_str][new_state_str][\"events\"].pop(event_str)\n if event_str in self.effective_event_strs:\n self.effective_event_strs.remove(event_str)\n return\n\n self.effective_event_strs.add(event_str)\n\n if (old_state.state_str, new_state.state_str) not in self.G.edges():\n self.G.add_edge(old_state.state_str, new_state.state_str, events={})\n self.G[old_state.state_str][new_state.state_str][\"events\"][event_str] = {\n \"event\": event,\n \"id\": self.effective_event_count\n }\n\n if (old_state.structure_str, new_state.structure_str) not in self.G2.edges():\n self.G2.add_edge(old_state.structure_str, new_state.structure_str, events={})\n self.G2[old_state.structure_str][new_state.structure_str][\"events\"][event_str] = {\n \"event\": event,\n \"id\": self.effective_event_count\n }\n\n self.last_state = new_state\n self.__output_utg()\n\n def remove_transition(self, event, old_state, new_state):\n event_str = event.get_event_str(old_state)\n if (old_state.state_str, new_state.state_str) in self.G.edges():\n events = self.G[old_state.state_str][new_state.state_str][\"events\"]\n if event_str in events.keys():\n events.pop(event_str)\n if len(events) == 0:\n self.G.remove_edge(old_state.state_str, new_state.state_str)\n if (old_state.structure_str, new_state.structure_str) in self.G2.edges():\n events = self.G2[old_state.structure_str][new_state.structure_str][\"events\"]\n if event_str in events.keys():\n events.pop(event_str)\n if len(events) == 0:\n self.G2.remove_edge(old_state.structure_str, new_state.structure_str)\n\n def add_node(self, state):\n if not state:\n return\n if state.state_str not in self.G.nodes():\n state.save2dir()\n self.G.add_node(state.state_str, state=state)\n if self.first_state is None:\n self.first_state = state\n\n if state.structure_str not in self.G2.nodes():\n self.G2.add_node(state.structure_str, states=[])\n 
self.G2.nodes[state.structure_str]['states'].append(state)\n\n if state.foreground_activity.startswith(self.app.package_name):\n self.reached_activities.add(state.foreground_activity)\n\n def __output_utg(self):\n \"\"\"\n Output current UTG to a js file\n \"\"\"\n if not self.device.output_dir:\n return\n\n def list_to_html_table(dict_data):\n table = \"<table class=\\\"table\\\">\\n\"\n for (key, value) in dict_data:\n table += \"<tr><th>%s</th><td>%s</td></tr>\\n\" % (key, value)\n table += \"</table>\"\n return table\n\n utg_file_path = os.path.join(self.device.output_dir, \"utg.js\")\n utg_file = open(utg_file_path, \"w\")\n utg_nodes = []\n utg_edges = []\n for state_str in self.G.nodes():\n state = self.G.nodes[state_str][\"state\"]\n package_name = state.foreground_activity.split(\"/\")[0]\n activity_name = state.foreground_activity.split(\"/\")[1]\n short_activity_name = activity_name.split(\".\")[-1]\n\n state_desc = list_to_html_table([\n (\"package\", package_name),\n (\"activity\", activity_name),\n (\"state_str\", state.state_str),\n (\"structure_str\", state.structure_str)\n ])\n\n utg_node = {\n \"id\": state_str,\n \"shape\": \"image\",\n \"image\": os.path.relpath(state.screenshot_path, self.device.output_dir),\n \"label\": short_activity_name,\n # \"group\": state.foreground_activity,\n \"package\": package_name,\n \"activity\": activity_name,\n \"state_str\": state_str,\n \"structure_str\": state.structure_str,\n \"title\": state_desc,\n \"content\": \"\\n\".join([package_name, activity_name, state.state_str, state.search_content])\n }\n\n if state.state_str == self.first_state_str:\n utg_node[\"label\"] += \"\\n<FIRST>\"\n utg_node[\"font\"] = \"14px Arial red\"\n if state.state_str == self.last_state_str:\n utg_node[\"label\"] += \"\\n<LAST>\"\n utg_node[\"font\"] = \"14px Arial red\"\n\n utg_nodes.append(utg_node)\n\n for state_transition in self.G.edges():\n from_state = state_transition[0]\n to_state = state_transition[1]\n\n events = self.G[from_state][to_state][\"events\"]\n event_short_descs = []\n event_list = []\n\n for event_str, event_info in sorted(iter(events.items()), key=lambda x: x[1][\"id\"]):\n event_short_descs.append((event_info[\"id\"], event_str))\n if self.device.adapters[self.device.minicap]:\n view_images = [\"views/view_\" + view[\"view_str\"] + \".jpg\"\n for view in event_info[\"event\"].get_views()]\n else:\n view_images = [\"views/view_\" + view[\"view_str\"] + \".png\"\n for view in event_info[\"event\"].get_views()]\n event_list.append({\n \"event_str\": event_str,\n \"event_id\": event_info[\"id\"],\n \"event_type\": event_info[\"event\"].event_type,\n \"view_images\": view_images\n })\n\n utg_edge = {\n \"from\": from_state,\n \"to\": to_state,\n \"id\": from_state + \"-->\" + to_state,\n \"title\": list_to_html_table(event_short_descs),\n \"label\": \", \".join([str(x[\"event_id\"]) for x in event_list]),\n \"events\": event_list\n }\n\n # # Highlight last transition\n # if state_transition == self.last_transition:\n # utg_edge[\"color\"] = \"red\"\n\n utg_edges.append(utg_edge)\n\n utg = {\n \"nodes\": utg_nodes,\n \"edges\": utg_edges,\n\n \"num_nodes\": len(utg_nodes),\n \"num_edges\": len(utg_edges),\n \"num_effective_events\": len(self.effective_event_strs),\n \"num_reached_activities\": len(self.reached_activities),\n \"test_date\": self.start_time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"time_spent\": (datetime.datetime.now() - self.start_time).total_seconds(),\n \"num_transitions\": self.num_transitions,\n\n \"device_serial\": 
self.device.serial,\n \"device_model_number\": self.device.get_model_number(),\n \"device_sdk_version\": self.device.get_sdk_version(),\n\n \"app_sha256\": self.app.hashes[2],\n \"app_package\": self.app.package_name,\n \"app_main_activity\": self.app.main_activity,\n \"app_num_total_activities\": len(self.app.activities),\n }\n\n utg_json = json.dumps(utg, indent=2)\n utg_file.write(\"var utg = \\n\")\n utg_file.write(utg_json)\n utg_file.close()\n\n def is_event_explored(self, event, state):\n event_str = event.get_event_str(state)\n return event_str in self.effective_event_strs or event_str in self.ineffective_event_strs\n\n def is_state_explored(self, state):\n if state.state_str in self.explored_state_strs:\n return True\n for possible_event in state.get_possible_input():\n if not self.is_event_explored(possible_event, state):\n return False\n self.explored_state_strs.add(state.state_str)\n return True\n\n def is_state_reached(self, state):\n if state.state_str in self.reached_state_strs:\n return True\n self.reached_state_strs.add(state.state_str)\n return False\n\n def get_reachable_states(self, current_state):\n reachable_states = []\n for target_state_str in nx.descendants(self.G, current_state.state_str):\n target_state = self.G.nodes[target_state_str][\"state\"]\n reachable_states.append(target_state)\n return reachable_states\n\n def get_navigation_steps(self, from_state, to_state):\n if from_state is None or to_state is None:\n return None\n try:\n steps = []\n from_state_str = from_state.state_str\n to_state_str = to_state.state_str\n state_strs = nx.shortest_path(G=self.G, source=from_state_str, target=to_state_str)\n if not isinstance(state_strs, list) or len(state_strs) < 2:\n self.logger.warning(f\"Error getting path from {from_state_str} to {to_state_str}\")\n start_state_str = state_strs[0]\n for state_str in state_strs[1:]:\n edge = self.G[start_state_str][state_str]\n edge_event_strs = list(edge[\"events\"].keys())\n if self.random_input:\n random.shuffle(edge_event_strs)\n start_state = self.G.nodes[start_state_str]['state']\n event = edge[\"events\"][edge_event_strs[0]][\"event\"]\n steps.append((start_state, event))\n start_state_str = state_str\n return steps\n except Exception as e:\n print(e)\n self.logger.warning(f\"Cannot find a path from {from_state.state_str} to {to_state.state_str}\")\n return None\n\n # def get_simplified_nav_steps(self, from_state, to_state):\n # nav_steps = self.get_navigation_steps(from_state, to_state)\n # if nav_steps is None:\n # return None\n # simple_nav_steps = []\n # last_state, last_action = nav_steps[-1]\n # for state, action in nav_steps:\n # if state.structure_str == last_state.structure_str:\n # simple_nav_steps.append((state, last_action))\n # break\n # simple_nav_steps.append((state, action))\n # return simple_nav_steps\n\n def get_G2_nav_steps(self, from_state, to_state):\n if from_state is None or to_state is None:\n return None\n from_state_str = from_state.structure_str\n to_state_str = to_state.structure_str\n try:\n nav_steps = []\n state_strs = nx.shortest_path(G=self.G2, source=from_state_str, target=to_state_str)\n if not isinstance(state_strs, list) or len(state_strs) < 2:\n return None\n start_state_str = state_strs[0]\n for state_str in state_strs[1:]:\n edge = self.G2[start_state_str][state_str]\n edge_event_strs = list(edge[\"events\"].keys())\n start_state = random.choice(self.G2.nodes[start_state_str]['states'])\n event_str = random.choice(edge_event_strs)\n event = edge[\"events\"][event_str][\"event\"]\n 
nav_steps.append((start_state, event))\n start_state_str = state_str\n if nav_steps is None:\n return None\n # return nav_steps\n # simplify the path\n simple_nav_steps = []\n last_state, last_action = nav_steps[-1]\n for state, action in nav_steps:\n if state.structure_str == last_state.structure_str:\n simple_nav_steps.append((state, last_action))\n break\n simple_nav_steps.append((state, action))\n return simple_nav_steps\n except Exception as e:\n print(e)\n return None"
},
{
"identifier": "ScrollEvent",
"path": "droidbot/input_event.py",
"snippet": "class ScrollEvent(UIEvent):\n \"\"\"\n swipe gesture\n \"\"\"\n\n def __init__(self, x=None, y=None, view=None, direction=\"DOWN\", event_dict=None):\n super().__init__(view)\n self.event_type = KEY_ScrollEvent\n self.x = x\n self.y = y\n self.view = view\n self.direction = direction\n\n if event_dict is not None:\n self.__dict__.update(event_dict)\n\n @staticmethod\n def get_random_instance(device, app):\n x = random.uniform(0, device.get_width())\n y = random.uniform(0, device.get_height())\n direction = random.choice([\"UP\", \"DOWN\", \"LEFT\", \"RIGHT\"])\n return ScrollEvent(x, y, direction)\n\n def send(self, device):\n if self.view is not None:\n from .device_state import DeviceState\n width = DeviceState.get_view_width(view_dict=self.view)\n height = DeviceState.get_view_height(view_dict=self.view)\n else:\n width = device.get_width()\n height = device.get_height()\n\n x, y = UIEvent.get_xy(x=self.x, y=self.y, view=self.view)\n if not x or not y:\n # If no view and no coordinate specified, use the screen center coordinate\n x = width / 2\n y = height / 2\n\n start_x, start_y = x, y\n end_x, end_y = x, y\n duration = 500\n\n drag_length = 3/10\n # bias = 5/11\n\n # if self.direction == \"UP\":\n # start_y -= height * 2 / 5\n # end_y += height * 2 / 5\n # elif self.direction == \"DOWN\":\n # start_y += height * 2 / 5\n # end_y -= height * 2 / 5\n # elif self.direction == \"LEFT\":\n # start_x -= width * 2 / 5\n # end_x += width * 2 / 5\n # elif self.direction == \"RIGHT\":\n # start_x += width * 2 / 5\n # end_x -= width * 2 / 5\n\n if self.direction == \"UP\":\n start_y -= height * drag_length\n end_y += height * drag_length\n # do not drag from the center to avoid mis-touch\n # start_x += width * bias\n # end_x += width * bias\n # print(height, start_y, end_y, start_x, end_x, width)\n elif self.direction == \"DOWN\":\n start_y += height * drag_length\n end_y -= height * drag_length\n # do not drag from the center to avoid mis-touch\n # start_x += width * bias\n # end_x += width * bias\n # print(height, start_y, end_y)\n elif self.direction == \"LEFT\":\n start_x -= width * drag_length\n end_x += width * drag_length\n elif self.direction == \"RIGHT\":\n start_x += width * drag_length\n end_x -= width * drag_length\n '''\n this has been used for special case for calendar application. You can change 200 due to other special cases\n '''\n if abs(end_y - start_y) >= 200:\n device.view_drag((start_x, start_y), (end_x, end_y), duration)\n return True\n\n def get_event_str(self, state):\n if self.view is not None:\n return \\\n f\"{self.__class__.__name__}({UIEvent.view_str(state, self.view)}, direction={self.direction})\"\n elif self.x is not None and self.y is not None:\n return \"%s(state=%s, x=%s, y=%s, direction=%s)\" %\\\n (self.__class__.__name__, state.state_str, self.x, self.y, self.direction)\n else:\n return \"%s(state=%s, direction=%s)\" % \\\n (self.__class__.__name__, state.state_str, self.direction)\n\n def get_views(self):\n return [self.view] if self.view else []"
},
{
"identifier": "prompt_llm_with_history",
"path": "query_lmql.py",
"snippet": "@lmql.query(model=model,decoder='argmax')\ndef prompt_llm_with_history(task,history,ui_desc,ids):\n '''lmql\n \"\"\"You are a smartphone assistant to help users complete tasks by interacting with mobile apps.Given a task, the previous UI actions, and the content of current UI state, your job is to decide whether the task is already finished by the previous actions, and if not, decide which UI element in current UI state should be interacted.\n Task:{task}\n Previous UI actions: {history}\n Current UI State:{ui_desc}\n Your answer should always use the following format:1. Completing this task on a smartphone usually involves these steps: <?>.\\n2. Analyses of the relations between the task and the previous UI actions and current UI state: <?>.\\n3. Based on the previous actions, is the task already finished? <Y/N>. The next step should be <?/None>.\\n4. Can the task be proceeded with the current UI state? <Y/N>. Fill in the blanks about the next one interaction: - id=<id number> - action=<tap/input> - input text=<text or N/A>\n - id=[ID] - action=[ACTION] - input text=[INPUT_TEXT]. \"\"\" where ACTION in [\"tap\", \"input\", \"N/A\"] and ID in {ids} and len(TOKENS(INPUT_TEXT))<6\n\n return ID,ACTION,INPUT_TEXT\n '''"
}
] | import sys
import json
import re
import logging
import random
import yaml
import copy
import requests
import ast
import time
import tools
import pdb
import os
import traceback
import numpy as np
from abc import abstractmethod
from .input_event import *
from .utg import UTG
from .input_event import ScrollEvent
from query_lmql import prompt_llm_with_history
from xmlrpc.client import ServerProxy
from xmlrpclib import ServerProxy
from InstructorEmbedding import INSTRUCTOR
from sklearn.metrics.pairwise import cosine_similarity | 7,471 | # if current app is in background, bring it to foreground
component = self.app.get_package_name()
if self.app.get_main_activity():
component += "/%s" % self.app.get_main_activity()
return IntentEvent(Intent(suffix=component))
self.logger.info("Replaying %s" % event_path)
self.event_idx = curr_event_idx
self.num_replay_tries = 0
# return InputEvent.from_dict(event_dict["event"])
event = InputEvent.from_dict(event_dict["event"])
self.last_state = self.current_state
self.last_event = event
return event
time.sleep(5)
# raise InputInterruptedException("No more record can be replayed.")
def __update_utg(self):
self.utg.add_transition(self.last_event, self.last_state, self.current_state)
class ManualPolicy(UtgBasedInputPolicy):
"""
manually explore UFG
"""
def __init__(self, device, app):
super(ManualPolicy, self).__init__(device, app, False)
self.logger = logging.getLogger(self.__class__.__name__)
self.__first_event = True
def generate_event_based_on_utg(self):
"""
generate an event based on current UTG
@return: InputEvent
"""
if self.__first_event:
self.__first_event = False
self.logger.info("Trying to start the app...")
start_app_intent = self.app.get_start_intent()
return IntentEvent(intent=start_app_intent)
else:
return ManualEvent()
class TaskPolicy(UtgBasedInputPolicy):
def __init__(self, device, app, random_input, task, use_memory=True, debug_mode=False):
super(TaskPolicy, self).__init__(device, app, random_input)
self.logger = logging.getLogger(self.__class__.__name__)
self.task = task
self.__nav_target = None
self.__nav_num_steps = -1
self.__num_restarts = 0
self.__num_steps_outside = 0
self.__event_trace = ""
self.__missed_states = set()
self.__random_explore = random_input
self.__action_history = []
self.__thought_history = []
self.use_memory = use_memory
# if use_memory:
# self.memory = Memory(app_name=self.app.app_name, app_output_path=self.device.output_dir)
if self.use_memory:
self.similar_ele_path, self.similar_ele_function, self.similar_ele_statement = self.get_most_similar_element()
if not self.similar_ele_function:
self.use_memory = False
print('=============\nWarning: Did not find the memory of this app, the app memory is disabled\n=============')
else:
print(f'============\nFound element: {self.similar_ele_statement}\nPath: {self.similar_ele_path}\nFunction: {self.similar_ele_function}\n============')
self.state_ele_memory = {} # memorize some important states that contain elements of insight
def get_most_similar_element(self):
model = INSTRUCTOR('hkunlp/instructor-xl')
task_embedding = model.encode('task: ' + self.task).reshape(1, -1)
with open('memory/node_filtered_elements.json') as file:
ele_statements = json.load(file)
with open('memory/element_description.json') as file:
ele_functions = json.load(file)
with open('memory/embedded_elements_desc.json') as file:
embeddings = json.load(file)
app_name = self.device.output_dir.split('/')[-1]
if app_name not in embeddings.keys():
return None, None, None
app_embeddings = embeddings[app_name]
# similarities = {}
max_similarity, similar_ele_idx = -9999, -9999
for state_str, elements in app_embeddings.items():
# if the target element is in the first ui, no onclick is needed
# if ele_statements[app_name][state_str]['path'] == []:
# continue
# similarities[state_str] = []
for idx, ele in enumerate(elements):
if ele:
npele = np.array(ele).reshape(1, -1)
similarity = cosine_similarity(task_embedding, npele)[0][0]
else:
similarity = -9999
# similarities[state_str].append(similarity)
if similarity > max_similarity:
max_similarity = similarity
similar_ele_idx = idx
similar_state_str = state_str
similar_ele = ele_statements[app_name][similar_state_str]['elements'][similar_ele_idx]
similar_ele_path = ele_statements[app_name][similar_state_str]['path']
similar_ele_desc = ele_functions[app_name][similar_state_str][similar_ele_idx]
del model
return similar_ele_path, similar_ele_desc, similar_ele
def _scroll_to_top(self, scroller, all_views_for_mark, old_state=None):
prefix_scroll_event = []
if old_state is None:
old_state = self.current_state
for _ in range(MAX_SCROLL_NUM): # first scroll up to the top
| # from memory.memory_builder import Memory
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Max number of restarts
MAX_NUM_RESTARTS = 5
# Max number of steps outside the app
MAX_NUM_STEPS_OUTSIDE = 1000
MAX_NUM_STEPS_OUTSIDE_KILL = 1000
# Max number of replay tries
MAX_REPLY_TRIES = 5
# Some input event flags
EVENT_FLAG_STARTED = "+started"
EVENT_FLAG_START_APP = "+start_app"
EVENT_FLAG_STOP_APP = "+stop_app"
EVENT_FLAG_EXPLORE = "+explore"
EVENT_FLAG_NAVIGATE = "+navigate"
EVENT_FLAG_TOUCH = "+touch"
# Policy taxanomy
POLICY_NAIVE_DFS = "dfs_naive"
POLICY_GREEDY_DFS = "dfs_greedy"
POLICY_NAIVE_BFS = "bfs_naive"
POLICY_GREEDY_BFS = "bfs_greedy"
POLICY_REPLAY = "replay"
POLICY_MANUAL = "manual"
POLICY_MONKEY = "monkey"
POLICY_TASK = "task"
POLICY_NONE = "none"
POLICY_MEMORY_GUIDED = "memory_guided" # implemented in input_policy2
FINISHED = "task_completed"
MAX_SCROLL_NUM = 7
USE_LMQL = False
class InputInterruptedException(Exception):
pass
def safe_dict_get(view_dict, key, default=None):
return_itm = view_dict[key] if (key in view_dict) else default
if return_itm == None:
return_itm = ''
return return_itm
class InputPolicy(object):
"""
This class is responsible for generating events to stimulate more app behaviour
It should call AppEventManager.send_event method continuously
"""
def __init__(self, device, app):
self.logger = logging.getLogger(self.__class__.__name__)
self.device = device
self.app = app
self.action_count = 0
self.master = None
def start(self, input_manager):
"""
start producing events
:param input_manager: instance of InputManager
"""
self.action_count = 0
while input_manager.enabled and self.action_count < input_manager.event_count:
try:
# # make sure the first event is go to HOME screen
# # the second event is to start the app
# if self.action_count == 0 and self.master is None:
# event = KeyEvent(name="HOME")
# elif self.action_count == 1 and self.master is None:
# event = IntentEvent(self.app.get_start_intent())
if self.action_count == 0 and self.master is None:
event = KillAppEvent(app=self.app)
else:
event = self.generate_event(input_manager)
if event == FINISHED:
break
input_manager.add_event(event)
except KeyboardInterrupt:
break
except InputInterruptedException as e:
self.logger.warning("stop sending events: %s" % e)
break
# except RuntimeError as e:
# self.logger.warning(e.message)
# break
except Exception as e:
self.logger.warning("exception during sending events: %s" % e)
traceback.print_exc()
continue
self.action_count += 1
@abstractmethod
def generate_event(self, input_manager):
"""
generate an event
@return:
"""
pass
class NoneInputPolicy(InputPolicy):
"""
do not send any event
"""
def __init__(self, device, app):
super(NoneInputPolicy, self).__init__(device, app)
def generate_event(self):
"""
generate an event
@return:
"""
return None
class UtgBasedInputPolicy(InputPolicy):
"""
state-based input policy
"""
def __init__(self, device, app, random_input):
super(UtgBasedInputPolicy, self).__init__(device, app)
self.random_input = random_input
self.script = None
self.master = None
self.script_events = []
self.last_event = None
self.last_state = None
self.current_state = None
self.utg = UTG(device=device, app=app, random_input=random_input)
self.script_event_idx = 0
if self.device.humanoid is not None:
self.humanoid_view_trees = []
self.humanoid_events = []
def generate_event(self, input_manager):
"""
generate an event
@return:
"""
# Get current device state
self.current_state = self.device.get_current_state()
if self.current_state is None:
time.sleep(5)
return KeyEvent(name="BACK")
self.__update_utg()
# update last view trees for humanoid
if self.device.humanoid is not None:
self.humanoid_view_trees = self.humanoid_view_trees + [self.current_state.view_tree]
if len(self.humanoid_view_trees) > 4:
self.humanoid_view_trees = self.humanoid_view_trees[1:]
event = None
# if the previous operation is not finished, continue
if len(self.script_events) > self.script_event_idx:
event = self.script_events[self.script_event_idx].get_transformed_event(self)
self.script_event_idx += 1
# First try matching a state defined in the script
if event is None and self.script is not None:
operation = self.script.get_operation_based_on_state(self.current_state)
if operation is not None:
self.script_events = operation.events
# restart script
event = self.script_events[0].get_transformed_event(self)
self.script_event_idx = 1
if event is None:
old_state, event = self.generate_event_based_on_utg(input_manager)
time.sleep(3)
# update last events for humanoid
if self.device.humanoid is not None:
self.humanoid_events = self.humanoid_events + [event]
if len(self.humanoid_events) > 3:
self.humanoid_events = self.humanoid_events[1:]
self.last_state = self.current_state if old_state is None else old_state
self.last_event = event
return event
def __update_utg(self):
self.utg.add_transition(self.last_event, self.last_state, self.current_state)
@abstractmethod
def generate_event_based_on_utg(self, input_manager):
"""
generate an event based on UTG
:return: InputEvent
"""
pass
class UtgNaiveSearchPolicy(UtgBasedInputPolicy):
"""
depth-first strategy to explore UFG (old)
"""
def __init__(self, device, app, random_input, search_method):
super(UtgNaiveSearchPolicy, self).__init__(device, app, random_input)
self.logger = logging.getLogger(self.__class__.__name__)
self.explored_views = set()
self.state_transitions = set()
self.search_method = search_method
self.last_event_flag = ""
self.last_event_str = None
self.last_state = None
self.preferred_buttons = ["yes", "ok", "activate", "detail", "more", "access",
"allow", "check", "agree", "try", "go", "next"]
def generate_event_based_on_utg(self):
"""
generate an event based on current device state
note: ensure these fields are properly maintained in each transaction:
last_event_flag, last_touched_view, last_state, exploited_views, state_transitions
@return: InputEvent
"""
self.save_state_transition(self.last_event_str, self.last_state, self.current_state)
if self.device.is_foreground(self.app):
# the app is in foreground, clear last_event_flag
self.last_event_flag = EVENT_FLAG_STARTED
else:
number_of_starts = self.last_event_flag.count(EVENT_FLAG_START_APP)
# If we have tried too many times but the app is still not started, stop DroidBot
if number_of_starts > MAX_NUM_RESTARTS:
raise InputInterruptedException("The app cannot be started.")
            # if the app is not started, try to start it
if self.last_event_flag.endswith(EVENT_FLAG_START_APP):
                # It seems the app is stuck at some state and cannot be started
# just pass to let viewclient deal with this case
self.logger.info("The app had been restarted %d times.", number_of_starts)
self.logger.info("Trying to restart app...")
pass
else:
start_app_intent = self.app.get_start_intent()
self.last_event_flag += EVENT_FLAG_START_APP
self.last_event_str = EVENT_FLAG_START_APP
return IntentEvent(start_app_intent)
# select a view to click
view_to_touch = self.select_a_view(self.current_state)
# if no view can be selected, restart the app
if view_to_touch is None:
stop_app_intent = self.app.get_stop_intent()
self.last_event_flag += EVENT_FLAG_STOP_APP
self.last_event_str = EVENT_FLAG_STOP_APP
return IntentEvent(stop_app_intent)
view_to_touch_str = view_to_touch['view_str']
if view_to_touch_str.startswith('BACK'):
result = KeyEvent('BACK')
else:
result = TouchEvent(view=view_to_touch)
self.last_event_flag += EVENT_FLAG_TOUCH
self.last_event_str = view_to_touch_str
self.save_explored_view(self.current_state, self.last_event_str)
return result
def select_a_view(self, state):
"""
select a view in the view list of given state, let droidbot touch it
@param state: DeviceState
@return:
"""
views = []
for view in state.views:
if view['enabled'] and len(view['children']) == 0:
views.append(view)
if self.random_input:
random.shuffle(views)
# add a "BACK" view, consider go back first/last according to search policy
mock_view_back = {'view_str': 'BACK_%s' % state.foreground_activity,
'text': 'BACK_%s' % state.foreground_activity}
if self.search_method == POLICY_NAIVE_DFS:
views.append(mock_view_back)
elif self.search_method == POLICY_NAIVE_BFS:
views.insert(0, mock_view_back)
# first try to find a preferable view
for view in views:
view_text = view['text'] if view['text'] is not None else ''
view_text = view_text.lower().strip()
if view_text in self.preferred_buttons \
and (state.foreground_activity, view['view_str']) not in self.explored_views:
self.logger.info("selected an preferred view: %s" % view['view_str'])
return view
        # try to find an un-clicked view
for view in views:
if (state.foreground_activity, view['view_str']) not in self.explored_views:
self.logger.info("selected an un-clicked view: %s" % view['view_str'])
return view
        # if all enabled views have been clicked, try jumping to another activity by clicking one of the state transitions
if self.random_input:
random.shuffle(views)
transition_views = {transition[0] for transition in self.state_transitions}
for view in views:
if view['view_str'] in transition_views:
self.logger.info("selected a transition view: %s" % view['view_str'])
return view
# no window transition found, just return a random view
# view = views[0]
# self.logger.info("selected a random view: %s" % view['view_str'])
# return view
# DroidBot stuck on current state, return None
self.logger.info("no view could be selected in state: %s" % state.tag)
return None
def save_state_transition(self, event_str, old_state, new_state):
"""
save the state transition
@param event_str: str, representing the event cause the transition
@param old_state: DeviceState
@param new_state: DeviceState
@return:
"""
if event_str is None or old_state is None or new_state is None:
return
if new_state.is_different_from(old_state):
self.state_transitions.add((event_str, old_state.tag, new_state.tag))
def save_explored_view(self, state, view_str):
"""
save the explored view
@param state: DeviceState, where the view located
@param view_str: str, representing a view
@return:
"""
if not state:
return
state_activity = state.foreground_activity
self.explored_views.add((state_activity, view_str))
class UtgGreedySearchPolicy(UtgBasedInputPolicy):
"""
DFS/BFS (according to search_method) strategy to explore UFG (new)
"""
def __init__(self, device, app, random_input, search_method):
super(UtgGreedySearchPolicy, self).__init__(device, app, random_input)
self.logger = logging.getLogger(self.__class__.__name__)
self.search_method = search_method
self.preferred_buttons = ["yes", "ok", "activate", "detail", "more", "access",
"allow", "check", "agree", "try", "go", "next"]
self.__nav_target = None
self.__nav_num_steps = -1
self.__num_restarts = 0
self.__num_steps_outside = 0
self.__event_trace = ""
self.__missed_states = set()
self.__random_explore = False
def generate_event_based_on_utg(self, input_manager):
"""
generate an event based on current UTG
@return: InputEvent
"""
current_state = self.current_state
self.logger.info("Current state: %s" % current_state.state_str)
if current_state.state_str in self.__missed_states:
self.__missed_states.remove(current_state.state_str)
if current_state.get_app_activity_depth(self.app) < 0:
# If the app is not in the activity stack
start_app_intent = self.app.get_start_intent()
            # It seems the app is stuck at some state; it has been
# 1) force stopped (START, STOP)
# just start the app again by increasing self.__num_restarts
# 2) started at least once and cannot be started (START)
# pass to let viewclient deal with this case
# 3) nothing
# a normal start. clear self.__num_restarts.
if self.__event_trace.endswith(EVENT_FLAG_START_APP + EVENT_FLAG_STOP_APP) \
or self.__event_trace.endswith(EVENT_FLAG_START_APP):
self.__num_restarts += 1
self.logger.info("The app had been restarted %d times.", self.__num_restarts)
else:
self.__num_restarts = 0
# pass (START) through
if not self.__event_trace.endswith(EVENT_FLAG_START_APP):
if self.__num_restarts > MAX_NUM_RESTARTS:
# If the app had been restarted too many times, enter random mode
msg = "The app had been restarted too many times. Entering random mode."
self.logger.info(msg)
self.__random_explore = True
else:
# Start the app
self.__event_trace += EVENT_FLAG_START_APP
self.logger.info("Trying to start the app...")
return IntentEvent(intent=start_app_intent)
elif current_state.get_app_activity_depth(self.app) > 0:
# If the app is in activity stack but is not in foreground
self.__num_steps_outside += 1
if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE:
# If the app has not been in foreground for too long, try to go back
if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE_KILL:
stop_app_intent = self.app.get_stop_intent()
go_back_event = IntentEvent(stop_app_intent)
else:
go_back_event = KeyEvent(name="BACK")
self.__event_trace += EVENT_FLAG_NAVIGATE
self.logger.info("Going back to the app...")
return go_back_event
else:
# If the app is in foreground
self.__num_steps_outside = 0
# Get all possible input events
possible_events = current_state.get_possible_input()
if self.random_input:
random.shuffle(possible_events)
if self.search_method == POLICY_GREEDY_DFS:
possible_events.append(KeyEvent(name="BACK"))
elif self.search_method == POLICY_GREEDY_BFS:
possible_events.insert(0, KeyEvent(name="BACK"))
# get humanoid result, use the result to sort possible events
# including back events
if self.device.humanoid is not None:
possible_events = self.__sort_inputs_by_humanoid(possible_events)
# If there is an unexplored event, try the event first
for input_event in possible_events:
if not self.utg.is_event_explored(event=input_event, state=current_state):
self.logger.info("Trying an unexplored event.")
self.__event_trace += EVENT_FLAG_EXPLORE
return input_event
target_state = self.__get_nav_target(current_state)
if target_state:
navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=target_state)
if navigation_steps and len(navigation_steps) > 0:
self.logger.info("Navigating to %s, %d steps left." % (target_state.state_str, len(navigation_steps)))
self.__event_trace += EVENT_FLAG_NAVIGATE
return navigation_steps[0][1]
if self.__random_explore:
self.logger.info("Trying random event.")
random.shuffle(possible_events)
return possible_events[0]
        # If we couldn't find an exploration target, stop the app
stop_app_intent = self.app.get_stop_intent()
self.logger.info("Cannot find an exploration target. Trying to restart app...")
self.__event_trace += EVENT_FLAG_STOP_APP
return IntentEvent(intent=stop_app_intent)
def __sort_inputs_by_humanoid(self, possible_events):
        if sys.version.startswith("3"):
            from xmlrpc.client import ServerProxy
        else:
            from xmlrpclib import ServerProxy
        proxy = ServerProxy("http://%s/" % self.device.humanoid)
request_json = {
"history_view_trees": self.humanoid_view_trees,
"history_events": [x.__dict__ for x in self.humanoid_events],
"possible_events": [x.__dict__ for x in possible_events],
"screen_res": [self.device.display_info["width"],
self.device.display_info["height"]]
}
result = json.loads(proxy.predict(json.dumps(request_json)))
new_idx = result["indices"]
text = result["text"]
new_events = []
        # avoid infinite recursion by randomizing the first event
if not self.utg.is_state_reached(self.current_state):
new_first = random.randint(0, len(new_idx) - 1)
new_idx[0], new_idx[new_first] = new_idx[new_first], new_idx[0]
for idx in new_idx:
if isinstance(possible_events[idx], SetTextEvent):
possible_events[idx].text = text
new_events.append(possible_events[idx])
return new_events
def __get_nav_target(self, current_state):
# If last event is a navigation event
if self.__nav_target and self.__event_trace.endswith(EVENT_FLAG_NAVIGATE):
navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=self.__nav_target)
if navigation_steps and 0 < len(navigation_steps) <= self.__nav_num_steps:
# If last navigation was successful, use current nav target
self.__nav_num_steps = len(navigation_steps)
return self.__nav_target
else:
# If last navigation was failed, add nav target to missing states
self.__missed_states.add(self.__nav_target.state_str)
reachable_states = self.utg.get_reachable_states(current_state)
if self.random_input:
random.shuffle(reachable_states)
for state in reachable_states:
# Only consider foreground states
if state.get_app_activity_depth(self.app) != 0:
continue
# Do not consider missed states
if state.state_str in self.__missed_states:
continue
# Do not consider explored states
if self.utg.is_state_explored(state):
continue
self.__nav_target = state
navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=self.__nav_target)
if len(navigation_steps) > 0:
self.__nav_num_steps = len(navigation_steps)
return state
self.__nav_target = None
self.__nav_num_steps = -1
return None
class UtgReplayPolicy(InputPolicy):
"""
Replay DroidBot output generated by UTG policy
"""
def __init__(self, device, app, replay_output):
super(UtgReplayPolicy, self).__init__(device, app)
self.logger = logging.getLogger(self.__class__.__name__)
self.replay_output = replay_output
event_dir = os.path.join(replay_output, "events")
self.event_paths = sorted([os.path.join(event_dir, x) for x in
next(os.walk(event_dir))[2]
if x.endswith(".json")])
# skip HOME and start app intent
self.device = device
self.app = app
self.event_idx = 2
self.num_replay_tries = 0
self.utg = UTG(device=device, app=app, random_input=None)
self.last_event = None
self.last_state = None
self.current_state = None
def generate_event(self):
"""
generate an event based on replay_output
@return: InputEvent
"""
while self.event_idx < len(self.event_paths) and \
self.num_replay_tries < MAX_REPLY_TRIES:
self.num_replay_tries += 1
current_state = self.device.get_current_state()
if current_state is None:
time.sleep(5)
self.num_replay_tries = 0
return KeyEvent(name="BACK")
curr_event_idx = self.event_idx
self.__update_utg()
while curr_event_idx < len(self.event_paths):
event_path = self.event_paths[curr_event_idx]
with open(event_path, "r") as f:
curr_event_idx += 1
try:
event_dict = json.load(f)
except Exception as e:
self.logger.info("Loading %s failed" % event_path)
continue
if event_dict["start_state"] != current_state.state_str:
continue
if not self.device.is_foreground(self.app):
# if current app is in background, bring it to foreground
component = self.app.get_package_name()
if self.app.get_main_activity():
component += "/%s" % self.app.get_main_activity()
return IntentEvent(Intent(suffix=component))
self.logger.info("Replaying %s" % event_path)
self.event_idx = curr_event_idx
self.num_replay_tries = 0
# return InputEvent.from_dict(event_dict["event"])
event = InputEvent.from_dict(event_dict["event"])
self.last_state = self.current_state
self.last_event = event
return event
time.sleep(5)
# raise InputInterruptedException("No more record can be replayed.")
def __update_utg(self):
self.utg.add_transition(self.last_event, self.last_state, self.current_state)
class ManualPolicy(UtgBasedInputPolicy):
"""
manually explore UFG
"""
def __init__(self, device, app):
super(ManualPolicy, self).__init__(device, app, False)
self.logger = logging.getLogger(self.__class__.__name__)
self.__first_event = True
def generate_event_based_on_utg(self):
"""
generate an event based on current UTG
@return: InputEvent
"""
if self.__first_event:
self.__first_event = False
self.logger.info("Trying to start the app...")
start_app_intent = self.app.get_start_intent()
return IntentEvent(intent=start_app_intent)
else:
return ManualEvent()
class TaskPolicy(UtgBasedInputPolicy):
def __init__(self, device, app, random_input, task, use_memory=True, debug_mode=False):
super(TaskPolicy, self).__init__(device, app, random_input)
self.logger = logging.getLogger(self.__class__.__name__)
self.task = task
self.__nav_target = None
self.__nav_num_steps = -1
self.__num_restarts = 0
self.__num_steps_outside = 0
self.__event_trace = ""
self.__missed_states = set()
self.__random_explore = random_input
self.__action_history = []
self.__thought_history = []
self.use_memory = use_memory
# if use_memory:
# self.memory = Memory(app_name=self.app.app_name, app_output_path=self.device.output_dir)
if self.use_memory:
self.similar_ele_path, self.similar_ele_function, self.similar_ele_statement = self.get_most_similar_element()
if not self.similar_ele_function:
self.use_memory = False
print('=============\nWarning: Did not find the memory of this app, the app memory is disabled\n=============')
else:
print(f'============\nFound element: {self.similar_ele_statement}\nPath: {self.similar_ele_path}\nFunction: {self.similar_ele_function}\n============')
self.state_ele_memory = {} # memorize some important states that contain elements of insight
def get_most_similar_element(self):
model = INSTRUCTOR('hkunlp/instructor-xl')
task_embedding = model.encode('task: ' + self.task).reshape(1, -1)
with open('memory/node_filtered_elements.json') as file:
ele_statements = json.load(file)
with open('memory/element_description.json') as file:
ele_functions = json.load(file)
with open('memory/embedded_elements_desc.json') as file:
embeddings = json.load(file)
app_name = self.device.output_dir.split('/')[-1]
if app_name not in embeddings.keys():
return None, None, None
app_embeddings = embeddings[app_name]
# similarities = {}
max_similarity, similar_ele_idx = -9999, -9999
for state_str, elements in app_embeddings.items():
# if the target element is in the first ui, no onclick is needed
# if ele_statements[app_name][state_str]['path'] == []:
# continue
# similarities[state_str] = []
for idx, ele in enumerate(elements):
if ele:
npele = np.array(ele).reshape(1, -1)
similarity = cosine_similarity(task_embedding, npele)[0][0]
else:
similarity = -9999
# similarities[state_str].append(similarity)
if similarity > max_similarity:
max_similarity = similarity
similar_ele_idx = idx
similar_state_str = state_str
similar_ele = ele_statements[app_name][similar_state_str]['elements'][similar_ele_idx]
similar_ele_path = ele_statements[app_name][similar_state_str]['path']
similar_ele_desc = ele_functions[app_name][similar_state_str][similar_ele_idx]
del model
return similar_ele_path, similar_ele_desc, similar_ele
def _scroll_to_top(self, scroller, all_views_for_mark, old_state=None):
prefix_scroll_event = []
if old_state is None:
old_state = self.current_state
for _ in range(MAX_SCROLL_NUM): # first scroll up to the top | self.device.send_event(ScrollEvent(view=scroller, direction="UP")) | 1 | 2023-10-23 03:32:58+00:00 | 12k |
cvlab-yonsei/ACLS | calibrate/evaluation/calibrate_evaluator.py | [
{
"identifier": "DatasetEvaluator",
"path": "calibrate/evaluation/evaluator.py",
"snippet": "class DatasetEvaluator(metaclass=ABCMeta):\n \"\"\"\n Base class for a dataset evaluator\n \"\"\"\n @abstractmethod\n def reset(self):\n \"\"\"\n Preparation for a new round of evaluation.\n Should be called before starting a round of evaluation.\n \"\"\"\n pass\n\n @abstractmethod\n def update(self):\n \"\"\"\n Update status given a mini-batch results\n \"\"\"\n pass\n\n def curr_score(self):\n \"\"\"\n Return curr score after last batch\n \"\"\"\n pass\n\n @abstractmethod\n def mean_score(self):\n \"\"\"\n Return mean score across all classes/samples\n \"\"\"\n pass\n\n def class_score(self):\n \"\"\"\n Return score for different classes\n \"\"\"\n pass\n\n @abstractmethod\n def num_samples(self):\n \"\"\"\n return the evaluated samples\n \"\"\"\n pass\n\n @abstractmethod\n def main_metric(self):\n \"return the name of the main metric\"\n pass"
},
{
"identifier": "ECELoss",
"path": "calibrate/evaluation/metrics.py",
"snippet": "class ECELoss(nn.Module):\n '''\n Compute ECE (Expected Calibration Error)\n '''\n def __init__(self, n_bins=15):\n super(ECELoss, self).__init__()\n bin_boundaries = torch.linspace(0, 1, n_bins + 1)\n self.bin_lowers = bin_boundaries[:-1]\n self.bin_uppers = bin_boundaries[1:]\n\n def forward(self, logits, labels):\n softmaxes = F.softmax(logits, dim=1)\n confidences, predictions = torch.max(softmaxes, 1)\n accuracies = predictions.eq(labels)\n\n ece = torch.zeros(1, device=logits.device)\n for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):\n # Calculated |confidence - accuracy| in each bin\n in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())\n prop_in_bin = in_bin.float().mean()\n if prop_in_bin.item() > 0:\n accuracy_in_bin = accuracies[in_bin].float().mean()\n avg_confidence_in_bin = confidences[in_bin].mean()\n ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin\n\n return ece"
},
{
"identifier": "AdaptiveECELoss",
"path": "calibrate/evaluation/metrics.py",
"snippet": "class AdaptiveECELoss(nn.Module):\n '''\n Compute Adaptive ECE\n '''\n def __init__(self, n_bins=15):\n super(AdaptiveECELoss, self).__init__()\n self.nbins = n_bins\n\n def histedges_equalN(self, x):\n npt = len(x)\n return np.interp(np.linspace(0, npt, self.nbins + 1),\n np.arange(npt),\n np.sort(x))\n def forward(self, logits, labels):\n softmaxes = F.softmax(logits, dim=1)\n confidences, predictions = torch.max(softmaxes, 1)\n accuracies = predictions.eq(labels)\n n, bin_boundaries = np.histogram(confidences.cpu().detach(), self.histedges_equalN(confidences.cpu().detach()))\n #print(n,confidences,bin_boundaries)\n self.bin_lowers = bin_boundaries[:-1]\n self.bin_uppers = bin_boundaries[1:]\n ece = torch.zeros(1, device=logits.device)\n for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):\n # Calculated |confidence - accuracy| in each bin\n in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())\n prop_in_bin = in_bin.float().mean()\n if prop_in_bin.item() > 0:\n accuracy_in_bin = accuracies[in_bin].float().mean()\n avg_confidence_in_bin = confidences[in_bin].mean()\n ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin\n return ece"
},
{
"identifier": "ClasswiseECELoss",
"path": "calibrate/evaluation/metrics.py",
"snippet": "class ClasswiseECELoss(nn.Module):\n '''\n Compute Classwise ECE\n '''\n def __init__(self, n_bins=15):\n super(ClasswiseECELoss, self).__init__()\n bin_boundaries = torch.linspace(0, 1, n_bins + 1)\n self.bin_lowers = bin_boundaries[:-1]\n self.bin_uppers = bin_boundaries[1:]\n\n def forward(self, logits, labels):\n num_classes = int((torch.max(labels) + 1).item())\n softmaxes = F.softmax(logits, dim=1)\n per_class_sce = None\n\n for i in range(num_classes):\n class_confidences = softmaxes[:, i]\n class_sce = torch.zeros(1, device=logits.device)\n labels_in_class = labels.eq(i) # one-hot vector of all positions where the label belongs to the class i\n\n for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):\n in_bin = class_confidences.gt(bin_lower.item()) * class_confidences.le(bin_upper.item())\n prop_in_bin = in_bin.float().mean()\n if prop_in_bin.item() > 0:\n accuracy_in_bin = labels_in_class[in_bin].float().mean()\n avg_confidence_in_bin = class_confidences[in_bin].mean()\n class_sce += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin\n\n if (i == 0):\n per_class_sce = class_sce\n else:\n per_class_sce = torch.cat((per_class_sce, class_sce), dim=0)\n\n sce = torch.mean(per_class_sce)\n return sce"
},
{
"identifier": "ReliabilityDiagram",
"path": "calibrate/evaluation/reliability_diagram.py",
"snippet": "class ReliabilityDiagram(object):\n \"\"\"\n Plot Confidence Histogram and Reliability Diagram to visualize miscalibration.\n On classification, plot the gaps between average confidence and observed accuracy bin-wise over the confidence\n space [1]_, [2]_.\n On detection, plot the miscalibration w.r.t. the additional regression information provided (1-D or 2-D) [3]_.\n\n Parameters\n ----------\n bins : int or iterable, default: 10\n Number of bins used by the ACE/ECE/MCE.\n On detection mode: if int, use same amount of bins for each dimension (nx1 = nx2 = ... = bins).\n If iterable, use different amount of bins for each dimension (nx1, nx2, ... = bins).\n equal_intervals : bool, optional, default: True\n If True, the bins have the same width. If False, the bins are splitted to equalize\n the number of samples in each bin.\n detection : bool, default: False\n If False, the input array 'X' is treated as multi-class confidence input (softmax)\n with shape (n_samples, [n_classes]).\n If True, the input array 'X' is treated as a box predictions with several box features (at least\n box confidence must be present) with shape (n_samples, [n_box_features]).\n fmin : float, optional, default: None\n Minimum value for scale color.\n fmax : float, optional, default: None\n Maximum value for scale color.\n metric : str, default: 'ECE'\n Metric to measure miscalibration. Might be either 'ECE', 'ACE' or 'MCE'.\n\n References\n ----------\n .. [1] Chuan Guo, Geoff Pleiss, Yu Sun and Kilian Q. Weinberger:\n \"On Calibration of Modern Neural Networks.\"\n Proceedings of the 34th International Conference on Machine Learning-Volume 70. JMLR. org, 2017.\n `Get source online <https://arxiv.org/abs/1706.04599>`_\n\n .. [2] A. Niculescu-Mizil and R. Caruana:\n “Predicting good probabilities with supervised learning.”\n Proceedings of the 22nd International Conference on Machine Learning, 2005, pp. 625–632.\n `Get source online <https://www.cs.cornell.edu/~alexn/papers/calibration.icml05.crc.rev3.pdf>`_\n\n .. [3] Fabian Küppers, Jan Kronenberger, Amirhossein Shantia and Anselm Haselhoff:\n \"Multivariate Confidence Calibration for Object Detection.\"\n The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2020.\n `Get source online <https://openaccess.thecvf.com/content_CVPRW_2020/papers/w20/Kuppers_Multivariate_Confidence_Calibration_for_Object_Detection_CVPRW_2020_paper.pdf>`_\n \"\"\"\n\n def __init__(self, bins: Union[int, Iterable[int]] = 10, equal_intervals: bool = True,\n detection: bool = False, sample_threshold: int = 1,\n fmin: float = None, fmax: float = None,\n metric: str = 'ECE', style: str = \"curve\", **kwargs):\n \"\"\" Constructor. For detailed parameter documentation view classdocs. 
\"\"\"\n\n assert style in [\"curve\", \"bar\"]\n self.bins = bins\n self.detection = detection\n self.sample_threshold = sample_threshold\n self.fmin = fmin\n self.fmax = fmax\n self.metric = metric\n self.style = style\n\n if 'feature_names' in kwargs:\n self.feature_names = kwargs['feature_names']\n\n if 'title_suffix' in kwargs:\n self.title_suffix = kwargs['title_suffix']\n\n self._miscalibration = _Miscalibration(bins=bins, equal_intervals=equal_intervals,\n detection=detection, sample_threshold=sample_threshold)\n\n def plot(self, X: Union[Iterable[np.ndarray], np.ndarray], y: Union[Iterable[np.ndarray], np.ndarray],\n batched: bool = False, uncertainty: str = None, filename: str = None, tikz: bool = False,\n title_suffix: str = None, feature_names: List[str] = None, **save_args) -> Union[plt.Figure, str]:\n \"\"\"\n Reliability diagram to visualize miscalibration. This could be either in classical way for confidences only\n or w.r.t. additional properties (like x/y-coordinates of detection boxes, width, height, etc.). The additional\n properties get binned. Afterwards, the miscalibration will be calculated for each bin. This is\n visualized as a 2-D plots.\n\n Parameters\n ----------\n X : iterable of np.ndarray, or np.ndarray of shape=([n_bayes], n_samples, [n_classes/n_box_features])\n NumPy array with confidence values for each prediction on classification with shapes\n 1-D for binary classification, 2-D for multi class (softmax).\n If 3-D, interpret first dimension as samples from an Bayesian estimator with mulitple data points\n for a single sample (e.g. variational inference or MC dropout samples).\n If this is an iterable over multiple instances of np.ndarray and parameter batched=True,\n interpret this parameter as multiple predictions that should be averaged.\n On detection, this array must have 2 dimensions with number of additional box features in last dim.\n y : iterable of np.ndarray with same length as X or np.ndarray of shape=([n_bayes], n_samples, [n_classes])\n NumPy array with ground truth labels.\n Either as label vector (1-D) or as one-hot encoded ground truth array (2-D).\n If 3-D, interpret first dimension as samples from an Bayesian estimator with mulitple data points\n for a single sample (e.g. variational inference or MC dropout samples).\n If iterable over multiple instances of np.ndarray and parameter batched=True,\n interpret this parameter as multiple predictions that should be averaged.\n batched : bool, optional, default: False\n Multiple predictions can be evaluated at once (e.g. cross-validation examinations) using batched-mode.\n All predictions given by X and y are separately evaluated and their results are averaged afterwards\n for visualization.\n uncertainty : str, optional, default: False\n Define uncertainty handling if input X has been sampled e.g. by Monte-Carlo dropout or similar methods\n that output an ensemble of predictions per sample. 
Choose one of the following options:\n - flatten: treat everything as a separate prediction - this option will yield into a slightly better\n calibration performance but without the visualization of a prediction interval.\n - mean: compute Monte-Carlo integration to obtain a simple confidence estimate for a sample\n (mean) with a standard deviation that is visualized.\n filename : str, optional, default: None\n Optional filename to save the plotted figure.\n tikz : bool, optional, default: False\n If True, use 'tikzplotlib' package to return tikz-code for Latex rather than a Matplotlib figure.\n title_suffix : str, optional, default: None\n Suffix for plot title.\n feature_names : list, optional, default: None\n Names of the additional features that are attached to the axes of a reliability diagram.\n **save_args : args\n Additional arguments passed to 'matplotlib.pyplot.Figure.savefig' function if 'tikz' is False.\n If 'tikz' is True, the argument are passed to 'tikzplotlib.get_tikz_code' function.\n\n Returns\n -------\n matplotlib.pyplot.Figure if 'tikz' is False else str with tikz code.\n\n Raises\n ------\n AttributeError\n - If parameter metric is not string or string is not 'ACE', 'ECE' or 'MCE'\n - If parameter 'feature_names' is set but length does not fit to second dim of X\n - If no ground truth samples are provided\n - If length of bins parameter does not match the number of features given by X\n - If more than 3 feature dimensions (including confidence) are provided\n \"\"\"\n\n # assign deprecated constructor parameter to title_suffix and feature_names\n if hasattr(self, 'title_suffix') and title_suffix is None:\n title_suffix = self.title_suffix\n\n if hasattr(self, 'feature_names') and feature_names is None:\n feature_names = self.feature_names\n\n # check if metric is correct\n if not isinstance(self.metric, str):\n raise AttributeError('Parameter \\'metric\\' must be string with either \\'ece\\', \\'ace\\' or \\'mce\\'.')\n\n # check metrics parameter\n if self.metric.lower() not in ['ece', 'ace', 'mce']:\n raise AttributeError('Parameter \\'metric\\' must be string with either \\'ece\\', \\'ace\\' or \\'mce\\'.')\n else:\n self.metric = self.metric.lower()\n\n # perform checks and prepare input data\n X, matched, sample_uncertainty, bin_bounds, num_features = self._miscalibration.prepare(X, y, batched, uncertainty)\n if num_features > 3:\n raise AttributeError(\"Diagram is not defined for more than 2 additional feature dimensions.\")\n\n histograms = []\n for batch_X, batch_matched, batch_uncertainty, bounds in zip(X, matched, sample_uncertainty, bin_bounds):\n batch_histograms = self._miscalibration.binning(bounds, batch_X, batch_matched, batch_X[:, 0], batch_uncertainty[:, 0])\n histograms.append(batch_histograms[:-1])\n\n # no additional dimensions? compute standard reliability diagram\n if num_features == 1:\n fig1, fig2 = self.__plot_confidence_histogram(X, matched, histograms, bin_bounds, title_suffix)\n return fig1, fig2\n\n # one additional feature? compute 1D-plot\n elif num_features == 2:\n fig = self.__plot_1d(histograms, bin_bounds, title_suffix, feature_names)\n\n # two additional features? compute 2D plot\n elif num_features == 3:\n fig = self.__plot_2d(histograms, bin_bounds, title_suffix, feature_names)\n\n # number of dimensions exceeds 3? 
quit\n else:\n raise AttributeError(\"Diagram is not defined for more than 2 additional feature dimensions.\")\n\n # if tikz is true, create tikz code from matplotlib figure\n if tikz:\n\n # get tikz code for our specific figure and also pass filename to store possible bitmaps\n tikz_fig = tikzplotlib.get_tikz_code(fig, filepath=filename, **save_args)\n\n # close matplotlib figure when tikz figure is requested to save memory\n plt.close(fig)\n fig = tikz_fig\n\n # save figure either as matplotlib PNG or as tikz output file\n if filename is not None:\n if tikz:\n with open(filename, \"w\") as open_file:\n open_file.write(fig)\n else:\n fig.savefig(filename, **save_args)\n\n return fig\n\n @classmethod\n def __interpolate_grid(cls, metric_map: np.ndarray) -> np.ndarray:\n \"\"\" Interpolate missing values in a 2D-grid using the mean of the data. The interpolation is done inplace. \"\"\"\n\n # get all NaNs\n nans = np.isnan(metric_map)\n x = lambda z: z.nonzero()\n\n # get mean of the remaining values and interpolate missing by the mean\n mean = float(np.mean(metric_map[~nans]))\n metric_map[nans] = griddata(x(~nans), metric_map[~nans], x(nans), method='cubic', fill_value=mean)\n return metric_map\n\n def __plot_confidence_histogram(self, X: List[np.ndarray], matched: List[np.ndarray], histograms: List[np.ndarray],\n bin_bounds: List, title_suffix: str = None) -> plt.Figure:\n \"\"\" Plot confidence histogram and reliability diagram to visualize miscalibration for condidences only. \"\"\"\n\n # get number of bins (self.bins has not been processed yet)\n n_bins = len(bin_bounds[0][0])-1\n\n median_confidence = [(bounds[0][1:] + bounds[0][:-1]) * 0.5 for bounds in bin_bounds]\n mean_acc, mean_conf = [], []\n for batch_X, batch_matched, batch_hist, batch_median in zip(X, matched, histograms, median_confidence):\n acc_hist, conf_hist, _, num_samples_hist = batch_hist\n empty_bins, = np.nonzero(num_samples_hist == 0)\n\n # calculate overall mean accuracy and confidence\n mean_acc.append(np.mean(batch_matched))\n mean_conf.append(np.mean(batch_X))\n\n # set empty bins to median bin value\n acc_hist[empty_bins] = batch_median[empty_bins]\n conf_hist[empty_bins] = batch_median[empty_bins]\n\n # convert num_samples to relative afterwards (inplace denoted by [:])\n num_samples_hist[:] = num_samples_hist / np.sum(num_samples_hist)\n\n # import ipdb; ipdb.set_trace()\n # get mean histograms and values over all batches\n acc = np.mean([hist[0] for hist in histograms], axis=0)\n conf = np.mean([hist[1] for hist in histograms], axis=0)\n uncertainty = np.sqrt(np.mean([hist[2] for hist in histograms], axis=0))\n num_samples = np.mean([hist[3] for hist in histograms], axis=0)\n mean_acc = np.mean(mean_acc)\n mean_conf = np.mean(mean_conf)\n median_confidence = np.mean(median_confidence, axis=0)\n bar_width = np.mean([np.diff(bounds[0]) for bounds in bin_bounds], axis=0)\n\n # compute credible interval of uncertainty\n p = 0.05\n z_score = norm.ppf(1. 
- (p / 2))\n uncertainty = z_score * uncertainty\n\n # if no uncertainty is given, set variable uncertainty to None in order to prevent drawing error bars\n if np.count_nonzero(uncertainty) == 0:\n uncertainty = None\n\n # calculate deviation\n deviation = conf - acc\n\n fig1 = plt.figure(\"Reliability {}\".format(title_suffix))\n ax = fig1.add_subplot()\n # set title suffix if given\n # if title_suffix is not None:\n # ax.set_title('Reliability Diagram' + \" - \" + title_suffix)\n # else:\n # ax.set_title('Reliability Diagram')\n \n # create two overlaying bar charts with bin accuracy and the gap of each bin to the perfect calibration\n if self.style == \"bar\":\n # ax.bar(median_confidence, height=median_confidence, width=bar_width, align='center',\n # edgecolor='black', color='pink', alpha=0.6)\n ax.bar(median_confidence, height=acc, width=bar_width, align='center',\n edgecolor='black', yerr=uncertainty, capsize=2)\n # ax.bar(median_confidence, height=deviation, bottom=acc, width=bar_width, align='center',\n # edgecolor='black', color='red', alpha=0.6)\n else:\n ax.plot(median_confidence, acc, color=\"blue\", linestyle=\"-\")\n\n # draw diagonal as perfect calibration line\n ax.plot([0, 1], [0, 1], color='red', linestyle='-.')\n # ax.set_xlim((0.0, 1.0))\n # ax.set_ylim((0.0, 1.0))\n\n # labels and legend of second plot\n # ax.set_xlabel('Confidence')\n # ax.set_ylabel('Accuracy')\n ax.legend(['Output', 'Expected'], fontsize=14)\n\n\n fig2 = plt.figure(\"Conf. Hist.\")\n ax = fig2.add_subplot()\n ax.bar(median_confidence, height=num_samples, width=bar_width, align='center', edgecolor='black')\n ax.plot([mean_acc, mean_acc], [0.0, 1.0], color='red', linestyle='--')\n ax.plot([mean_conf, mean_conf], [0.0, 1.0], color='blue', linestyle='--')\n ax.set_xlim((0.0, 1.0))\n ax.set_ylim((0.0, 1.0))\n\n plt.tight_layout()\n\n return fig1, fig2\n\n # -----------------------------------------\n # plot data distribution histogram first\n fig, axes = plt.subplots(2, squeeze=True, figsize=(7, 6))\n ax = axes[0]\n\n # set title suffix is given\n if title_suffix is not None:\n ax.set_title('Confidence Histogram - ' + title_suffix)\n else:\n ax.set_title('Confidence Histogram')\n\n # create bar chart with relative amount of samples in each bin\n # as well as average confidence and accuracy\n ax.bar(median_confidence, height=num_samples, width=bar_width, align='center', edgecolor='black')\n ax.plot([mean_acc, mean_acc], [0.0, 1.0], color='black', linestyle='--')\n ax.plot([mean_conf, mean_conf], [0.0, 1.0], color='gray', linestyle='--')\n ax.set_xlim((0.0, 1.0))\n ax.set_ylim((0.0, 1.0))\n\n # labels and legend\n ax.set_xlabel('Confidence')\n ax.set_ylabel('% of Samples')\n ax.legend(['Avg. Accuracy', 'Avg. 
Confidence', 'Relative Amount of Samples'])\n\n # second plot: reliability histogram\n ax = axes[1]\n\n # set title suffix if given\n if title_suffix is not None:\n ax.set_title('Reliability Diagram' + \" - \" + title_suffix)\n else:\n ax.set_title('Reliability Diagram')\n\n # create two overlaying bar charts with bin accuracy and the gap of each bin to the perfect calibration\n ax.bar(median_confidence, height=acc, width=bar_width, align='center',\n edgecolor='black', yerr=uncertainty, capsize=4)\n ax.bar(median_confidence, height=deviation, bottom=acc, width=bar_width, align='center',\n edgecolor='black', color='red', alpha=0.6)\n\n # draw diagonal as perfect calibration line\n ax.plot([0, 1], [0, 1], color='red', linestyle='--')\n ax.set_xlim((0.0, 1.0))\n ax.set_ylim((0.0, 1.0))\n\n # labels and legend of second plot\n ax.set_xlabel('Confidence')\n ax.set_ylabel('Accuracy')\n ax.legend(['Perfect Calibration', 'Output', 'Gap'])\n\n plt.tight_layout()\n return fig\n\n def __plot_1d(self, histograms: List[np.ndarray], bin_bounds: List,\n title_suffix: str = None, feature_names: List[str] = None) -> plt.Figure:\n \"\"\" Plot 1-D miscalibration w.r.t. one additional feature. \"\"\"\n\n # z score for credible interval (if uncertainty is given)\n p = 0.05\n z_score = norm.ppf(1. - (p / 2))\n\n results = []\n for batch_hist, bounds in zip(histograms, bin_bounds):\n result = self._miscalibration.process(self.metric, *batch_hist)\n bin_median = (bounds[-1][:-1] + bounds[-1][1:]) * 0.5\n\n # interpolate missing values\n x = np.linspace(0.0, 1.0, 1000)\n miscalibration = interp1d(bin_median, result[1], kind='cubic', fill_value='extrapolate')(x)\n acc = interp1d(bin_median, result[2], kind='cubic', fill_value='extrapolate')(x)\n conf = interp1d(bin_median, result[3], kind='cubic', fill_value='extrapolate')(x)\n uncertainty = interp1d(bin_median, result[4], kind='cubic', fill_value='extrapolate')(x)\n\n results.append((miscalibration, acc, conf, uncertainty))\n\n # get mean over all batches and convert mean variance to a std deviation afterwards\n miscalibration = np.mean([result[0] for result in results], axis=0)\n acc = np.mean([result[1] for result in results], axis=0)\n conf = np.mean([result[2] for result in results], axis=0)\n uncertainty = np.sqrt(np.mean([result[3] for result in results], axis=0))\n\n # draw routines\n fig, ax1 = plt.subplots()\n conf_color = 'tab:blue'\n\n # set name of the additional feature\n if feature_names is not None:\n ax1.set_xlabel(feature_names[0])\n\n ax1.set_xlim([0.0, 1.0])\n ax1.set_ylim([0.0, 1.0])\n ax1.set_ylabel('accuracy/confidence', color=conf_color)\n\n # draw confidence and accuracy on the same (left) axis\n x = np.linspace(0.0, 1.0, 1000)\n line1, = ax1.plot(x, acc, '-.', color='black')\n line2, = ax1.plot(x, conf, '--', color=conf_color)\n ax1.tick_params('y', labelcolor=conf_color)\n\n # if uncertainty is given, compute average of variances over all bins and get std deviation by sqrt\n # compute credible interval afterwards\n # define lower and upper bound\n uncertainty = z_score * uncertainty\n lb = conf - uncertainty\n ub = conf + uncertainty\n\n # create second axis for miscalibration\n ax11 = ax1.twinx()\n miscal_color = 'tab:red'\n line3, = ax11.plot(x, miscalibration, '-', color=miscal_color)\n\n if self.metric == 'ace':\n ax11.set_ylabel('Average Calibration Error (ACE)', color=miscal_color)\n elif self.metric == 'ece':\n ax11.set_ylabel('Expected Calibration Error (ECE)', color=miscal_color)\n elif self.metric == 'mce':\n 
ax11.set_ylabel('Maximum Calibration Error (MCE)', color=miscal_color)\n\n ax11.tick_params('y', labelcolor=miscal_color)\n\n # set miscalibration limits if given\n if self.fmin is not None and self.fmax is not None:\n ax11.set_ylim([self.fmin, self.fmax])\n\n ax1.legend((line1, line2, line3),\n ('accuracy', 'confidence', '%s' % self.metric.upper()),\n loc='best')\n\n if title_suffix is not None:\n ax1.set_title('Accuracy, confidence and %s\\n- %s -' % (self.metric.upper(), title_suffix))\n else:\n ax1.set_title('Accuracy, confidence and %s' % self.metric.upper())\n\n ax1.grid(True)\n\n fig.tight_layout()\n return fig\n\n def __plot_2d(self, histograms: List[np.ndarray], bin_bounds: List[np.ndarray],\n title_suffix: str = None, feature_names: List[str] = None) -> plt.Figure:\n \"\"\" Plot 2D miscalibration reliability diagram heatmap. \"\"\"\n\n results = []\n for batch_hist in histograms:\n result = self._miscalibration.process(self.metric, *batch_hist)\n\n # interpolate 2D data inplace to avoid \"empty\" bins\n batch_samples = result[-1]\n for map in result[1:-1]:\n map[batch_samples == 0.0] = 0.0\n # TODO: check what to do here\n # map[batch_samples == 0.0] = np.nan\n # self.__interpolate_grid(map)\n\n # on interpolation, it is sometimes possible that empty bins have negative values\n # however, this is invalid for variance\n result[4][result[4] < 0] = 0.0\n results.append(result)\n\n # calculate mean over all batches and transpose\n # transpose is necessary. Miscalibration is calculated in the order given by the features\n # however, imshow expects arrays in format [rows, columns] or [height, width]\n # e.g., miscalibration with additional x/y (in this order) will be drawn [y, x] otherwise\n miscalibration = np.mean([result[1] for result in results], axis=0).T\n acc = np.mean([result[2] for result in results], axis=0).T\n conf = np.mean([result[3] for result in results], axis=0).T\n mean = np.mean([result[4] for result in results], axis=0).T\n uncertainty = np.sqrt(mean)\n\n # -----------------------------------------------------------------------------------------\n # draw routines\n\n def set_axis(ax, map, vmin=None, vmax=None):\n \"\"\" Generic function to set all subplots equally \"\"\"\n # TODO: set proper fmin, fmax values\n img = ax.imshow(map, origin='lower', interpolation=\"gaussian\", cmap='jet', aspect=1, vmin=vmin, vmax=vmax)\n\n # set correct x- and y-ticks\n ax.set_xticks(np.linspace(0., len(bin_bounds[0][1])-2, 5))\n ax.set_xticklabels(np.linspace(0., 1., 5))\n ax.set_yticks(np.linspace(0., len(bin_bounds[0][2])-2, 5))\n ax.set_yticklabels(np.linspace(0., 1., 5))\n ax.set_xlim([0.0, len(bin_bounds[0][1])-2])\n ax.set_ylim([0.0, len(bin_bounds[0][2])-2])\n\n # draw feature names on axes if given\n if feature_names is not None:\n ax.set_xlabel(feature_names[0])\n ax.set_ylabel(feature_names[1])\n\n fig.colorbar(img, ax=ax, fraction=0.046, pad=0.04)\n\n return ax, img\n\n # -----------------------------------\n\n # create only two subplots if no additional uncertainty is given\n if np.count_nonzero(uncertainty) == 0:\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 5))\n\n # process additional uncertainty if given\n else:\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, squeeze=True, figsize=(10, 10))\n ax4, img4 = set_axis(ax4, uncertainty)\n\n if title_suffix is not None:\n ax4.set_title(\"Confidence std deviation\\n- %s -\" % title_suffix)\n else:\n ax4.set_title(\"Confidence std deviation\")\n\n ax1, img1 = set_axis(ax1, acc, vmin=0, vmax=1)\n ax2, img2 = 
set_axis(ax2, conf, vmin=0, vmax=1)\n ax3, img3 = set_axis(ax3, miscalibration, vmin=self.fmin, vmax=self.fmax)\n\n # draw title if given\n if title_suffix is not None:\n ax1.set_title(\"Average accuracy\\n- %s -\" % title_suffix)\n ax2.set_title(\"Average confidence\\n- %s -\" % title_suffix)\n ax3.set_title(\"%s\\n- %s -\" % (self.metric.upper(), title_suffix))\n else:\n ax1.set_title(\"Average accuracy\")\n ax2.set_title(\"Average confidence\")\n ax3.set_title(\"%s\" % self.metric.upper())\n\n # -----------------------------------------------------------------------------------------\n\n return fig"
},
{
"identifier": "to_numpy",
"path": "calibrate/utils/torch_helper.py",
"snippet": "def to_numpy(x: torch.Tensor):\n return x.detach().cpu().numpy()"
}
] | import logging
import numpy as np
import torch
import torch.nn.functional as F
from terminaltables import AsciiTable
from torch import nn
from .evaluator import DatasetEvaluator
from .metrics import ECELoss, AdaptiveECELoss, ClasswiseECELoss
from .reliability_diagram import ReliabilityDiagram
from calibrate.utils.torch_helper import to_numpy | 8,777 |
logger = logging.getLogger(__name__)
class CalibrateEvaluator(DatasetEvaluator):
def __init__(self, num_classes, num_bins=15, device="cuda:0") -> None:
self.num_classes = num_classes
self.num_bins = num_bins
self.device = device
self.reset()
def reset(self) -> None:
self.logits = None
self.labels = None
def num_samples(self):
return (
self.labels.shape[0]
if self.labels is not None
else 0
)
    def main_metric(self) -> str:
return "ece"
def update(self, logits: torch.Tensor, labels: torch.Tensor) -> None:
"""update
Args:
logits (torch.Tensor): n x num_classes
label (torch.Tensor): n x 1
"""
assert logits.shape[0] == labels.shape[0]
if self.logits is None:
self.logits = logits
self.labels = labels
else:
self.logits = torch.cat((self.logits, logits), dim=0)
self.labels = torch.cat((self.labels, labels), dim=0)
def mean_score(self, print=False, all_metric=True):
nll_criterion = nn.CrossEntropyLoss().to(self.device)
ece_criterion = ECELoss(self.num_bins).to(self.device)
aece_criterion = AdaptiveECELoss(self.num_bins).to(self.device)
cece_criterion = ClasswiseECELoss(self.num_bins).to(self.device)
nll = nll_criterion(self.logits, self.labels).item()
ece = ece_criterion(self.logits, self.labels).item()
aece = aece_criterion(self.logits, self.labels).item()
cece = cece_criterion(self.logits, self.labels).item()
metric = {"nll": nll, "ece": ece, "aece": aece, "cece": cece}
columns = ["samples", "nll", "ece", "aece", "cece"]
table_data = [columns]
table_data.append(
[
self.num_samples(),
"{:.5f}".format(nll),
"{:.5f}".format(ece),
"{:.5f}".format(aece),
"{:.5f}".format(cece),
]
)
if print:
table = AsciiTable(table_data)
logger.info("\n" + table.table)
if all_metric:
return metric, table_data
else:
return metric[self.main_metric()]
def plot_reliability_diagram(self, title=""):
diagram = ReliabilityDiagram(bins=25, style="curve")
probs = F.softmax(self.logits, dim=1)
fig_reliab, fig_hist = diagram.plot(
|
logger = logging.getLogger(__name__)
class CalibrateEvaluator(DatasetEvaluator):
def __init__(self, num_classes, num_bins=15, device="cuda:0") -> None:
self.num_classes = num_classes
self.num_bins = num_bins
self.device = device
self.reset()
def reset(self) -> None:
self.logits = None
self.labels = None
def num_samples(self):
return (
self.labels.shape[0]
if self.labels is not None
else 0
)
    def main_metric(self) -> str:
return "ece"
def update(self, logits: torch.Tensor, labels: torch.Tensor) -> None:
"""update
Args:
logits (torch.Tensor): n x num_classes
label (torch.Tensor): n x 1
"""
assert logits.shape[0] == labels.shape[0]
if self.logits is None:
self.logits = logits
self.labels = labels
else:
self.logits = torch.cat((self.logits, logits), dim=0)
self.labels = torch.cat((self.labels, labels), dim=0)
def mean_score(self, print=False, all_metric=True):
nll_criterion = nn.CrossEntropyLoss().to(self.device)
ece_criterion = ECELoss(self.num_bins).to(self.device)
aece_criterion = AdaptiveECELoss(self.num_bins).to(self.device)
cece_criterion = ClasswiseECELoss(self.num_bins).to(self.device)
nll = nll_criterion(self.logits, self.labels).item()
ece = ece_criterion(self.logits, self.labels).item()
aece = aece_criterion(self.logits, self.labels).item()
cece = cece_criterion(self.logits, self.labels).item()
metric = {"nll": nll, "ece": ece, "aece": aece, "cece": cece}
columns = ["samples", "nll", "ece", "aece", "cece"]
table_data = [columns]
table_data.append(
[
self.num_samples(),
"{:.5f}".format(nll),
"{:.5f}".format(ece),
"{:.5f}".format(aece),
"{:.5f}".format(cece),
]
)
if print:
table = AsciiTable(table_data)
logger.info("\n" + table.table)
if all_metric:
return metric, table_data
else:
return metric[self.main_metric()]
def plot_reliability_diagram(self, title=""):
diagram = ReliabilityDiagram(bins=25, style="curve")
probs = F.softmax(self.logits, dim=1)
fig_reliab, fig_hist = diagram.plot( | to_numpy(probs), to_numpy(self.labels), | 5 | 2023-10-23 09:55:13+00:00 | 12k |
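Note: the CalibrateEvaluator above accumulates logits and labels, then scores them with ECELoss, AdaptiveECELoss and ClasswiseECELoss and plots the per-bin gaps via ReliabilityDiagram. As a rough, self-contained sketch of the standard expected-calibration-error quantity these criteria are built around (an illustration in plain PyTorch, not the repo's ECELoss implementation; the function name, binning scheme and toy data below are assumptions):

import torch
import torch.nn.functional as F

def expected_calibration_error(logits: torch.Tensor, labels: torch.Tensor, n_bins: int = 15) -> float:
    # Bin predictions by max-softmax confidence, then average |accuracy - confidence| per bin,
    # weighted by the fraction of samples falling into that bin.
    probs = F.softmax(logits, dim=1)
    confidences, predictions = probs.max(dim=1)
    accuracies = predictions.eq(labels).float()
    bin_edges = torch.linspace(0.0, 1.0, n_bins + 1)
    ece = torch.zeros(1)
    for lo, hi in zip(bin_edges[:-1], bin_edges[1:]):
        in_bin = (confidences > lo) & (confidences <= hi)
        prop = in_bin.float().mean()
        if prop > 0:
            gap = (confidences[in_bin].mean() - accuracies[in_bin].mean()).abs()
            ece += gap * prop
    return ece.item()

# toy check with random logits/labels
logits, labels = torch.randn(128, 4), torch.randint(0, 4, (128,))
print(expected_calibration_error(logits, labels))

mean_score then reports this kind of quantity alongside NLL, adaptive ECE and classwise ECE, while plot_reliability_diagram visualizes the same per-bin confidence/accuracy gaps.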
myshell-ai/AIlice | ailice/AIliceWeb.py | [
{
"identifier": "config",
"path": "ailice/common/AConfig.py",
"snippet": "class AConfig():\n def __init__(self):\n def Initialize(self, needOpenaiGPTKey = False):\n def Load(self, configFile: str) -> dict:\n def Store(self, configFile: str):"
},
{
"identifier": "AProcessor",
"path": "ailice/core/AProcessor.py",
"snippet": "class AProcessor():\n def __init__(self, name, modelID, promptName, outputCB, collection = None):\n self.name = name\n self.modelID = modelID\n self.llm = llmPool.GetModel(modelID)\n self.interpreter = AInterpreter()\n self.conversation = AConversations()\n self.subProcessors = dict()\n self.modules = {}\n \n self.RegisterModules([config.services['storage']['addr']])\n self.interpreter.RegisterAction(\"CALL\", {\"func\": self.EvalCall})\n self.interpreter.RegisterAction(\"RESPOND\", {\"func\": self.EvalRespond})\n self.interpreter.RegisterAction(\"COMPLETE\", {\"func\": self.EvalComplete})\n self.interpreter.RegisterAction(\"STORE\", {\"func\": self.EvalStore})\n self.interpreter.RegisterAction(\"QUERY\", {\"func\": self.EvalQuery})\n self.interpreter.RegisterAction(\"WAIT\", {\"func\": self.EvalWait})\n \n self.outputCB = outputCB\n self.collection = \"ailice\" + str(time.time()) if collection is None else collection\n self.prompt = promptsManager[promptName](processor=self, storage=self.modules['storage']['module'], collection=self.collection, conversations=self.conversation, formatter=self.llm.formatter, outputCB=self.outputCB)\n for nodeType, action in self.prompt.GetActions().items():\n self.interpreter.RegisterAction(nodeType, action)\n for nodeType, patterns in self.prompt.GetPatterns().items():\n for p in patterns:\n self.interpreter.RegisterPattern(nodeType, p[\"re\"], p[\"isEntry\"])\n self.result = \"None.\"\n return\n \n def RegisterAction(self, nodeType: str, action: dict):\n self.interpreter.RegisterAction(nodeType, action)\n return\n \n def RegisterModules(self, moduleAddrs):\n ret = []\n for moduleAddr in moduleAddrs:\n module = clientPool.GetClient(moduleAddr)\n if (not hasattr(module, \"ModuleInfo\")) or (not callable(getattr(module, \"ModuleInfo\"))):\n raise Exception(\"EXCEPTION: ModuleInfo() not found in module.\")\n info = module.ModuleInfo()\n if \"NAME\" not in info:\n raise Exception(\"EXCEPTION: 'NAME' is not found in module info.\")\n if \"ACTIONS\" not in info:\n raise Exception(\"EXCEPTION: 'ACTIONS' is not found in module info.\")\n \n self.modules[info['NAME']] = {'addr': moduleAddr, 'module': module}\n for actionName, actionMeta in info[\"ACTIONS\"].items():\n ret.append({\"action\": actionName, \"signature\": actionMeta[\"sig\"], \"prompt\": actionMeta[\"prompt\"]})\n actionFunc = actionMeta[\"sig\"][:actionMeta[\"sig\"].find(\"(\")]\n self.RegisterAction(nodeType=actionName, action={\"func\": self.CreateActionCB(actionName, module, actionFunc),\n \"signatureExpr\": actionMeta[\"sig\"]})\n return ret\n \n def CreateActionCB(self, actionName, module, actionFunc):\n def callback(*args,**kwargs):\n return f\"{actionName}_RESULT=[{getattr(module, actionFunc)(*args,**kwargs)}]\"\n return callback\n \n def GetPromptName(self) -> str:\n return self.prompt.PROMPT_NAME\n \n def __call__(self, txt: str) -> str:\n self.conversation.Add(role = \"USER\", msg = txt)\n self.EvalStore(txt)\n self.outputCB(\"<\")\n self.outputCB(f\"USER_{self.name}\", txt)\n\n while True:\n prompt = self.prompt.BuildPrompt()\n ret = self.llm.Generate(prompt, proc=partial(self.outputCB, \"ASSISTANT_\" + self.name), endchecker=self.interpreter.EndChecker, temperature = config.temperature)\n self.conversation.Add(role = \"ASSISTANT\", msg = ret)\n self.EvalStore(ret)\n self.result = ret\n \n resp = self.interpreter.EvalEntries(ret)\n \n if \"\" != resp:\n self.conversation.Add(role = \"SYSTEM\", msg = \"Function returned: {\" + resp + \"}\")\n self.EvalStore(\"Function returned: 
{\" + resp + \"}\")\n self.outputCB(f\"SYSTEM_{self.name}\", resp)\n else:\n self.outputCB(\">\")\n return self.result\n\n def EvalCall(self, agentType: str, agentName: str, msg: str) -> str:\n if agentType not in promptsManager:\n return f\"CALL FAILED. specified agentType {agentType} does not exist. This may be caused by using an agent type that does not exist or by getting the parameters in the wrong order.\"\n if (agentName not in self.subProcessors) or (agentType != self.subProcessors[agentName].GetPromptName()):\n self.subProcessors[agentName] = AProcessor(name=agentName, modelID=self.modelID, promptName=agentType, outputCB=self.outputCB, collection=self.collection)\n self.subProcessors[agentName].RegisterModules([self.modules[moduleName]['addr'] for moduleName in self.modules])\n resp = f\"Agent {agentName} returned: {self.subProcessors[agentName](msg)}\"\n return resp\n \n def EvalRespond(self, message: str):\n self.result = message\n return\n \n def EvalStore(self, txt: str):\n if not self.modules['storage']['module'].Store(self.collection, txt):\n return \"STORE FAILED, please check your input.\"\n return\n \n def EvalQuery(self, keywords: str) -> str:\n res = self.modules['storage']['module'].Query(self.collection, keywords)\n if (0 == len(res)) or (res[0][1] > 0.5):\n return \"Nothing found.\"\n return \"QUERY_RESULT={\" + res[0][0] +\"}\"\n \n def EvalComplete(self, result: str):\n self.result = result\n self.prompt.Reset()\n return\n \n def EvalWait(self, duration: int) -> str:\n time.sleep(duration)\n return f\"Waiting is over. It has been {duration} seconds.\"\n \n def ToJson(self) -> str:\n return {\"name\": self.name,\n \"modelID\": self.modelID,\n \"conversations\": self.conversation.ToJson(),\n \"subProcessors\": {k: p.ToJson() for k, p in self.subProcessors.items()},\n \"modules\": {k:{'addr': m['addr']} for k, m in self.modules.items()},\n \"collection\": self.collection}"
},
{
"identifier": "llmPool",
"path": "ailice/core/llm/ALLMPool.py",
"snippet": "class ALLMPool():\n def __init__(self):\n def ParseID(self, id):\n def Init(self, llmIDs: [str]):\n def GetModel(self, modelID: str):"
},
{
"identifier": "ALogger",
"path": "ailice/common/utils/ALogger.py",
"snippet": "class ALogger():\n def __init__(self, speech):\n self.colorMap = {'CONTEXT': 'blue', 'USER': 'green', 'ASSISTANT': 'green', 'SYSTEM': 'yellow', 'OUTPUT': 'green'}\n self.depth = -1\n self.speech = speech\n self.queue = queue.Queue()\n return\n \n def ParseChannel(self, channel: str) -> tuple[str]:\n l = channel.find(\"_\")\n channelType, agentName = channel[:l], channel[l+1:]\n return channelType, agentName\n \n def SinkPrint(self, channel: str, txt: str = None, action: str = ''):\n channelType, agentName = self.ParseChannel(channel)\n if 'open' == action:\n print(colored(channel + \": \", self.colorMap[channelType]), txt, end=\"\", flush=True)\n elif 'append' == action:\n print(txt, end=\"\", flush=True)\n elif 'close' == action:\n print(txt, end=\"\", flush=True)\n print(\"\")\n else:\n print(colored(channel + \": \", self.colorMap[channelType]), txt)\n return\n \n def SinkSpeech(self, channel: str, txt: str = None, action: str = ''):\n self.speech.Play(txt)\n return\n \n def SinkQueue(self, channel: str, txt: str = None, action: str = ''):\n if 'open' == action:\n self.txtBuf = {\"channel\": channel, \"txt\": txt}\n elif 'append' == action:\n assert self.txtBuf['channel'] == channel, \"assert self.txtBuf['channel'] == channel FAILED.\"\n self.txtBuf['txt'] += txt\n elif 'close' == action:\n assert self.txtBuf['channel'] == channel, \"assert self.txtBuf['channel'] == channel FAILED.\"\n self.txtBuf['txt'] += txt\n self.queue.put((channel, self.txtBuf['txt']))\n else:\n self.queue.put((channel, txt))\n return\n\n def Receiver(self, channel: str, txt: str = None, action: str = ''):\n braketMap = {\"<\": 1, \">\": -1}\n self.depth += (braketMap[channel] if channel in braketMap else 0)\n \n channelType, _ = self.ParseChannel(channel)\n if (channelType in [\"ASSISTANT\", \"SYSTEM\"]):\n self.SinkPrint(channel=channel, txt=txt, action=action)\n if config.speechOn and ((channelType in [\"ASSISTANT\"]) and (0 == self.depth)):\n self.SinkSpeech(channel=channel, txt=txt, action=action)\n if ((channelType in [\"OUTPUT\"]) and (1 == self.depth)) or\\\n (((channelType in [\"ASSISTANT\"]) and (0 == self.depth))):\n self.SinkQueue(channel=channel, txt=txt, action=action)\n if (channel in [\">\"]) and (-1 == self.depth):\n self.SinkQueue(channel=channel, txt=None, action=None)\n return"
},
{
"identifier": "clientPool",
"path": "ailice/common/ARemoteAccessors.py",
"snippet": "class AClientPool():\n def __init__(self):\n def Init(self):\n def GetClient(self, moduleAddr: str):"
},
{
"identifier": "StartServices",
"path": "ailice/AServices.py",
"snippet": "def StartServices():\n if config.localExecution:\n config.services['scripter'] = {\"cmd\": \"docker stop scripter; python3 -m ailice.modules.AScripter\", \"addr\": \"tcp://127.0.0.1:59000\"}\n else:\n try:\n subprocess.run(\"docker -v\", shell=True, check=True)\n except Exception:\n print(\"It looks like docker is not installed correctly. If you do not plan to use other virtual environments to execute scripts, please ensure that docker is installed correctly or use --localExecution to execute locally.\")\n \n for serviceName, cfg in config.services.items():\n if (\"speech\" == serviceName) and not config.speechOn:\n continue\n if (\"cmd\" not in cfg) or (\"\" == cfg['cmd'].strip()):\n print(f\"{serviceName}'s cmd is not configured and will attempt to connect {cfg['addr']} directly.\")\n continue\n p = subprocess.Popen(cfg['cmd'], shell=True, cwd=None, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n processes.append(p)\n print(serviceName,\" started.\")\n signal.signal(signal.SIGINT, TerminateSubprocess)\n signal.signal(signal.SIGTERM, TerminateSubprocess)"
},
{
"identifier": "promptsManager",
"path": "ailice/common/APrompts.py",
"snippet": "class APromptsManager():\n def __init__(self):\n def RegisterPrompt(self, promptClass):\n def __getitem__(self, promptName: str):\n def __iter__(self):"
},
{
"identifier": "APromptChat",
"path": "ailice/prompts/APromptChat.py",
"snippet": "class APromptChat():\n PROMPT_NAME = \"chat\"\n\n def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):\n self.processor = processor\n self.storage = storage\n self.collection = collection\n self.conversations = conversations\n self.formatter = formatter\n self.outputCB = outputCB\n self.prompt0 = \"You are a helpful assistant.\"\n self.PATTERNS = {}\n self.ACTIONS= {}\n return\n \n def Reset(self):\n return\n \n def GetPatterns(self):\n return self.PATTERNS\n \n def GetActions(self):\n return self.ACTIONS\n \n def ParameterizedBuildPrompt(self, n: int):\n prompt = f\"\"\"\n{self.prompt0}\n\"\"\"\n #prompt += \"\\nConversations:\"\n ret = self.formatter(prompt0 = prompt, conversations = self.conversations.GetConversations(frm = -n))\n return ret, self.formatter.Len(ret)\n \n def BuildPrompt(self):\n prompt, n = ConstructOptPrompt(self.ParameterizedBuildPrompt, low=1, high=len(self.conversations), maxLen=int(self.processor.llm.contextWindow * config.contextWindowRatio))\n if prompt is None:\n prompt = self.ParameterizedBuildPrompt(1)\n return prompt"
},
{
"identifier": "APromptMain",
"path": "ailice/prompts/APromptMain.py",
"snippet": "class APromptMain():\n PROMPT_NAME = \"main\"\n\n def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):\n self.processor = processor\n self.storage = storage\n self.collection = collection\n self.conversations = conversations\n self.formatter = formatter\n self.outputCB = outputCB\n self.prompt0 = read_text(\"ailice.prompts\", \"prompt_simple.txt\")\n self.PATTERNS = {\"CALL\": [{\"re\": GenerateRE4FunctionCalling(\"CALL<!|agentType: str, agentName: str, msg: str|!> -> str\"), \"isEntry\": True}]}\n self.ACTIONS= {}\n return\n \n def Recall(self, key: str):\n ret = self.storage.Query(self.collection, key)\n if (0 != len(ret)) and (ret[0][1] <= 0.5):\n return ret[0][0]\n else:\n return \"None.\"\n \n def Reset(self):\n return\n \n def GetPatterns(self):\n return self.PATTERNS\n \n def GetActions(self):\n return self.ACTIONS\n \n def ParameterizedBuildPrompt(self, n: int):\n context = str(self.formatter(prompt0 = \"\", conversations = self.conversations.GetConversations(frm = -1), encode = False))\n prompt = f\"\"\"\n{self.prompt0}\n\nEnd of general instructions.\n\nActive Agents: {[k+\": agentType \"+p.GetPromptName() for k,p in self.processor.subProcessors.items()]}\nRelevant Information:\n{self.Recall(context)}\n\"\"\"\n #prompt += \"\\nConversations:\"\n ret = self.formatter(prompt0 = prompt, conversations = self.conversations.GetConversations(frm = -n))\n return ret, self.formatter.Len(ret)\n \n def BuildPrompt(self):\n prompt, n = ConstructOptPrompt(self.ParameterizedBuildPrompt, low=1, high=len(self.conversations), maxLen=int(self.processor.llm.contextWindow * config.contextWindowRatio))\n if prompt is None:\n prompt = self.ParameterizedBuildPrompt(1)\n return prompt"
},
{
"identifier": "APromptSearchEngine",
"path": "ailice/prompts/APromptSearchEngine.py",
"snippet": "class APromptSearchEngine():\n PROMPT_NAME = \"search-engine\"\n\n def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):\n self.processor = processor\n self.conversations = conversations\n self.formatter = formatter\n self.outputCB = outputCB\n self.prompt0 = read_text(\"ailice.prompts\", \"prompt_searchengine.txt\")\n self.PATTERNS = {\"QUERY\": [{\"re\": GenerateRE4FunctionCalling(\"QUERY<!|request: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"ARXIV\": [{\"re\": GenerateRE4FunctionCalling(\"ARXIV<!|keywords: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"SCROLLDOWNARXIV\": [{\"re\": GenerateRE4FunctionCalling(\"SCROLLDOWNARXIV<!||!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"GOOGLE\": [{\"re\": GenerateRE4FunctionCalling(\"GOOGLE<!|keywords: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"SCROLLDOWNGOOGLE\": [{\"re\": GenerateRE4FunctionCalling(\"SCROLLDOWNGOOGLE<!||!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"DUCKDUCKGO\": [{\"re\": GenerateRE4FunctionCalling(\"DUCKDUCKGO<!|keywords: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"SCROLLDOWNDUCKDUCKGO\": [{\"re\": GenerateRE4FunctionCalling(\"SCROLLDOWNDUCKDUCKGO<!||!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"BROWSE\": [{\"re\": GenerateRE4FunctionCalling(\"BROWSE<!|url: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"SCROLLDOWN\": [{\"re\": GenerateRE4FunctionCalling(\"SCROLLDOWN<!||!> -> str\"), \"isEntry\": True}],\n \"RESPOND\": [{\"re\": GenerateRE4FunctionCalling(\"RESPOND<!|message: str|!> -> None\", faultTolerance = True), \"isEntry\": True}]}\n self.ACTIONS= {}\n return\n \n def Reset(self):\n return\n \n def GetPatterns(self):\n return self.PATTERNS\n \n def GetActions(self):\n return self.ACTIONS\n \n def ParameterizedBuildPrompt(self, n: int):\n prompt = f\"\"\"\n{self.prompt0}\n\nEnd of general instructions.\n\n\"\"\"\n #prompt += \"Conversations:\"\n ret = self.formatter(prompt0 = prompt, conversations = self.conversations.GetConversations(frm = -n))\n return ret, self.formatter.Len(ret)\n \n def BuildPrompt(self):\n prompt, n = ConstructOptPrompt(self.ParameterizedBuildPrompt, low=1, high=len(self.conversations), maxLen=int(self.processor.llm.contextWindow * config.contextWindowRatio))\n if prompt is None:\n prompt = self.ParameterizedBuildPrompt(1)\n return prompt"
},
{
"identifier": "APromptResearcher",
"path": "ailice/prompts/APromptResearcher.py",
"snippet": "class APromptResearcher():\n PROMPT_NAME = \"researcher\"\n \n def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):\n self.processor = processor\n self.storage = storage\n self.collection = collection\n self.conversations = conversations\n self.formatter = formatter\n self.outputCB = outputCB\n self.prompt0 = read_text(\"ailice.prompts\", \"prompt_researcher.txt\")\n self.PATTERNS = {\"CALL\": [{\"re\": GenerateRE4FunctionCalling(\"CALL<!|agentType: str, agentName: str, msg: str|!> -> str\"), \"isEntry\": True}],\n \"RESPOND\": [{\"re\": GenerateRE4FunctionCalling(\"RESPOND<!|message: str|!> -> None\", faultTolerance = True), \"isEntry\": True}],\n \"BROWSE\": [{\"re\": GenerateRE4FunctionCalling(\"BROWSE<!|url: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"SCROLLDOWN\": [{\"re\": GenerateRE4FunctionCalling(\"SCROLLDOWN<!||!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"BASH\": [{\"re\": GenerateRE4FunctionCalling(\"BASH<!|code: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"SCROLLUPBASH\": [{\"re\": GenerateRE4FunctionCalling(\"SCROLLUPBASH<!||!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"PYTHON\": [{\"re\": GenerateRE4FunctionCalling(\"PYTHON<!|code: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"SCROLLUPPY\": [{\"re\": GenerateRE4FunctionCalling(\"SCROLLUPPY<!||!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"STORE\": [{\"re\": GenerateRE4FunctionCalling(\"STORE<!|txt: str|!> -> None\", faultTolerance = True), \"isEntry\": True}],\n \"QUERY\": [{\"re\": GenerateRE4FunctionCalling(\"QUERY<!|keywords: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"VAR\": [{\"re\": GenerateRE4FunctionCalling(\"VAR<!|name: str, content: str|!> -> None\", faultTolerance = True), \"isEntry\": True}]}\n self.ACTIONS = {\"VAR\": {\"func\": self.Var}}\n self.variables = dict()\n return\n \n def Var(self, name: str, content: str):\n self.variables[name] = content\n return\n \n def Recall(self, key: str):\n ret = self.storage.Query(self.collection, key)\n if (0 != len(ret)): #and (ret[0][1] <= 0.5):\n return ret[0][0]\n else:\n return \"None.\"\n \n def GetPatterns(self):\n return self.PATTERNS\n \n def GetActions(self):\n return self.ACTIONS\n\n def ParameterizedBuildPrompt(self, n: int):\n context = str(self.formatter(prompt0 = \"\", conversations = self.conversations.GetConversations(frm = -1), encode = False))\n prompt = f\"\"\"\n{self.prompt0}\n\nEnd of general instructions.\n\nActive Agents: {[k+\": agentType \"+p.GetPromptName() for k,p in self.processor.subProcessors.items()]}\n\nVariables:\n{[f\"{varName}: {content}\" for varName, content in self.variables.items()]}\n\nRelevant Information: {self.Recall(context).strip()}\n\n\"\"\"\n #print(prompt)\n ret = self.formatter(prompt0 = prompt, conversations = self.conversations.GetConversations(frm = -n))\n return ret, self.formatter.Len(ret)\n \n def BuildPrompt(self):\n prompt, n = ConstructOptPrompt(self.ParameterizedBuildPrompt, low=1, high=len(self.conversations), maxLen=int(self.processor.llm.contextWindow * config.contextWindowRatio))\n if prompt is None:\n prompt = self.ParameterizedBuildPrompt(1)\n return prompt"
},
{
"identifier": "APromptCoder",
"path": "ailice/prompts/APromptCoder.py",
"snippet": "class APromptCoder():\n PROMPT_NAME = \"coder\"\n\n def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):\n self.processor = processor\n self.storage = storage\n self.collection = collection\n self.conversations = conversations\n self.formatter = formatter\n self.outputCB = outputCB\n self.prompt0 = read_text(\"ailice.prompts\", \"prompt_coder.txt\")\n self.PATTERNS = {}\n self.ACTIONS= {}\n\n return\n \n def Reset(self):\n return\n \n def GetPatterns(self):\n return self.PATTERNS\n \n def GetActions(self):\n return self.ACTIONS\n \n def ParameterizedBuildPrompt(self, n: int):\n prompt = f\"\"\"\n{self.prompt0}\n\"\"\"\n #prompt += \"\\nConversations:\"\n ret = self.formatter(prompt0 = prompt, conversations = self.conversations.GetConversations(frm = -n))\n return ret, self.formatter.Len(ret)\n \n def BuildPrompt(self):\n prompt, n = ConstructOptPrompt(self.ParameterizedBuildPrompt, low=1, high=len(self.conversations), maxLen=int(self.processor.llm.contextWindow * config.contextWindowRatio))\n if prompt is None:\n prompt = self.ParameterizedBuildPrompt(1)\n return prompt"
},
{
"identifier": "APromptModuleCoder",
"path": "ailice/prompts/APromptModuleCoder.py",
"snippet": "class APromptModuleCoder():\n PROMPT_NAME = \"module-coder\"\n\n def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):\n self.processor = processor\n self.storage = storage\n self.collection = collection\n self.conversations = conversations\n self.formatter = formatter\n self.outputCB = outputCB\n self.prompt0 = read_text(\"ailice.prompts\", \"prompt_module_coder.txt\")\n self.PATTERNS = {}\n self.ACTIONS= {}\n\n return\n \n def Reset(self):\n return\n \n def GetPatterns(self):\n return self.PATTERNS\n \n def GetActions(self):\n return self.ACTIONS\n \n def ParameterizedBuildPrompt(self, n: int):\n prompt = f\"\"\"\n{self.prompt0}\n\"\"\"\n #prompt += \"\\nConversations:\"\n ret = self.formatter(prompt0 = prompt, conversations = self.conversations.GetConversations(frm = -n))\n return ret, self.formatter.Len(ret)\n \n def BuildPrompt(self):\n prompt, n = ConstructOptPrompt(self.ParameterizedBuildPrompt, low=1, high=len(self.conversations), maxLen=int(self.processor.llm.contextWindow * config.contextWindowRatio))\n if prompt is None:\n prompt = self.ParameterizedBuildPrompt(1)\n return prompt"
},
{
"identifier": "APromptModuleLoader",
"path": "ailice/prompts/APromptModuleLoader.py",
"snippet": "class APromptModuleLoader():\n PROMPT_NAME = \"module-loader\"\n\n def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):\n self.processor = processor\n self.storage = storage\n self.collection = collection\n self.conversations = conversations\n self.formatter = formatter\n self.outputCB = outputCB\n self.prompt0 = read_text(\"ailice.prompts\", \"prompt_module_loader.txt\")\n self.memory = \"\"\n self.PATTERNS = {\"LOADMODULE\": [{\"re\": GenerateRE4FunctionCalling(\"LOADMODULE<!|addr: str|!> -> str\", faultTolerance = True), \"isEntry\": True}]}\n self.ACTIONS= {\"LOADMODULE\": {\"func\": self.LoadModule}}\n return\n \n def Reset(self):\n return\n \n def GetPatterns(self):\n return self.PATTERNS\n \n def GetActions(self):\n return self.ACTIONS\n \n def LoadModule(self, addr: str) -> str:\n try:\n ret = self.processor.RegisterModules([addr])\n prompts = []\n for r in ret:\n t = r['signature'].replace(r['signature'][:r['signature'].find('(')], r['action'], 1)\n newSig = t.replace('(', '<!|').replace(')', '|!>')\n self.processor.interpreter.RegisterPattern(nodeType=r['action'], pattern=GenerateRE4FunctionCalling(newSig, faultTolerance = True), isEntry=True)\n prompts.append(f\"{newSig}: {r['prompt']}\")\n self.memory = \"\\n\".join(prompts)\n ret = self.memory\n except Exception as e:\n ret = f\"Exception: {str(e)}\"\n return ret\n \n def ParameterizedBuildPrompt(self, n: int):\n prompt = f\"\"\"\n{self.prompt0}\n\nMODULE DETAILS:\n{self.memory}\n\n\"\"\"\n #prompt += \"\\nConversations:\"\n ret = self.formatter(prompt0 = prompt, conversations = self.conversations.GetConversations(frm = -n))\n return ret, self.formatter.Len(ret)\n \n def BuildPrompt(self):\n prompt, n = ConstructOptPrompt(self.ParameterizedBuildPrompt, low=1, high=len(self.conversations), maxLen=int(self.processor.llm.contextWindow * config.contextWindowRatio))\n if prompt is None:\n prompt = self.ParameterizedBuildPrompt(1)\n return prompt"
},
{
"identifier": "APromptCoderProxy",
"path": "ailice/prompts/APromptCoderProxy.py",
"snippet": "class APromptCoderProxy():\n PROMPT_NAME = \"coder-proxy\"\n\n def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):\n self.processor = processor\n self.storage = storage\n self.collection = collection\n self.conversations = conversations\n self.formatter = formatter\n self.outputCB = outputCB\n self.prompt0 = read_text(\"ailice.prompts\", \"prompt_coderproxy.txt\")\n self.PATTERNS = {\"CALL\": [{\"re\": GenerateRE4FunctionCalling(\"CALL<!|agentType: str, agentName: str, msg: str|!> -> str\"), \"isEntry\": True}],\n \"RESPOND\": [{\"re\": GenerateRE4FunctionCalling(\"RESPOND<!|message: str|!> -> None\", faultTolerance = True), \"isEntry\": True}],\n \"BASH\": [{\"re\": GenerateRE4FunctionCalling(\"BASH<!|code: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"SCROLLUPBASH\": [{\"re\": GenerateRE4FunctionCalling(\"SCROLLUPBASH<!||!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"PYTHON\": [{\"re\": GenerateRE4FunctionCalling(\"PYTHON<!|code: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"SCROLLUPPY\": [{\"re\": GenerateRE4FunctionCalling(\"SCROLLUPPY<!||!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"WAIT\": [{\"re\": GenerateRE4FunctionCalling(\"WAIT<!|duration: int|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"UpdateMemory\": [{\"re\": r\"UPDATED MEMORY(?P<newState>.*?)\", \"isEntry\": True}],\n \"SetVar\": [{\"re\": r\"(?P<varName>[a-zA-Z0-9_-]+)[ ]*=[ ]*<!\\|(?P<varValue>.*?)\\|!>\", \"isEntry\": True}],\n \"PrintVar\": [{\"re\": GenerateRE4FunctionCalling(\"PRINT<!|varName: str|!> -> str\", faultTolerance = True), \"isEntry\": True}]}\n self.ACTIONS= {\"UpdateMemory\": {\"func\": self.UpdateMemory},\n \"SetVar\": {\"func\": self.SetVar},\n \"PrintVar\": {\"func\": self.GetVar}}\n self.memory = \"\"\n self.vars = {}\n return\n \n def Reset(self):\n return\n \n def GetPatterns(self):\n return self.PATTERNS\n \n def GetActions(self):\n return self.ACTIONS\n \n def Recall(self, key: str):\n ret = self.storage.Query(self.collection, key)\n if (0 != len(ret)): #and (ret[0][1] <= 0.5):\n return ret[0][0]\n else:\n return \"None.\"\n \n def UpdateMemory(self, newMemory: str):\n self.memory = newMemory\n return\n \n def SetVar(self, varName: str, varValue: str):\n self.vars[varName] = varValue\n return\n \n def GetVar(self, varName: str) -> str:\n return self.vars.get(varName, f\"Variable {varName} NOT DEFINED. Only defined variable names are legal, this includes: {[k for k in self.vars]}\")\n \n def ParameterizedBuildPrompt(self, n: int):\n context = str(self.formatter(prompt0 = \"\", conversations = self.conversations.GetConversations(frm = -1), encode = False))\n prompt = f\"\"\"\n{self.prompt0}\n\nEnd of general instructions.\n\nActive Agents: {[k+\": agentType \"+p.GetPromptName() for k,p in self.processor.subProcessors.items()]}\n\nRelevant Information: {self.Recall(context).strip()}\n\n\"\"\"\n #prompt += \"\\nConversations:\"\n ret = self.formatter(prompt0 = prompt, conversations = self.conversations.GetConversations(frm = -n))\n return ret, self.formatter.Len(ret)\n \n def BuildPrompt(self):\n prompt, n = ConstructOptPrompt(self.ParameterizedBuildPrompt, low=1, high=len(self.conversations), maxLen=int(self.processor.llm.contextWindow * config.contextWindowRatio))\n if prompt is None:\n prompt = self.ParameterizedBuildPrompt(1)\n return prompt"
},
{
"identifier": "APromptArticleDigest",
"path": "ailice/prompts/APromptArticleDigest.py",
"snippet": "class APromptArticleDigest():\n PROMPT_NAME = \"article-digest\"\n \n def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):\n self.processor = processor\n self.storage = storage\n self.collection = collection\n self.conversations = conversations\n self.formatter = formatter\n self.outputCB = outputCB\n self.prompt0 = read_text('ailice.prompts', 'prompt_article_digest.txt')\n self.PATTERNS = {\"BROWSE\": [{\"re\": GenerateRE4FunctionCalling(\"BROWSE<!|url: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"SCROLLDOWN\": [{\"re\": GenerateRE4FunctionCalling(\"SCROLLDOWN<!||!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"Output\": [{\"re\": r\"REPORT:(?P<txt>.*?)NOTEBOOK:\", \"isEntry\": True}],\n \"RESPOND\": [{\"re\": GenerateRE4FunctionCalling(\"RESPOND<!|message: str|!> -> None\", faultTolerance = True), \"isEntry\": True}]}\n self.ACTIONS = {\"Output\": {\"func\": self.Output}}\n return\n \n def Reset(self):\n return\n\n def Output(self, txt: str):\n txt = txt.strip()\n self.storage.Store(self.collection, txt)\n self.outputCB(f\"OUTPUT_{self.processor.name}\", txt)\n return\n \n def Recall(self, key: str):\n ret = self.storage.Query(self.collection, key)\n if (0 != len(ret)): #and (ret[0][1] <= 0.5):\n return ret[0][0]\n else:\n return \"None.\"\n \n def GetPatterns(self):\n return self.PATTERNS\n \n def GetActions(self):\n return self.ACTIONS\n\n def BuildPrompt(self):\n context = str(self.formatter(prompt0 = \"\", conversations = self.conversations.GetConversations(frm = -1), encode = False))\n prompt = f\"\"\"\n{self.prompt0}\n\nEnd of general instructions.\n\nRELEVANT INFORMATION: {self.Recall(context).strip()}\n\n\"\"\"\n #print(prompt)\n return self.formatter(prompt0 = prompt, conversations = self.conversations.GetConversations(frm = -2))"
}
] | import time
import simplejson as json
import threading
import gradio as gr
import argparse
from termcolor import colored
from ailice.common.AConfig import config
from ailice.core.AProcessor import AProcessor
from ailice.core.llm.ALLMPool import llmPool
from ailice.common.utils.ALogger import ALogger
from ailice.common.ARemoteAccessors import clientPool
from ailice.AServices import StartServices
from ailice.common.APrompts import promptsManager
from ailice.prompts.APromptChat import APromptChat
from ailice.prompts.APromptMain import APromptMain
from ailice.prompts.APromptSearchEngine import APromptSearchEngine
from ailice.prompts.APromptResearcher import APromptResearcher
from ailice.prompts.APromptCoder import APromptCoder
from ailice.prompts.APromptModuleCoder import APromptModuleCoder
from ailice.prompts.APromptModuleLoader import APromptModuleLoader
from ailice.prompts.APromptCoderProxy import APromptCoderProxy
from ailice.prompts.APromptArticleDigest import APromptArticleDigest | 8,625 |
def mainLoop(modelID: str, quantization: str, maxMemory: dict, prompt: str, temperature: float, flashAttention2: bool, contextWindowRatio: float, localExecution: bool, trace: str):
config.Initialize(needOpenaiGPTKey = ("oai:" in modelID))
config.quantization = quantization
config.maxMemory = maxMemory
config.temperature = temperature
config.flashAttention2 = flashAttention2
config.contextWindowRatio = contextWindowRatio
config.localExecution = localExecution
print(colored("The port range of the ext-modules has been changed from 2005-2016 to 59000-59200. If you are using an old version, startup failure will occur after updating the code. Please modify the port number in config.json and rebuild the docker image.", "yellow"))
StartServices()
clientPool.Init()
|
def mainLoop(modelID: str, quantization: str, maxMemory: dict, prompt: str, temperature: float, flashAttention2: bool, contextWindowRatio: float, localExecution: bool, trace: str):
config.Initialize(needOpenaiGPTKey = ("oai:" in modelID))
config.quantization = quantization
config.maxMemory = maxMemory
config.temperature = temperature
config.flashAttention2 = flashAttention2
config.contextWindowRatio = contextWindowRatio
config.localExecution = localExecution
print(colored("The port range of the ext-modules has been changed from 2005-2016 to 59000-59200. If you are using an old version, startup failure will occur after updating the code. Please modify the port number in config.json and rebuild the docker image.", "yellow"))
StartServices()
clientPool.Init()
| for promptCls in [APromptChat, APromptMain, APromptSearchEngine, APromptResearcher, APromptCoder, APromptModuleCoder, APromptModuleLoader, APromptCoderProxy, APromptArticleDigest]: | 15 | 2023-10-16 01:51:14+00:00 | 12k |
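Note: the next_line above registers each APrompt* class with promptsManager, whose APromptsManager interface (RegisterPrompt, __getitem__, __iter__) is listed in the context entries. A minimal standalone sketch of such a registry keyed by each class's PROMPT_NAME (consistent with those signatures, but not AIlice's actual implementation; the class names below are placeholders):

class PromptRegistry:
    # Maps a prompt class's PROMPT_NAME string to the class itself (sketch, not ailice.common.APrompts).
    def __init__(self):
        self.prompts = {}

    def RegisterPrompt(self, promptClass):
        self.prompts[promptClass.PROMPT_NAME] = promptClass

    def __getitem__(self, promptName: str):
        return self.prompts[promptName]

    def __iter__(self):
        return iter(self.prompts)

class DummyChatPrompt:
    PROMPT_NAME = "chat"

registry = PromptRegistry()
for promptCls in [DummyChatPrompt]:
    registry.RegisterPrompt(promptCls)

assert "chat" in list(registry)
print(registry["chat"].PROMPT_NAME)

A processor can then look up a prompt class by name (as AProcessor.EvalCall does with promptsManager[agentType]) without hard-coding the mapping.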
city96/ComfyUI_ExtraModels | PixArt/models/PixArtMS.py | [
{
"identifier": "auto_grad_checkpoint",
"path": "PixArt/models/utils.py",
"snippet": "def _ntuple(n):\n def parse(x):\ndef set_grad_checkpoint(model, use_fp32_attention=False, gc_step=1):\n def set_attr(module):\ndef auto_grad_checkpoint(module, *args, **kwargs):\ndef checkpoint_sequential(functions, step, input, *args, **kwargs):\n def run_function(start, end, functions):\n def forward(input):\ndef get_rel_pos(q_size, k_size, rel_pos):\ndef add_decomposed_rel_pos(attn, q, rel_pos_h, rel_pos_w, q_size, k_size):"
},
{
"identifier": "t2i_modulate",
"path": "PixArt/models/PixArt_blocks.py",
"snippet": "def t2i_modulate(x, shift, scale):\n return x * (1 + scale) + shift"
},
{
"identifier": "CaptionEmbedder",
"path": "PixArt/models/PixArt_blocks.py",
"snippet": "class CaptionEmbedder(nn.Module):\n \"\"\"\n Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance.\n \"\"\"\n\n def __init__(self, in_channels, hidden_size, uncond_prob, act_layer=nn.GELU(approximate='tanh'), token_num=120):\n super().__init__()\n self.y_proj = Mlp(in_features=in_channels, hidden_features=hidden_size, out_features=hidden_size, act_layer=act_layer, drop=0)\n self.register_buffer(\"y_embedding\", nn.Parameter(torch.randn(token_num, in_channels) / in_channels ** 0.5))\n self.uncond_prob = uncond_prob\n\n def token_drop(self, caption, force_drop_ids=None):\n \"\"\"\n Drops labels to enable classifier-free guidance.\n \"\"\"\n if force_drop_ids is None:\n drop_ids = torch.rand(caption.shape[0]).cuda() < self.uncond_prob\n else:\n drop_ids = force_drop_ids == 1\n caption = torch.where(drop_ids[:, None, None, None], self.y_embedding, caption)\n return caption\n\n def forward(self, caption, train, force_drop_ids=None):\n if train:\n assert caption.shape[2:] == self.y_embedding.shape\n use_dropout = self.uncond_prob > 0\n if (train and use_dropout) or (force_drop_ids is not None):\n caption = self.token_drop(caption, force_drop_ids)\n caption = self.y_proj(caption)\n return caption"
},
{
"identifier": "WindowAttention",
"path": "PixArt/models/PixArt_blocks.py",
"snippet": "class WindowAttention(Attention_):\n \"\"\"Multi-head Attention block with relative position embeddings.\"\"\"\n\n def __init__(\n self,\n dim,\n num_heads=8,\n qkv_bias=True,\n use_rel_pos=False,\n rel_pos_zero_init=True,\n input_size=None,\n **block_kwargs,\n ):\n \"\"\"\n Args:\n dim (int): Number of input channels.\n num_heads (int): Number of attention heads.\n qkv_bias (bool: If True, add a learnable bias to query, key, value.\n rel_pos (bool): If True, add relative positional embeddings to the attention map.\n rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.\n input_size (int or None): Input resolution for calculating the relative positional\n parameter size.\n \"\"\"\n super().__init__(dim, num_heads=num_heads, qkv_bias=qkv_bias, **block_kwargs)\n\n self.use_rel_pos = use_rel_pos\n if self.use_rel_pos:\n # initialize relative positional embeddings\n self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, self.head_dim))\n self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, self.head_dim))\n\n if not rel_pos_zero_init:\n nn.init.trunc_normal_(self.rel_pos_h, std=0.02)\n nn.init.trunc_normal_(self.rel_pos_w, std=0.02)\n\n def forward(self, x, mask=None):\n B, N, C = x.shape # 2 4096 1152\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)\n\n if model_management.xformers_enabled():\n q, k, v = qkv.unbind(2)\n\n if getattr(self, 'fp32_attention', False):\n q, k, v = q.float(), k.float(), v.float()\n\n attn_bias = None\n if mask is not None:\n attn_bias = torch.zeros([B * self.num_heads, q.shape[1], k.shape[1]], dtype=q.dtype, device=q.device)\n attn_bias.masked_fill_(mask.squeeze(1).repeat(self.num_heads, 1, 1) == 0, float('-inf'))\n # Switch between torch / xformers attention\n x = xformers.ops.memory_efficient_attention(q, k, v, p=self.attn_drop.p, attn_bias=attn_bias)\n x = x.view(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n else:\n q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(0)\n\n q = q * self.scale\n attn = q @ k.transpose(-2, -1)\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n x = attn @ v\n\n x = x.transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x"
},
{
"identifier": "MultiHeadCrossAttention",
"path": "PixArt/models/PixArt_blocks.py",
"snippet": "class MultiHeadCrossAttention(nn.Module):\n def __init__(self, d_model, num_heads, attn_drop=0., proj_drop=0., **block_kwargs):\n super(MultiHeadCrossAttention, self).__init__()\n assert d_model % num_heads == 0, \"d_model must be divisible by num_heads\"\n\n self.d_model = d_model\n self.num_heads = num_heads\n self.head_dim = d_model // num_heads\n\n self.q_linear = nn.Linear(d_model, d_model)\n self.kv_linear = nn.Linear(d_model, d_model*2)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(d_model, d_model)\n self.proj_drop = nn.Dropout(proj_drop)\n\n def forward(self, x, cond, mask=None):\n # query/value: img tokens; key: condition; mask: if padding tokens\n B, N, C = x.shape\n\n if model_management.xformers_enabled():\n q = self.q_linear(x).view(1, -1, self.num_heads, self.head_dim)\n kv = self.kv_linear(cond).view(1, -1, 2, self.num_heads, self.head_dim)\n k, v = kv.unbind(2)\n attn_bias = None\n if mask is not None:\n attn_bias = xformers.ops.fmha.BlockDiagonalMask.from_seqlens([N] * B, mask)\n x = xformers.ops.memory_efficient_attention(q, k, v, p=self.attn_drop.p, attn_bias=attn_bias)\n x = x.view(B, -1, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n else:\n global competent_attention_implementation\n if not competent_attention_implementation:\n print(\"\"\"\\nYou should REALLY consider installing/enabling xformers.\\nAlternatively, open up ExtraModels/PixArt/models/PixArt_blocks.py and\\n- Fix the attention map on line 77 if you know how to\\n- Add scaled_dot_product_attention on line 150\\n- Send a PR and remove this message on line 32/66-69\\n\"\"\")\n competent_attention_implementation = True\n\n q = self.q_linear(x).view(1, -1, self.num_heads, self.head_dim)\n kv = self.kv_linear(cond).view(1, -1, 2, self.num_heads, self.head_dim)\n k, v = kv.unbind(2)\n q, k, v = map(lambda t: t.permute(0, 2, 1, 3),(q, k, v),)\n \n attn_mask = None\n if mask is not None and len(mask) > 1:\n # This is probably wrong\n attn_mask = torch.zeros(\n [1, q.shape[1], q.shape[2], v.shape[2]],\n dtype=q.dtype,\n device=q.device\n )\n attn_mask[:, :, (q.shape[2]//2):, mask[0]:] = True\n attn_mask[:, :, :(q.shape[2]//2), :mask[1]] = True\n\n x = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask, dropout_p=self.attn_drop.p)\n x = x.permute(0, 2, 1, 3).contiguous()\n x = x.view(B, -1, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x"
},
{
"identifier": "T2IFinalLayer",
"path": "PixArt/models/PixArt_blocks.py",
"snippet": "class T2IFinalLayer(nn.Module):\n \"\"\"\n The final layer of PixArt.\n \"\"\"\n\n def __init__(self, hidden_size, patch_size, out_channels):\n super().__init__()\n self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)\n self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)\n self.scale_shift_table = nn.Parameter(torch.randn(2, hidden_size) / hidden_size ** 0.5)\n self.out_channels = out_channels\n\n def forward(self, x, t):\n shift, scale = (self.scale_shift_table[None] + t[:, None]).chunk(2, dim=1)\n x = t2i_modulate(self.norm_final(x), shift, scale)\n x = self.linear(x)\n return x"
},
{
"identifier": "TimestepEmbedder",
"path": "PixArt/models/PixArt_blocks.py",
"snippet": "class TimestepEmbedder(nn.Module):\n \"\"\"\n Embeds scalar timesteps into vector representations.\n \"\"\"\n\n def __init__(self, hidden_size, frequency_embedding_size=256):\n super().__init__()\n self.mlp = nn.Sequential(\n nn.Linear(frequency_embedding_size, hidden_size, bias=True),\n nn.SiLU(),\n nn.Linear(hidden_size, hidden_size, bias=True),\n )\n self.frequency_embedding_size = frequency_embedding_size\n\n @staticmethod\n def timestep_embedding(t, dim, max_period=10000):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param t: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an (N, D) Tensor of positional embeddings.\n \"\"\"\n # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=t.device) / half)\n args = t[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n return embedding\n\n def forward(self, t):\n t_freq = self.timestep_embedding(t, self.frequency_embedding_size)\n t_emb = self.mlp(t_freq.to(t.dtype))\n return t_emb"
},
{
"identifier": "SizeEmbedder",
"path": "PixArt/models/PixArt_blocks.py",
"snippet": "class SizeEmbedder(TimestepEmbedder):\n \"\"\"\n Embeds scalar timesteps into vector representations.\n \"\"\"\n\n def __init__(self, hidden_size, frequency_embedding_size=256):\n super().__init__(hidden_size=hidden_size, frequency_embedding_size=frequency_embedding_size)\n self.mlp = nn.Sequential(\n nn.Linear(frequency_embedding_size, hidden_size, bias=True),\n nn.SiLU(),\n nn.Linear(hidden_size, hidden_size, bias=True),\n )\n self.frequency_embedding_size = frequency_embedding_size\n self.outdim = hidden_size\n\n def forward(self, s, bs):\n if s.ndim == 1:\n s = s[:, None]\n assert s.ndim == 2\n if s.shape[0] != bs:\n s = s.repeat(bs//s.shape[0], 1)\n assert s.shape[0] == bs\n b, dims = s.shape[0], s.shape[1]\n s = rearrange(s, \"b d -> (b d)\")\n s_freq = self.timestep_embedding(s, self.frequency_embedding_size)\n s_emb = self.mlp(s_freq.to(s.dtype))\n s_emb = rearrange(s_emb, \"(b d) d2 -> b (d d2)\", b=b, d=dims, d2=self.outdim)\n return s_emb"
},
{
"identifier": "PixArt",
"path": "PixArt/models/PixArt.py",
"snippet": "class PixArt(nn.Module):\n \"\"\"\n Diffusion model with a Transformer backbone.\n \"\"\"\n\n def __init__(\n self,\n input_size=32,\n patch_size=2,\n in_channels=4,\n hidden_size=1152,\n depth=28,\n num_heads=16,\n mlp_ratio=4.0,\n class_dropout_prob=0.1,\n pred_sigma=True,\n drop_path: float = 0.,\n window_size=0,\n window_block_indexes=[],\n use_rel_pos=False,\n caption_channels=4096,\n lewei_scale=1.0,\n config=None,\n **kwargs,\n ):\n super().__init__()\n self.pred_sigma = pred_sigma\n self.in_channels = in_channels\n self.out_channels = in_channels * 2 if pred_sigma else in_channels\n self.patch_size = patch_size\n self.num_heads = num_heads\n self.lewei_scale = lewei_scale,\n self.dtype = torch.get_default_dtype()\n\n self.x_embedder = PatchEmbed(input_size, patch_size, in_channels, hidden_size, bias=True)\n self.t_embedder = TimestepEmbedder(hidden_size)\n num_patches = self.x_embedder.num_patches\n self.base_size = input_size // self.patch_size\n # Will use fixed sin-cos embedding:\n self.register_buffer(\"pos_embed\", torch.zeros(1, num_patches, hidden_size))\n\n approx_gelu = lambda: nn.GELU(approximate=\"tanh\")\n self.t_block = nn.Sequential(\n nn.SiLU(),\n nn.Linear(hidden_size, 6 * hidden_size, bias=True)\n )\n self.y_embedder = CaptionEmbedder(in_channels=caption_channels, hidden_size=hidden_size, uncond_prob=class_dropout_prob, act_layer=approx_gelu)\n drop_path = [x.item() for x in torch.linspace(0, drop_path, depth)] # stochastic depth decay rule\n self.blocks = nn.ModuleList([\n PixArtBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio, drop_path=drop_path[i],\n input_size=(input_size // patch_size, input_size // patch_size),\n window_size=window_size if i in window_block_indexes else 0,\n use_rel_pos=use_rel_pos if i in window_block_indexes else False)\n for i in range(depth)\n ])\n self.final_layer = T2IFinalLayer(hidden_size, patch_size, self.out_channels)\n\n self.initialize_weights()\n\n print(f'Warning: lewei scale: {self.lewei_scale}, base size: {self.base_size}')\n\n def forward_raw(self, x, t, y, mask=None, data_info=None):\n \"\"\"\n Original forward pass of PixArt.\n x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)\n t: (N,) tensor of diffusion timesteps\n y: (N, 1, 120, C) tensor of class labels\n \"\"\"\n self.h, self.w = x.shape[-2]//self.patch_size, x.shape[-1]//self.patch_size\n x = self.x_embedder(x) + self.pos_embed # (N, T, D), where T = H * W / patch_size ** 2\n t = self.t_embedder(t) # (N, D)\n t0 = self.t_block(t)\n y = self.y_embedder(y, self.training) # (N, 1, L, D)\n if mask is not None:\n if mask.shape[0] != y.shape[0]:\n mask = mask.repeat(y.shape[0] // mask.shape[0], 1)\n mask = mask.squeeze(1).squeeze(1)\n y = y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, x.shape[-1])\n y_lens = mask.sum(dim=1).tolist()\n else:\n y_lens = [y.shape[2]] * y.shape[0]\n y = y.squeeze(1).view(1, -1, x.shape[-1])\n for block in self.blocks:\n x = auto_grad_checkpoint(block, x, y, t0, y_lens) # (N, T, D) #support grad checkpoint\n x = self.final_layer(x, t) # (N, T, patch_size ** 2 * out_channels)\n x = self.unpatchify(x) # (N, out_channels, H, W)\n return x\n\n def forward(self, x, timesteps, context, y=None, **kwargs):\n \"\"\"\n Forward pass that adapts comfy input to original forward function\n x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)\n timesteps: (N,) tensor of diffusion timesteps\n context: (N, 1, 120, C) conditioning\n y: extra conditioning.\n 
\"\"\"\n ## Still accepts the input w/o that dim but returns garbage\n if len(context.shape) == 3:\n context = context.unsqueeze(1)\n\n ## run original forward pass\n out = self.forward_raw(\n x = x.to(self.dtype),\n t = timesteps.to(self.dtype),\n y = context.to(self.dtype),\n )\n\n ## only return EPS\n out = out.to(torch.float)\n eps, rest = out[:, :self.in_channels], out[:, self.in_channels:]\n return eps\n\n def forward_with_dpmsolver(self, x, t, y, mask=None, **kwargs):\n \"\"\"\n dpm solver donnot need variance prediction\n \"\"\"\n # https://github.com/openai/glide-text2im/blob/main/notebooks/text2im.ipynb\n model_out = self.forward(x, t, y, mask)\n return model_out.chunk(2, dim=1)[0]\n\n def forward_with_cfg(self, x, t, y, cfg_scale, **kwargs):\n \"\"\"\n Forward pass of PixArt, but also batches the unconditional forward pass for classifier-free guidance.\n \"\"\"\n # https://github.com/openai/glide-text2im/blob/main/notebooks/text2im.ipynb\n half = x[: len(x) // 2]\n combined = torch.cat([half, half], dim=0)\n model_out = self.forward(combined, t, y, kwargs)\n model_out = model_out['x'] if isinstance(model_out, dict) else model_out\n eps, rest = model_out[:, :3], model_out[:, 3:]\n cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)\n half_eps = uncond_eps + cfg_scale * (cond_eps - uncond_eps)\n eps = torch.cat([half_eps, half_eps], dim=0)\n return torch.cat([eps, rest], dim=1)\n\n def unpatchify(self, x):\n \"\"\"\n x: (N, T, patch_size**2 * C)\n imgs: (N, H, W, C)\n \"\"\"\n c = self.out_channels\n p = self.x_embedder.patch_size[0]\n h = w = int(x.shape[1] ** 0.5)\n assert h * w == x.shape[1]\n\n x = x.reshape(shape=(x.shape[0], h, w, p, p, c))\n x = torch.einsum('nhwpqc->nchpwq', x)\n imgs = x.reshape(shape=(x.shape[0], c, h * p, h * p))\n return imgs\n\n def initialize_weights(self):\n # Initialize transformer layers:\n def _basic_init(module):\n if isinstance(module, nn.Linear):\n torch.nn.init.xavier_uniform_(module.weight)\n if module.bias is not None:\n nn.init.constant_(module.bias, 0)\n\n self.apply(_basic_init)\n\n # Initialize (and freeze) pos_embed by sin-cos embedding:\n pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], int(self.x_embedder.num_patches ** 0.5), lewei_scale=self.lewei_scale, base_size=self.base_size)\n self.pos_embed.data.copy_(torch.from_numpy(pos_embed).unsqueeze(0).to(self.dtype))\n\n # Initialize patch_embed like nn.Linear (instead of nn.Conv2d):\n w = self.x_embedder.proj.weight.data\n nn.init.xavier_uniform_(w.view([w.shape[0], -1]))\n\n # Initialize timestep embedding MLP:\n nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)\n nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)\n nn.init.normal_(self.t_block[1].weight, std=0.02)\n\n # Initialize caption embedding MLP:\n nn.init.normal_(self.y_embedder.y_proj.fc1.weight, std=0.02)\n nn.init.normal_(self.y_embedder.y_proj.fc2.weight, std=0.02)\n\n # Zero-out adaLN modulation layers in PixArt blocks:\n for block in self.blocks:\n nn.init.constant_(block.cross_attn.proj.weight, 0)\n nn.init.constant_(block.cross_attn.proj.bias, 0)\n\n # Zero-out output layers:\n nn.init.constant_(self.final_layer.linear.weight, 0)\n nn.init.constant_(self.final_layer.linear.bias, 0)"
},
{
"identifier": "get_2d_sincos_pos_embed",
"path": "PixArt/models/PixArt.py",
"snippet": "def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False, extra_tokens=0, lewei_scale=1.0, base_size=16):\n \"\"\"\n grid_size: int of the grid height and width\n return:\n pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)\n \"\"\"\n if isinstance(grid_size, int):\n grid_size = to_2tuple(grid_size)\n grid_h = np.arange(grid_size[0], dtype=np.float32) / (grid_size[0]/base_size) / lewei_scale\n grid_w = np.arange(grid_size[1], dtype=np.float32) / (grid_size[1]/base_size) / lewei_scale\n grid = np.meshgrid(grid_w, grid_h) # here w goes first\n grid = np.stack(grid, axis=0)\n grid = grid.reshape([2, 1, grid_size[1], grid_size[0]])\n\n pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)\n if cls_token and extra_tokens > 0:\n pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0)\n return pos_embed"
}
] | import torch
import torch.nn as nn
from tqdm import tqdm
from timm.models.layers import DropPath
from timm.models.vision_transformer import Mlp
from .utils import auto_grad_checkpoint, to_2tuple
from .PixArt_blocks import t2i_modulate, CaptionEmbedder, WindowAttention, MultiHeadCrossAttention, T2IFinalLayer, TimestepEmbedder, SizeEmbedder
from .PixArt import PixArt, get_2d_sincos_pos_embed
| 7,303 | x = self.proj(x)
if self.flatten:
x = x.flatten(2).transpose(1, 2) # BCHW -> BNC
x = self.norm(x)
return x
class PixArtMSBlock(nn.Module):
"""
A PixArt block with adaptive layer norm zero (adaLN-Zero) conditioning.
"""
def __init__(self, hidden_size, num_heads, mlp_ratio=4.0, drop_path=0., window_size=0, input_size=None, use_rel_pos=False, **block_kwargs):
super().__init__()
self.hidden_size = hidden_size
self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
self.attn = WindowAttention(hidden_size, num_heads=num_heads, qkv_bias=True,
input_size=input_size if window_size == 0 else (window_size, window_size),
use_rel_pos=use_rel_pos, **block_kwargs)
self.cross_attn = MultiHeadCrossAttention(hidden_size, num_heads, **block_kwargs)
self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        # to be compatible with older PyTorch versions
approx_gelu = lambda: nn.GELU(approximate="tanh")
self.mlp = Mlp(in_features=hidden_size, hidden_features=int(hidden_size * mlp_ratio), act_layer=approx_gelu, drop=0)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.window_size = window_size
self.scale_shift_table = nn.Parameter(torch.randn(6, hidden_size) / hidden_size ** 0.5)
def forward(self, x, y, t, mask=None, **kwargs):
B, N, C = x.shape
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (self.scale_shift_table[None] + t.reshape(B, 6, -1)).chunk(6, dim=1)
x = x + self.drop_path(gate_msa * self.attn(t2i_modulate(self.norm1(x), shift_msa, scale_msa)))
x = x + self.cross_attn(x, y, mask)
x = x + self.drop_path(gate_mlp * self.mlp(t2i_modulate(self.norm2(x), shift_mlp, scale_mlp)))
return x
#############################################################################
# Core PixArt Model #
#################################################################################
class PixArtMS(PixArt):
"""
Diffusion model with a Transformer backbone.
"""
def __init__(
self,
input_size=32,
patch_size=2,
in_channels=4,
hidden_size=1152,
depth=28,
num_heads=16,
mlp_ratio=4.0,
class_dropout_prob=0.1,
learn_sigma=True,
pred_sigma=True,
drop_path: float = 0.,
window_size=0,
window_block_indexes=[],
use_rel_pos=False,
caption_channels=4096,
lewei_scale=1.,
config=None,
**kwargs,
):
super().__init__(
input_size=input_size,
patch_size=patch_size,
in_channels=in_channels,
hidden_size=hidden_size,
depth=depth,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
class_dropout_prob=class_dropout_prob,
learn_sigma=learn_sigma,
pred_sigma=pred_sigma,
drop_path=drop_path,
window_size=window_size,
window_block_indexes=window_block_indexes,
use_rel_pos=use_rel_pos,
lewei_scale=lewei_scale,
config=config,
**kwargs,
)
self.dtype = torch.get_default_dtype()
self.h = self.w = 0
approx_gelu = lambda: nn.GELU(approximate="tanh")
self.t_block = nn.Sequential(
nn.SiLU(),
nn.Linear(hidden_size, 6 * hidden_size, bias=True)
)
self.x_embedder = PatchEmbed(patch_size, in_channels, hidden_size, bias=True)
self.y_embedder = CaptionEmbedder(in_channels=caption_channels, hidden_size=hidden_size, uncond_prob=class_dropout_prob, act_layer=approx_gelu)
self.csize_embedder = SizeEmbedder(hidden_size//3) # c_size embed
self.ar_embedder = SizeEmbedder(hidden_size//3) # aspect ratio embed
drop_path = [x.item() for x in torch.linspace(0, drop_path, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
PixArtMSBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio, drop_path=drop_path[i],
input_size=(input_size // patch_size, input_size // patch_size),
window_size=window_size if i in window_block_indexes else 0,
use_rel_pos=use_rel_pos if i in window_block_indexes else False)
for i in range(depth)
])
self.final_layer = T2IFinalLayer(hidden_size, patch_size, self.out_channels)
self.training = False
self.initialize()
def forward_raw(self, x, t, y, mask=None, data_info=None, **kwargs):
"""
Original forward pass of PixArt.
x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
t: (N,) tensor of diffusion timesteps
y: (N, 1, 120, C) tensor of class labels
"""
bs = x.shape[0]
c_size, ar = data_info['img_hw'], data_info['aspect_ratio']
self.h, self.w = x.shape[-2]//self.patch_size, x.shape[-1]//self.patch_size
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# GLIDE: https://github.com/openai/glide-text2im
# MAE: https://github.com/facebookresearch/mae/blob/main/models_mae.py
# --------------------------------------------------------
class PatchEmbed(nn.Module):
""" 2D Image to Patch Embedding
"""
def __init__(
self,
patch_size=16,
in_chans=3,
embed_dim=768,
norm_layer=None,
flatten=True,
bias=True,
):
super().__init__()
patch_size = to_2tuple(patch_size)
self.patch_size = patch_size
self.flatten = flatten
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
def forward(self, x):
x = self.proj(x)
if self.flatten:
x = x.flatten(2).transpose(1, 2) # BCHW -> BNC
x = self.norm(x)
return x
class PixArtMSBlock(nn.Module):
"""
A PixArt block with adaptive layer norm zero (adaLN-Zero) conditioning.
"""
def __init__(self, hidden_size, num_heads, mlp_ratio=4.0, drop_path=0., window_size=0, input_size=None, use_rel_pos=False, **block_kwargs):
super().__init__()
self.hidden_size = hidden_size
self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
self.attn = WindowAttention(hidden_size, num_heads=num_heads, qkv_bias=True,
input_size=input_size if window_size == 0 else (window_size, window_size),
use_rel_pos=use_rel_pos, **block_kwargs)
self.cross_attn = MultiHeadCrossAttention(hidden_size, num_heads, **block_kwargs)
self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        # to be compatible with older PyTorch versions
approx_gelu = lambda: nn.GELU(approximate="tanh")
self.mlp = Mlp(in_features=hidden_size, hidden_features=int(hidden_size * mlp_ratio), act_layer=approx_gelu, drop=0)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.window_size = window_size
self.scale_shift_table = nn.Parameter(torch.randn(6, hidden_size) / hidden_size ** 0.5)
def forward(self, x, y, t, mask=None, **kwargs):
B, N, C = x.shape
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (self.scale_shift_table[None] + t.reshape(B, 6, -1)).chunk(6, dim=1)
x = x + self.drop_path(gate_msa * self.attn(t2i_modulate(self.norm1(x), shift_msa, scale_msa)))
x = x + self.cross_attn(x, y, mask)
x = x + self.drop_path(gate_mlp * self.mlp(t2i_modulate(self.norm2(x), shift_mlp, scale_mlp)))
return x
#############################################################################
# Core PixArt Model #
#################################################################################
class PixArtMS(PixArt):
"""
Diffusion model with a Transformer backbone.
"""
def __init__(
self,
input_size=32,
patch_size=2,
in_channels=4,
hidden_size=1152,
depth=28,
num_heads=16,
mlp_ratio=4.0,
class_dropout_prob=0.1,
learn_sigma=True,
pred_sigma=True,
drop_path: float = 0.,
window_size=0,
window_block_indexes=[],
use_rel_pos=False,
caption_channels=4096,
lewei_scale=1.,
config=None,
**kwargs,
):
super().__init__(
input_size=input_size,
patch_size=patch_size,
in_channels=in_channels,
hidden_size=hidden_size,
depth=depth,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
class_dropout_prob=class_dropout_prob,
learn_sigma=learn_sigma,
pred_sigma=pred_sigma,
drop_path=drop_path,
window_size=window_size,
window_block_indexes=window_block_indexes,
use_rel_pos=use_rel_pos,
lewei_scale=lewei_scale,
config=config,
**kwargs,
)
self.dtype = torch.get_default_dtype()
self.h = self.w = 0
approx_gelu = lambda: nn.GELU(approximate="tanh")
self.t_block = nn.Sequential(
nn.SiLU(),
nn.Linear(hidden_size, 6 * hidden_size, bias=True)
)
self.x_embedder = PatchEmbed(patch_size, in_channels, hidden_size, bias=True)
self.y_embedder = CaptionEmbedder(in_channels=caption_channels, hidden_size=hidden_size, uncond_prob=class_dropout_prob, act_layer=approx_gelu)
self.csize_embedder = SizeEmbedder(hidden_size//3) # c_size embed
self.ar_embedder = SizeEmbedder(hidden_size//3) # aspect ratio embed
drop_path = [x.item() for x in torch.linspace(0, drop_path, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
PixArtMSBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio, drop_path=drop_path[i],
input_size=(input_size // patch_size, input_size // patch_size),
window_size=window_size if i in window_block_indexes else 0,
use_rel_pos=use_rel_pos if i in window_block_indexes else False)
for i in range(depth)
])
self.final_layer = T2IFinalLayer(hidden_size, patch_size, self.out_channels)
self.training = False
self.initialize()
def forward_raw(self, x, t, y, mask=None, data_info=None, **kwargs):
"""
Original forward pass of PixArt.
x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
t: (N,) tensor of diffusion timesteps
y: (N, 1, 120, C) tensor of class labels
"""
bs = x.shape[0]
c_size, ar = data_info['img_hw'], data_info['aspect_ratio']
self.h, self.w = x.shape[-2]//self.patch_size, x.shape[-1]//self.patch_size
| pos_embed = torch.from_numpy(get_2d_sincos_pos_embed(self.pos_embed.shape[-1], (self.h, self.w), lewei_scale=self.lewei_scale, base_size=self.base_size)).unsqueeze(0).to(x.device).to(self.dtype)
| 9 | 2023-10-20 21:19:44+00:00 | 12k |
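The gold completion for the record above rebuilds PixArtMS's positional table at the current latent resolution by calling get_2d_sincos_pos_embed with (self.h, self.w). As a rough standalone sketch (not the repo helper, which additionally rescales the grid by base_size and lewei_scale as shown in the context snippet), a 2D sin-cos embedding for an (h, w) grid can be assembled like this:

import numpy as np

def sincos_1d(embed_dim, pos):
    # (M,) positions -> (M, embed_dim): half sine, half cosine features
    assert embed_dim % 2 == 0
    omega = 1.0 / 10000 ** (np.arange(embed_dim // 2, dtype=np.float64) / (embed_dim // 2))
    out = np.einsum('m,d->md', pos.reshape(-1), omega)
    return np.concatenate([np.sin(out), np.cos(out)], axis=1)

def sincos_2d(embed_dim, h, w):
    # concatenate independent embeddings of the row and column coordinates
    ys, xs = np.meshgrid(np.arange(h, dtype=np.float64), np.arange(w, dtype=np.float64), indexing='ij')
    return np.concatenate([sincos_1d(embed_dim // 2, ys.reshape(-1)),
                           sincos_1d(embed_dim // 2, xs.reshape(-1))], axis=1)

pos_embed = sincos_2d(1152, 32, 32)   # hidden_size=1152, a 64x64 latent with patch_size=2
print(pos_embed.shape)                # (1024, 1152)

The gold next_line then wraps this table with torch.from_numpy, adds a batch dimension, and moves it to the input's device and dtype.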
apple/ml-nvas3d | demo/generate_demo_video.py | [
{
"identifier": "convolve_moving_receiver",
"path": "nvas3d/utils/dynamic_utils.py",
"snippet": "def convolve_moving_receiver(\n source_audio: np.ndarray,\n rirs: np.ndarray,\n interp_index: T.List[int],\n interp_weight: T.List[float]\n) -> np.ndarray:\n \"\"\"\n Apply convolution between an audio signal and moving impulse responses (IRs).\n\n Args:\n - source_audio: Source audio of shape (audio_len,)\n - rirs: RIRs of shape (num_positions, num_channels, ir_length)\n - interp_index: Indices representing the start positions for interpolation of shape (audio_len,).\n - interp_weight: Weight values for linear interpolation of shape (audio_len,).\n\n Returns:\n - Convolved audio signal of shape (num_channels, audio_len)\n \"\"\"\n\n num_channels = rirs.shape[1]\n audio_len = source_audio.shape[0]\n\n # Perform convolution for each position and channel\n convolved_audios = oaconvolve(source_audio[None, None, :], rirs, axes=-1)[..., :audio_len]\n\n # NumPy fancy indexing and broadcasting for interpolation\n start_audio = convolved_audios[interp_index, np.arange(num_channels)[:, None], np.arange(audio_len)]\n end_audio = convolved_audios[interp_index + 1, np.arange(num_channels)[:, None], np.arange(audio_len)]\n interp_weight = interp_weight[None, :]\n\n # Apply linear interpolation\n moving_audio = (1 - interp_weight) * start_audio + interp_weight * end_audio\n\n return moving_audio"
},
{
"identifier": "setup_dynamic_interp",
"path": "nvas3d/utils/dynamic_utils.py",
"snippet": "def setup_dynamic_interp(\n receiver_position: np.ndarray,\n total_samples: int,\n) -> T.Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Setup moving path with a constant speed for a receiver, given its positions in 3D space.\n\n Args:\n - receiver_position: Receiver positions in 3D space of shape (num_positions, 3).\n - total_samples: Total number of samples in the audio.\n\n Returns:\n - interp_index: Indices representing the start positions for interpolation.\n - interp_weight: Weight values for linear interpolation.\n \"\"\"\n\n # Calculate the number of samples per interval\n distance = np.linalg.norm(np.diff(receiver_position, axis=0), axis=1)\n speed_per_sample = distance.sum() / total_samples\n samples_per_interval = np.round(distance / speed_per_sample).astype(int)\n\n # Distribute rounding errors\n error = total_samples - samples_per_interval.sum()\n for i in np.random.choice(len(samples_per_interval), abs(error)):\n samples_per_interval[i] += np.sign(error)\n\n # Calculate indices and weights for linear interpolation\n interp_index = np.repeat(np.arange(len(distance)), samples_per_interval)\n interp_weight = np.concatenate([np.linspace(0, 1, num, endpoint=False) for num in samples_per_interval])\n\n return interp_index, interp_weight.astype(np.float32)"
},
{
"identifier": "clip_two",
"path": "nvas3d/utils/audio_utils.py",
"snippet": "def clip_two(audio1, audio2):\n \"\"\"\n Clips two audio signals to the same length.\n\n Args:\n audio1: First audio signal.\n audio2: Second audio signal.\n\n Returns: \n - Two audio signals of the same length.\n \"\"\"\n\n length_diff = audio1.shape[-1] - audio2.shape[-1]\n\n if length_diff == 0:\n return audio1, audio2\n elif length_diff > 0:\n audio1 = audio1[..., :audio2.shape[-1]]\n elif length_diff < 0:\n audio2 = audio2[..., :audio1.shape[-1]]\n\n return audio1, audio2"
},
{
"identifier": "clip_all",
"path": "nvas3d/utils/audio_utils.py",
"snippet": "def clip_all(audio_list):\n \"\"\"\n Clips all audio signals in a list to the same length.\n\n Args: \n audio_list: List of audio signals.\n\n Returns: \n - List of audio signals of the same length.\n \"\"\"\n\n min_length = min(audio.shape[-1] for audio in audio_list)\n clipped_audio_list = []\n for audio in audio_list:\n clipped_audio = audio[..., :min_length]\n clipped_audio_list.append(clipped_audio)\n\n return clipped_audio_list"
},
{
"identifier": "create_scene",
"path": "soundspaces_nvas3d/utils/ss_utils.py",
"snippet": "def create_scene(room: str,\n receiver_position: T.Tuple[float, float, float] = [0.0, 0.0, 0.0],\n sample_rate: float = 48000,\n image_size: T.Tuple[int, int] = (512, 256),\n include_visual_sensor: bool = True,\n hfov: float = 90.0\n ) -> Scene:\n \"\"\"\n Create a soundspaces scene to render IR.\n \"\"\"\n\n # Note: Make sure mp3d room is downloaded\n with suppress_stdout_and_stderr():\n # Create a receiver\n receiver = Receiver(\n position=receiver_position,\n rotation=0,\n sample_rate=sample_rate\n )\n\n scene = Scene(\n room,\n [None], # placeholder for source class\n receiver=receiver,\n include_visual_sensor=include_visual_sensor,\n add_source_mesh=False,\n device=torch.device('cpu'),\n add_source=False,\n image_size=image_size,\n hfov=hfov\n )\n\n return scene"
},
{
"identifier": "render_rir_parallel",
"path": "soundspaces_nvas3d/utils/ss_utils.py",
"snippet": "def render_rir_parallel(room_list: T.List[str],\n source_position_list: T.List[T.Tuple[float, float, float]],\n receiver_position_list: T.List[T.Tuple[float, float, float]],\n filename_list: T.List[str] = None,\n receiver_rotation_list: T.List[float] = None,\n batch_size: int = 64,\n sample_rate: float = 48000,\n use_default_material: bool = False,\n channel_type: str = 'Ambisonics',\n channel_order: int = 1\n ) -> T.List[torch.Tensor]:\n \"\"\"\n Run render_ir parallely for all elements of zip(source_position_list, receiver_position_list).\n \"\"\"\n\n assert len(room_list) == len(source_position_list)\n assert len(source_position_list) == len(receiver_position_list)\n\n if filename_list is None:\n is_return = True\n else:\n is_return = False\n\n if receiver_rotation_list is None:\n receiver_rotation_list = [0] * len(receiver_position_list)\n\n # Note: Make sure all rooms are downloaded\n\n # Calculate the number of batches\n num_points = len(source_position_list)\n num_batches = (num_points + batch_size - 1) // batch_size\n\n # Use tqdm to display the progress bar\n progress_bar = tqdm(total=num_points)\n\n def update_progress(*_):\n progress_bar.update()\n\n ir_list = []\n # Process the tasks in batches\n for batch_idx in range(num_batches):\n # Calculate the start and end indices of the current batch\n start_idx = batch_idx * batch_size\n end_idx = min(start_idx + batch_size, num_points)\n if is_return:\n batch = [(room_list[i], source_position_list[i], receiver_position_list[i], None, receiver_rotation_list[i]) for i in range(start_idx, end_idx)]\n else:\n batch = [(room_list[i], source_position_list[i], receiver_position_list[i], filename_list[i], receiver_rotation_list[i]) for i in range(start_idx, end_idx)]\n\n # Create a multiprocessing Pool for the current batch\n with multiprocessing.Pool() as pool:\n tasks = []\n for room, source_position, receiver_position, filename, receiver_rotation in batch:\n # Apply async mapping of process_ir function\n task = pool.apply_async(render_ir, args=(room, source_position, receiver_position, filename, receiver_rotation, sample_rate, use_default_material, channel_type, channel_order), callback=update_progress)\n tasks.append(task)\n\n # Wait for all tasks in the batch to complete and collect results\n for task in tasks:\n if is_return:\n ir = task.get() # Block until the result is ready\n ir_list.append(ir) # Append the result to the list\n else:\n task.get()\n if is_return:\n return ir_list"
},
{
"identifier": "load_room_grid",
"path": "soundspaces_nvas3d/utils/aihabitat_utils.py",
"snippet": "def load_room_grid(\n room: str,\n grid_distance: float\n) -> T.Dict:\n \"\"\"\n Load grid data for a specified room. If the grid data does not exist, it generates one.\n\n Args:\n - room: Name of the room.\n - grid_distance: The spacing between grid points.\n\n Returns:\n - A dictionary containing grid information for the specified room.\n \"\"\"\n\n grid_distance_str = str(grid_distance).replace(\".\", \"_\")\n dirname_grid = f'data/scene_datasets/metadata/mp3d/grid_{grid_distance_str}'\n filename_grid = f'{dirname_grid}/grid_{room}.npy'\n if not os.path.exists(filename_grid):\n os.makedirs(dirname_grid, exist_ok=True)\n print(f'Computing grid_{room}...')\n from soundspaces_nvas3d.rir_generation.generate_grid import save_xy_grid_points\n grid_info = save_xy_grid_points(room, grid_distance, dirname_grid)\n\n # load grid\n grid_info = np.load(filename_grid, allow_pickle=True).item()\n\n return grid_info"
},
{
"identifier": "Receiver",
"path": "soundspaces_nvas3d/soundspaces_nvas3d.py",
"snippet": "class Receiver:\n \"\"\"\n Receiver for SoundSpaces\n \"\"\"\n\n def __init__(self,\n position: T.Tuple[float, float, float],\n rotation: float,\n sample_rate: float = 48000,\n ):\n\n self.position = position\n self.rotation = rotation\n self.sample_rate = sample_rate"
},
{
"identifier": "Source",
"path": "soundspaces_nvas3d/soundspaces_nvas3d.py",
"snippet": "class Source:\n \"\"\"\n Source for Soundspaces\n \"\"\"\n\n def __init__(self,\n position: T.Tuple[float, float, float],\n rotation: float,\n dry_sound: str,\n mesh: str,\n device: torch.device\n ):\n\n self.position = position\n self.rotation = rotation\n self.device = device # where to store dry_sound\n self.dry_sound = dry_sound\n self.mesh = mesh"
},
{
"identifier": "Scene",
"path": "soundspaces_nvas3d/soundspaces_nvas3d.py",
"snippet": "class Scene:\n \"\"\"\n Soundspaces scene including room, receiver, and source list\n \"\"\"\n\n def __init__(self,\n room: str,\n source_name_list: T.List[str],\n receiver: Receiver = None,\n source_list: T.List[Source] = None,\n include_visual_sensor: bool = True,\n add_source_mesh: bool = True,\n device: torch.device = torch.device('cpu'),\n add_source: bool = True,\n image_size: T.Tuple[int, int] = (512, 256),\n hfov: float = 90.0,\n use_default_material: bool = False,\n channel_type: str = 'Ambisonics',\n channel_order: int = 1\n ):\n\n # Set scene\n self.room = room\n self.n_sources = len(source_name_list)\n assert self.n_sources > 0\n self.receiver = receiver\n self.source_list = source_list\n self.source_current = None\n self.include_visual_sensor = include_visual_sensor\n self.add_source_mesh = add_source_mesh\n self.device = device # where to store IR\n\n # Set channel config for soundspaces\n self.channel = {}\n self.channel['type'] = channel_type\n self.channel['order'] = channel_order\n if channel_type == 'Ambisonics':\n self.channel_count = (self.channel['order'] + 1)**2\n elif channel_type == 'Binaural':\n self.channel_count = 2\n\n # Set aihabitat config for soundspaces\n self.aihabitat = {}\n self.aihabitat['default_agent'] = 0\n self.aihabitat['sensor_height'] = 1.5\n self.aihabitat['height'] = image_size[0]\n self.aihabitat['width'] = image_size[1]\n self.aihabitat['hfov'] = hfov\n\n # Set acoustics config for soundspaces\n self.acoustic_config = {}\n self.acoustic_config['sampleRate'] = 48000\n self.acoustic_config['direct'] = True\n self.acoustic_config['indirect'] = True\n self.acoustic_config['diffraction'] = True\n self.acoustic_config['transmission'] = True\n self.acoustic_config['directSHOrder'] = 5\n self.acoustic_config['indirectSHOrder'] = 3\n self.acoustic_config['unitScale'] = 1\n self.acoustic_config['frequencyBands'] = 32\n self.acoustic_config['indirectRayCount'] = 50000\n\n # Set audio material\n if use_default_material:\n self.audio_material = './data/material/mp3d_material_config_default.json'\n else:\n self.audio_material = './data/material/mp3d_material_config.json'\n\n # Create simulation\n self.create_scene()\n\n # Randomly set source and receiver position\n source_position, source_rotation = None, None\n receiver_position, receiver_rotation = None, None\n\n # Create receiver (inside the room)\n if self.receiver is None:\n # random receiver\n self.create_receiver(receiver_position, receiver_rotation)\n else:\n # input receiver\n self.update_receiver(self.receiver)\n\n if add_source:\n # Create source\n if self.source_list is None:\n # random source\n self.source_list = [None] * self.n_sources\n for source_id, source_name in enumerate(source_name_list):\n self.create_source(source_name, source_id, source_position, source_rotation)\n else:\n # input source\n for source_id, _ in enumerate(source_name_list):\n self.update_source(self.source_list[source_id], source_id)\n\n def create_scene(self):\n \"\"\"\n Given the configuration, create a scene for soundspaces\n \"\"\"\n\n # Set backend configuration\n backend_cfg = habitat_sim.SimulatorConfiguration()\n backend_cfg.scene_id = f'./data/scene_datasets/mp3d/{self.room}/{self.room}.glb'\n backend_cfg.scene_dataset_config_file = './data/scene_datasets/mp3d/mp3d.scene_dataset_config.json'\n backend_cfg.load_semantic_mesh = True\n backend_cfg.enable_physics = False\n\n # Set agent configuration\n agent_config = habitat_sim.AgentConfiguration()\n\n if self.include_visual_sensor:\n # Set color 
sensor\n rgb_sensor_spec = habitat_sim.CameraSensorSpec()\n rgb_sensor_spec.uuid = \"color_sensor\"\n rgb_sensor_spec.sensor_type = habitat_sim.SensorType.COLOR\n rgb_sensor_spec.resolution = [self.aihabitat['height'], self.aihabitat['width']]\n rgb_sensor_spec.position = [0.0, self.aihabitat[\"sensor_height\"], 0.0]\n rgb_sensor_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE\n rgb_sensor_spec.hfov = self.aihabitat[\"hfov\"]\n agent_config.sensor_specifications = [rgb_sensor_spec]\n\n # Set depth sensor\n depth_sensor_spec = habitat_sim.CameraSensorSpec()\n depth_sensor_spec.uuid = \"depth_sensor\"\n depth_sensor_spec.sensor_type = habitat_sim.SensorType.DEPTH\n depth_sensor_spec.resolution = [self.aihabitat[\"height\"], self.aihabitat[\"width\"]]\n depth_sensor_spec.position = [0.0, self.aihabitat[\"sensor_height\"], 0.0]\n depth_sensor_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE\n depth_sensor_spec.hfov = self.aihabitat[\"hfov\"]\n agent_config.sensor_specifications.append(depth_sensor_spec)\n\n # # Set semantic sensor\n # semantic_sensor_spec = habitat_sim.CameraSensorSpec()\n # semantic_sensor_spec.uuid = \"semantic_sensor\"\n # semantic_sensor_spec.sensor_type = habitat_sim.SensorType.SEMANTIC\n # semantic_sensor_spec.resolution = [self.aihabitat[\"height\"], self.aihabitat[\"width\"]]\n # semantic_sensor_spec.position = [0.0, self.aihabitat[\"sensor_height\"], 0.0]\n # semantic_sensor_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE\n # semantic_sensor_spec.hfov = self.aihabitat[\"hfov\"]\n # agent_config.sensor_specifications.append(semantic_sensor_spec)\n\n # Set simulator configuration\n cfg = habitat_sim.Configuration(backend_cfg, [agent_config])\n\n # Set simulator\n sim = habitat_sim.Simulator(cfg)\n\n # set navmesh path for searching for navigatable points\n navmesh = f'./data/scene_datasets/mp3d/{self.room}/{self.room}.navmesh'\n sim.pathfinder.load_nav_mesh(navmesh)\n\n # seed for navmesh\n sim.seed(random.randint(0, 1024))\n\n # Set simulation\n self.sim = sim\n print('Scene created!')\n\n return self\n\n import torch\n\n def add_audio_sensor(self):\n \"\"\"\n Add audio sensor to the scene\n \"\"\"\n\n # set audio sensor\n audio_sensor_spec = habitat_sim.AudioSensorSpec()\n audio_sensor_spec.uuid = \"audio_sensor\"\n audio_sensor_spec.enableMaterials = True # make sure _semantic.ply file is in the scene folder\n audio_sensor_spec.channelLayout.type = getattr(habitat_sim.sensor.RLRAudioPropagationChannelLayoutType, self.channel['type'])\n audio_sensor_spec.channelLayout.channelCount = self.channel_count # ambisonics\n\n # Set acoustic configuration\n audio_sensor_spec.acousticsConfig.sampleRate = self.acoustic_config['sampleRate']\n audio_sensor_spec.acousticsConfig.direct = self.acoustic_config['direct']\n audio_sensor_spec.acousticsConfig.indirect = self.acoustic_config['indirect']\n audio_sensor_spec.acousticsConfig.diffraction = self.acoustic_config['diffraction']\n audio_sensor_spec.acousticsConfig.transmission = self.acoustic_config['transmission']\n audio_sensor_spec.acousticsConfig.directSHOrder = self.acoustic_config['directSHOrder']\n audio_sensor_spec.acousticsConfig.indirectSHOrder = self.acoustic_config['indirectSHOrder']\n audio_sensor_spec.acousticsConfig.unitScale = self.acoustic_config['unitScale']\n audio_sensor_spec.acousticsConfig.frequencyBands = self.acoustic_config['frequencyBands']\n audio_sensor_spec.acousticsConfig.indirectRayCount = self.acoustic_config['indirectRayCount']\n # 
audio_sensor_spec.acousticsConfig.maxIRLength = 40.0\n # audio_sensor_spec.acousticsConfig.sourceRayCount = 2000\n # audio_sensor_spec.acousticsConfig.meshSimplification = False\n\n # Initialize receiver\n audio_sensor_spec.position = [0.0, self.aihabitat['sensor_height'], 0.0] # audio sensor has a height of 1.5m\n self.sim.add_sensor(audio_sensor_spec)\n\n audio_sensor = self.sim.get_agent(self.aihabitat['default_agent'])._sensors['audio_sensor']\n audio_sensor.setAudioMaterialsJSON(self.audio_material)\n\n return self\n\n def create_receiver(self,\n position: T.Tuple[float, float, float] = None,\n rotation: float = None\n ):\n \"\"\"\n Randomly sample receiver position and rotation\n \"\"\"\n\n if position is None:\n # Randomly set receiver position in the room\n position = self.sim.pathfinder.get_random_navigable_point()\n rotation = random.uniform(0, 360)\n\n # Set sample rate\n sample_rate = self.acoustic_config['sampleRate']\n\n # Set receiver\n receiver = Receiver(position, rotation, sample_rate)\n\n # Update receiver\n self.update_receiver(receiver)\n\n return self\n\n def update_receiver(self,\n receiver: Receiver\n ):\n \"\"\"\n Update receiver\n \"\"\"\n\n agent = self.sim.get_agent(self.aihabitat[\"default_agent\"])\n new_state = self.sim.get_agent(self.aihabitat[\"default_agent\"]).get_state()\n new_state.position = np.array(receiver.position + np.array([0, 0.0, 0])) # agent height is already applied in audio_sensor_spec.position\n new_state.rotation = quat_from_angle_axis(math.radians(receiver.rotation), np.array([0, 1.0, 0])) # + -> left\n # new_state.rotation *= quat_from_angle_axis(math.radians(-30), np.array([1.0, 0, 0])) # + -> up\n new_state.sensor_states = {}\n agent.set_state(new_state, True)\n\n self.receiver = receiver # for reference\n\n return self\n\n def update_receiver_position(self,\n receiver_position: T.Tuple[float, float, float]\n ):\n \"\"\"\n Update receiver position\n \"\"\"\n\n self.receiver.position = receiver_position\n\n agent = self.sim.get_agent(self.aihabitat[\"default_agent\"])\n new_state = self.sim.get_agent(self.aihabitat[\"default_agent\"]).get_state()\n new_state.position = np.array(receiver_position + np.array([0, 0.0, 0])) # agent height is already applied in audio_sensor_spec.position\n new_state.sensor_states = {}\n agent.set_state(new_state, True)\n\n return self\n\n def create_source(self,\n source_name: str,\n source_id: int,\n position: T.Tuple[float, float, float] = None,\n rotation: float = None\n ):\n \"\"\"\n Set source given the source name, position, and rotation\n \"\"\"\n\n if position is None:\n # Randomly set source position in the room\n position = self.sim.pathfinder.get_random_navigable_point()\n rotation = random.uniform(0, 360) # only for mesh as source sound is omnidirectional\n\n # Randomly set source sound\n dry_sound, mesh = sample_dry_sound_and_mesh(source_name)\n\n # Set source\n source = Source(position, rotation, dry_sound, mesh, device=self.device)\n\n # Save source\n self.update_source(source, source_id)\n\n return self\n\n def update_source(self,\n source: Source,\n source_id: int = None\n ):\n \"\"\"\n Update source\n \"\"\"\n\n if source_id is not None:\n # update source list\n self.source_list[source_id] = source\n\n # Add mesh\n if self.add_source_mesh:\n ########## Add mesh (source.position, source.rotation) ##########\n obj_templates_mgr = self.sim.get_object_template_manager()\n rigid_obj_mgr = self.sim.get_rigid_object_manager()\n\n # Load the object template from the configuration file\n 
obj_templates_mgr.load_configs(str(os.path.join(\"data/objects\")))\n\n # Insert the object relative to the agent\n object_ids = []\n object_orientation = mn.Quaternion.rotation(mn.Deg(source.rotation), mn.Vector3.y_axis())\n object_template_handle = obj_templates_mgr.get_template_handles(f'data/objects/{source.mesh}')[0] # debug\n if source.mesh == 'male':\n scale = 0.5\n height_offset = 0.935\n elif source.mesh == 'female':\n scale = 1.0\n height_offset = 0.85\n elif source.mesh == 'guitar':\n scale = 1 / 1239.1628 * 2\n height_offset = 1.5\n object_orientation *= mn.Quaternion.rotation(mn.Deg(-90), mn.Vector3.x_axis())\n elif source.mesh == 'drum':\n scale = 1 / 1.8\n height_offset = 0.6\n elif source.mesh == 'classic_microphone':\n scale = 1 / 1.15\n height_offset = 0.67\n elif source.mesh == 'bluetooth_speaker':\n scale = 1 / 70\n height_offset = 1.0\n\n # Scale the object to fit the scene\n scaled_object_template = obj_templates_mgr.get_template_by_handle(object_template_handle)\n scaled_object_template.scale = np.array([scale, scale, scale])\n obj_templates_mgr.register_template(scaled_object_template, \"scaled\")\n object = rigid_obj_mgr.add_object_by_template_handle(\"scaled\")\n object.translation = np.array(source.position) + np.array([0, height_offset, 0])\n object.rotation = object_orientation\n\n object_ids.append(object.object_id)\n\n # rigid_obj_mgr.remove_all_objects()\n\n else:\n # update current source\n audio_sensor = self.sim.get_agent(self.aihabitat['default_agent'])._sensors['audio_sensor']\n audio_sensor.setAudioSourceTransform(source.position + np.array([0, self.aihabitat[\"sensor_height\"], 0])) # add 1.5m to the height calculation\n\n self.source_current = source # for reference\n\n return self\n\n def update_source_position(self,\n source_position\n ):\n \"\"\"\n Update Source position\n \"\"\"\n\n audio_sensor = self.sim.get_agent(self.aihabitat['default_agent'])._sensors['audio_sensor']\n audio_sensor.setAudioSourceTransform(source_position + np.array([0, self.aihabitat[\"sensor_height\"], 0])) # add 1.5m to the height calculation\n\n def render_ir(self,\n source_id: int\n ) -> torch.Tensor:\n \"\"\"\n Render IR given the source ID\n \"\"\"\n\n source = self.source_list[source_id]\n self.update_source(source)\n ir = torch.tensor(self.sim.get_sensor_observations()['audio_sensor'], device=self.device)\n\n return ir\n\n def render_ir_simple(self,\n source_position: T.Tuple[float, float, float],\n receiver_position: T.Tuple[float, float, float],\n ) -> torch.Tensor:\n \"\"\"\n Render IR given the source ID\n \"\"\"\n\n # source\n self.update_source_position(source_position)\n\n # receiver\n self.update_receiver_position(receiver_position)\n\n # render ir\n ir = torch.tensor(self.sim.get_sensor_observations()['audio_sensor'], device=self.device)\n\n return ir\n\n def render_ir_all(self) -> T.List[torch.Tensor]:\n \"\"\"\n Render IR for all sources\n \"\"\"\n\n ir_list = []\n for source_id in range(self.n_sources):\n print(f'Rendering IR {source_id}/{self.n_sources}...')\n ir = self.render_ir(source_id)\n ir_list.append(ir)\n\n return ir_list\n\n def render_image(self,\n is_instance=False\n ):\n \"\"\"\n Render image including rgb, depth, and semantic\n \"\"\"\n\n observation = self.sim.get_sensor_observations()\n rgb = observation[\"color_sensor\"]\n depth = observation[\"depth_sensor\"]\n\n # Semantic\n # semantic = sim.get_sensor_observations()[\"semantic_sensor\"]\n # is_valid = (depth != 0)\n # semantic[~is_valid] = semantic.max() + 1\n\n # if is_instance:\n # 
# Display instance id\n # aihabitat_utils.display_sample(rgb, semantic, depth, filename=f'{dir_results}/view/view_instance.png')\n # else:\n # # Display category id\n # category = aihabitat_utils.semantic_id_to_category_id(semantic, sim.semantic_scene.objects)\n # void_id = 0\n # category[~is_valid] = void_id\n # aihabitat_utils.display_sample(rgb, category, depth, filename=f'{dir_results}/view/view_category.png')\n\n return rgb, depth\n\n def render_envmap(self):\n \"\"\"\n Render environment map in *** format\n \"\"\"\n\n with suppress_stdout_and_stderr():\n angles = [0, 270, 180, 90]\n rgb_panorama = []\n depth_panorama = []\n\n for angle_offset in angles:\n angle = self.receiver.rotation + angle_offset\n agent = self.sim.get_agent(self.aihabitat[\"default_agent\"])\n new_state = self.sim.get_agent(self.aihabitat[\"default_agent\"]).get_state()\n new_state.rotation = quat_from_angle_axis(\n math.radians(angle), np.array([0, 1.0, 0])\n ) * quat_from_angle_axis(math.radians(0), np.array([1.0, 0, 0]))\n new_state.sensor_states = {}\n agent.set_state(new_state, True)\n\n observation = self.sim.get_sensor_observations()\n rgb_panorama.append(observation[\"color_sensor\"])\n depth_panorama.append((observation['depth_sensor']))\n envmap_rgb = np.concatenate(rgb_panorama, axis=1)\n envmap_depth = np.concatenate(depth_panorama, axis=1)\n\n # rotate receiver to original angle\n self.update_receiver(self.receiver)\n\n return envmap_rgb, envmap_depth\n\n def generate_xy_grid_points(self,\n grid_distance: float,\n height: float = None,\n filename_png: str = None,\n meters_per_pixel: float = 0.005\n ) -> torch.Tensor:\n \"\"\"\n Generate the 3D positions of grid points at the given height\n \"\"\"\n\n pathfinder = self.sim.pathfinder\n assert pathfinder.is_loaded\n # agent_height = pathfinder.nav_mesh_settings.agent_height # to be navigable, full body of the agent should be inside\n if height is None: # height of the agent foot\n height = 0\n # height = pathfinder.get_bounds()[0][1] # floor height\n\n # Sample grid\n bounds = pathfinder.get_bounds()\n x_points = torch.arange(bounds[0][0], bounds[1][0] + grid_distance, grid_distance)\n z_points = torch.arange(bounds[0][2], bounds[1][2] + grid_distance, grid_distance)\n x_grid, z_grid = torch.meshgrid(x_points, z_points)\n y_value = height * torch.ones_like(x_grid.reshape(-1))\n\n # Combine x, y, and z coordinates into a single tensor of points\n points = torch.stack([x_grid.reshape(-1), y_value.reshape(-1), z_grid.reshape(-1)], dim=-1)\n is_points_navigable = []\n for point in points:\n is_points_navigable.append(pathfinder.is_navigable(point)) # navigable points\n torch.tensor(is_points_navigable).sum()\n\n # Flatten the tensor of points into a list\n grid_points = points[is_points_navigable]\n\n # assert len(grid_points) > 0\n # save image\n if filename_png is not None:\n aihabitat_utils.save_town_map_grid(filename_png, pathfinder, grid_points, meters_per_pixel=meters_per_pixel)\n\n return grid_points\n\n def generate_data(self, use_dry_sound: bool = False):\n \"\"\"\n Generate all data including IR, envmap, audio, image\n \"\"\"\n\n # env map\n if self.include_visual_sensor:\n envmap_rgb, envmap_depth = self.render_image()\n else:\n envmap_rgb, envmap_depth = None, None\n\n # IR\n self.add_audio_sensor() # add audio_sensor after image rendering for faster image rendering\n ir_list = self.render_ir_all()\n # ir_total = sum_arrays_with_different_length(ir_list).detach().cpu()\n\n # audio_list\n dry_sound_list = []\n audio_list = []\n # audio_total = 
None\n if use_dry_sound:\n for source_id, source in enumerate(self.source_list):\n # load dry sound\n dry_sound = source.dry_sound\n if isinstance(dry_sound, str):\n dry_sound, sample_rate = torchaudio.load(dry_sound)\n self.dry_sound = dry_sound.to(self.device)\n self.sample_rate = sample_rate\n\n ir = ir_list[source_id]\n audio = torch.stack([audio_utils.fft_conv(dry_sound[0], ir_channel, is_cpu=True) for ir_channel in ir])\n dry_sound_list.append(dry_sound.detach().cpu())\n audio_list.append(audio.detach().cpu())\n\n # audio_total\n # audio_total = sum_arrays_with_different_length(audio_list)\n\n # cpu\n ir_list = [tensor.detach().cpu() for tensor in ir_list]\n\n # dirname = '.'\n # with open(f'{dirname}/debug.txt', 'w') as f:\n # f.write(f'NavMesh area: {self.sim.pathfinder.navigable_area}\\n')\n # f.write(f'NavMesh bounds: {self.sim.pathfinder.get_bounds()}\\n')\n # f.write(f'Receiver position: {self.receiver.position}\\n')\n # for s, source in enumerate(self.source_list):\n # f.write(f'Source {s} position: {source.position}\\n')\n # f.write(f'\\n')\n\n return dict(\n ir_list=ir_list,\n sample_rate=self.receiver.sample_rate,\n envmap=[envmap_rgb, envmap_depth],\n audio_list=audio_list,\n dry_sound_list=dry_sound_list,\n )"
}
] | import os
import json
import argparse
import itertools
import subprocess
import typing as T
import torch
import imageio
import torchaudio
import numpy as np
import matplotlib.pyplot as plt
from moviepy.editor import *
from nvas3d.utils.dynamic_utils import convolve_moving_receiver, setup_dynamic_interp
from nvas3d.utils.audio_utils import clip_two, clip_all
from soundspaces_nvas3d.utils.ss_utils import create_scene, render_rir_parallel
from soundspaces_nvas3d.utils.aihabitat_utils import load_room_grid
from soundspaces_nvas3d.soundspaces_nvas3d import Receiver, Source, Scene | 8,312 | #
# For licensing see accompanying LICENSE file.
# Copyright (C) 2023 Apple Inc. All Rights Reserved.
#
def normalize(input: torch.Tensor) -> torch.Tensor:
output = (input - input.min()) / (input.max() - input.min())
output = 2 * output - 1
return output
def configure_scene_from_metadata(
metadata: T.Dict[str, T.Any],
image_size: T.Tuple[int, int] = (1000, 1000),
hfov: float = 90.0,
use_placeholder_mesh: bool = False
| #
# For licensing see accompanying LICENSE file.
# Copyright (C) 2023 Apple Inc. All Rights Reserved.
#
def normalize(input: torch.Tensor) -> torch.Tensor:
output = (input - input.min()) / (input.max() - input.min())
output = 2 * output - 1
return output
def configure_scene_from_metadata(
metadata: T.Dict[str, T.Any],
image_size: T.Tuple[int, int] = (1000, 1000),
hfov: float = 90.0,
use_placeholder_mesh: bool = False | ) -> Scene: | 9 | 2023-10-19 05:35:54+00:00 | 12k |
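For the record above, the context already gives full implementations of setup_dynamic_interp and convolve_moving_receiver; the following is a minimal usage sketch with dummy arrays (the import path follows the snippet metadata, and the shapes follow the docstrings):

import numpy as np
from nvas3d.utils.dynamic_utils import setup_dynamic_interp, convolve_moving_receiver

audio_len = 4 * 48000                                    # 4 s of mono source audio at 48 kHz
source_audio = np.random.randn(audio_len).astype(np.float32)
path = np.array([[0.0, 1.5, 0.0],                        # receiver waypoints, shape (num_positions, 3)
                 [1.0, 1.5, 0.0],
                 [2.0, 1.5, 1.0]])
rirs = np.random.randn(len(path), 2, 4800).astype(np.float32)   # per-waypoint binaural RIRs

# map every output sample to a path segment and an interpolation weight
interp_index, interp_weight = setup_dynamic_interp(path, audio_len)
# convolve with the per-position RIRs and blend them along the moving path
moving_audio = convolve_moving_receiver(source_audio, rirs, interp_index, interp_weight)
print(moving_audio.shape)  # (2, audio_len)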
tiejundong/FlexPose | FlexPose/utils/prediction.py | [
{
"identifier": "FlexPose",
"path": "FlexPose/model/layers.py",
"snippet": "class FlexPose(torch.nn.Module):\n def __init__(self, args=None, param_path=None):\n super(FlexPose, self).__init__()\n if args is not None:\n self.init_param(args)\n else:\n self.init_param_with_save(param_path)\n\n def forward(self, complex_graph, explicit_cycle=False, cycle_i=0, args=None, epoch=1e+5):\n # for pregen data\n if self.do_pregen_data:\n complex_graph = self.pretrain(complex_graph)\n return complex_graph\n\n # with explicit cycle\n # if explicit_cycle:\n # if cycle_i == 0:\n # complex_graph = self.pretrain(complex_graph)\n # complex_graph = self.init_embed(complex_graph)\n # complex_graph = self.run_cycle(complex_graph, cycle_i)\n # tup_pred = self.pred_label(complex_graph)\n # return tup_pred\n\n # first embed\n complex_graph = self.init_embed(complex_graph)\n\n # MC cycle\n complex_graph = self.run_cycling(complex_graph)\n\n # prediction\n tup_pred = self.pred_label(complex_graph)\n\n return tup_pred\n\n def run_cycling(self, complex_graph):\n # MC cycle\n if self.training:\n cycle_num = random.sample(range(1, self.n_cycle + 1), 1)[0]\n for cycle_i in range(cycle_num - 1):\n with torch.no_grad():\n complex_graph = self.run_single_cycle(complex_graph, cycle_i)\n if self.use_min and cycle_i > 0:\n complex_graph = self.energy_min(complex_graph)\n complex_graph = self.run_single_cycle(complex_graph, cycle_num - 1)\n else:\n for cycle_i in range(self.n_cycle):\n complex_graph = self.run_single_cycle(complex_graph, cycle_i)\n if self.use_min and cycle_i > 0 and cycle_i < self.n_cycle-1:\n complex_graph = self.energy_min(complex_graph)\n return complex_graph\n\n def init_param(self, args):\n self.args = args\n self.n_cycle = args.n_cycle\n self.use_pretrain = args.use_pretrain\n self.do_pregen_data = args.do_pregen_data # do pre-generation\n self.use_pregen_data = args.use_pregen_data # use pre-generated data\n self.add_l_dismap = args.add_l_dismap\n self.coor_scale = args.coor_scale\n\n # pretrained\n if self.use_pretrain:\n self.p_encoder = PocketEncoder(args)\n self.l_feat_encoder = LigandFeatEncoder(args)\n self.load_encoder(args)\n\n # decoder\n self.c_decoder = ComplexDecoder(args)\n\n # E min\n self.use_min = args.MMFF_min\n if self.use_min:\n self.coor_min_object = CoorMin(args)\n\n # embedding\n # extra embedding for encoder (pretrain) and decoder\n if args.use_pretrain:\n # ligand embed\n self.l_extra_embed = True if args.l_x_sca_hidden != args.c_x_sca_hidden else False\n if self.l_extra_embed:\n self.l_x_sca_embed = make_embed(args.l_x_sca_hidden, args.c_x_sca_hidden)\n if self.add_l_dismap:\n self.l_edge_sca_embed = make_embed(args.l_edge_sca_hidden + 1, args.c_edge_sca_hidden)\n else:\n self.l_edge_sca_embed = make_embed(args.l_edge_sca_hidden, args.c_edge_sca_hidden)\n self.l_x_vec_embed = VecExpansion(args.l_x_vec_indim, args.c_x_vec_hidden)\n self.l_edge_vec_embed = VecExpansion(args.l_edge_vec_indim, args.c_edge_vec_hidden)\n\n # pocekt embed\n self.p_extra_embed = True if args.p_x_sca_hidden != args.c_x_sca_hidden else False\n self.p_x_sca_embed = make_embed(args.p_x_sca_hidden + 12, args.c_x_sca_hidden) # explicit torsion\n if self.p_extra_embed:\n self.p_edge_sca_embed = make_embed(args.p_edge_sca_hidden, args.c_edge_sca_hidden)\n self.p_x_vec_embed = VNL(args.p_x_vec_hidden, args.c_x_vec_hidden, leaky_relu=False)\n self.p_edge_vec_embed = VNL(args.p_edge_vec_hidden, args.c_edge_vec_hidden, leaky_relu=False)\n else:\n # ligand embed\n self.l_x_sca_embed = make_embed(args.l_x_sca_indim + 1, args.c_x_sca_hidden)\n if self.add_l_dismap:\n 
self.l_edge_sca_embed = make_embed(args.l_edge_sca_indim + 1 + 1, args.c_edge_sca_hidden)\n else:\n self.l_edge_sca_embed = make_embed(args.l_edge_sca_indim + 1, args.c_edge_sca_hidden)\n self.l_x_vec_embed = VecExpansion(args.l_x_vec_indim, args.c_x_vec_hidden)\n self.l_edge_vec_embed = VecExpansion(args.l_edge_vec_indim, args.c_edge_vec_hidden)\n\n # pocekt embed\n self.p_x_sca_embed = make_embed(args.p_x_sca_indim + 12 + 1, args.c_x_sca_hidden) # explicit torsion input\n self.p_edge_sca_embed = make_embed(args.p_edge_sca_indim, args.c_edge_sca_hidden)\n self.p_x_vec_embed = VNL(args.p_x_vec_indim, args.c_x_vec_hidden, leaky_relu=False)\n self.p_edge_vec_embed = VNL(args.p_edge_vec_indim, args.c_edge_vec_hidden, leaky_relu=False)\n\n\n # cycle\n self.x_gate = GVGateResidue(args.c_x_sca_hidden, args.c_x_vec_hidden, full_gate=True)\n self.edge_gate = GVGateResidue(args.c_edge_sca_hidden, args.c_edge_vec_hidden, full_gate=True)\n\n # for additional tasks\n self.pred_CB_layer = torch.nn.Sequential(\n GVP(args.c_x_sca_hidden, args.c_x_vec_hidden, args.c_x_sca_hidden, args.c_x_vec_hidden),\n GVL(args.c_x_sca_hidden, args.c_x_vec_hidden, args.c_x_sca_hidden, 1)\n )\n self.pred_tor_layer = torch.nn.Sequential(\n torch.nn.Linear(args.c_x_sca_hidden + args.c_x_vec_hidden,\n args.c_x_sca_hidden + args.c_x_vec_hidden),\n torch.nn.LeakyReLU(),\n torch.nn.Linear(args.c_x_sca_hidden + args.c_x_vec_hidden, 8),\n )\n self.pred_aff_layer = torch.nn.Sequential(\n torch.nn.Linear(args.c_x_sca_hidden + args.c_x_vec_hidden + args.c_edge_sca_hidden + args.c_edge_vec_hidden,\n args.c_x_sca_hidden + args.c_x_vec_hidden + args.c_edge_sca_hidden + args.c_edge_vec_hidden),\n torch.nn.LeakyReLU(),\n torch.nn.Linear(args.c_x_sca_hidden + args.c_x_vec_hidden + args.c_edge_sca_hidden + args.c_edge_vec_hidden, 1)\n )\n\n def init_param_with_save(self, param_path):\n if isinstance(param_path, str) and os.path.isfile(param_path):\n chk = torch.load(param_path, map_location='cpu')\n else:\n chk = load_FlexPose(param_path)\n self.init_param(chk['args'])\n self.load_state_dict(chk['model_state_dict'], strict=True)\n del chk\n\n def pred_label(self, complex_graph):\n # ligand coor\n l_coor_pred = rearrange(complex_graph.coor_hidden, 'b n h c -> (b n) h c')[complex_graph.ligand_node_loc_in_complex_flat]\n\n # CA\n CA_pred = rearrange(complex_graph.coor_hidden, 'b n h c -> (b n) h c')[complex_graph.p_partial_select_mask]\n\n # CB\n CB_pred = rearrange(complex_graph.coor + self.pred_CB_layer(complex_graph.x_sca_vec)[1].squeeze(-2),\n 'b n c -> (b n) c')[complex_graph.p_partial_select_mask]\n\n # aff\n x_sca_vec_cat = torch.cat([complex_graph.x_sca, complex_graph.x_vec.norm(p=2, dim=-1)], dim=-1)\n x_pooling = (x_sca_vec_cat * complex_graph.x_mask.float().unsqueeze(-1)).sum(dim=-2) / complex_graph.x_mask.float().sum(dim=-1, keepdims=True)\n edge_sca_vec_cat = torch.cat([complex_graph.edge_sca, complex_graph.edge_vec.norm(p=2, dim=-1)], dim=-1)\n edge_pooling = torch.einsum('b i j d -> b d', edge_sca_vec_cat * complex_graph.edge_mask.float().unsqueeze(-1)) / \\\n torch.einsum('b i j -> b', complex_graph.edge_mask.float()).unsqueeze(-1)\n x_edge_pooling = torch.cat([x_pooling, edge_pooling], dim=-1)\n aff_pred = self.pred_aff_layer(x_edge_pooling).squeeze(dim=-1)\n if not self.training:\n aff_pred = F.relu(aff_pred)\n\n # tor\n x_sca_vec_cat = rearrange(x_sca_vec_cat, 'b n d -> (b n) d')[complex_graph.p_partial_select_mask]\n # x_sca_vec_cat = torch.cat([x_sca_vec_cat, complex_graph.sc_in_partial_select], dim=-1)\n SC_pred = 
rearrange(self.pred_tor_layer(x_sca_vec_cat), '... (m s) -> ... m s', s=2)\n # if not self.training:\n # SC_pred = SC_pred.clamp(min=-1, max=1)\n\n return (l_coor_pred, CA_pred, CB_pred, aff_pred, SC_pred)\n\n @torch.no_grad()\n def infer(self, complex_graph):\n complex_graph = self.init_embed(complex_graph)\n complex_graph = self.run_cycling(complex_graph)\n\n # ligand coor\n coor_pred = complex_graph.coor_hidden\n\n CB_pred = complex_graph.coor + self.pred_CB_layer(complex_graph.x_sca_vec)[1].squeeze(-2)\n\n # aff\n x_sca_vec_cat = torch.cat([complex_graph.x_sca, complex_graph.x_vec.norm(p=2, dim=-1)], dim=-1)\n x_pooling = (x_sca_vec_cat * complex_graph.x_mask.float().unsqueeze(-1)).sum(\n dim=-2) / complex_graph.x_mask.float().sum(dim=-1, keepdims=True)\n edge_sca_vec_cat = torch.cat([complex_graph.edge_sca, complex_graph.edge_vec.norm(p=2, dim=-1)], dim=-1)\n edge_pooling = torch.einsum('b i j d -> b d',\n edge_sca_vec_cat * complex_graph.edge_mask.float().unsqueeze(-1)) / \\\n torch.einsum('b i j -> b', complex_graph.edge_mask.float()).unsqueeze(-1)\n x_edge_pooling = torch.cat([x_pooling, edge_pooling], dim=-1)\n aff_pred = self.pred_aff_layer(x_edge_pooling).squeeze(dim=-1)\n aff_pred = F.relu(aff_pred)\n\n # tor\n SC_pred = rearrange(self.pred_tor_layer(x_sca_vec_cat), '... (m s) -> ... m s', s=2)\n\n return (coor_pred, CB_pred, aff_pred, SC_pred)\n\n def load_encoder(self, args):\n if 'pretrain_protein_encoder' in args.__dict__.keys():\n if isinstance(args.pretrain_protein_encoder, str) and os.path.isfile(args.pretrain_protein_encoder):\n print('Loading pre-trained protein encoder ...')\n p_param = torch.load(args.pretrain_protein_encoder, map_location='cpu')\n else:\n p_param = load_pretrained_protein_encoder(args.pretrain_protein_encoder)\n self.p_encoder.load_state_dict(p_param['model_state_dict'], strict=True)\n del p_param\n else:\n pass\n # print('Skip loading pre-trained protein encoder parameters')\n\n if 'pretrain_ligand_encoder' in args.__dict__.keys():\n if isinstance(args.pretrain_ligand_encoder, str) and os.path.isfile(args.pretrain_ligand_encoder):\n print('Loading pre-trained ligand encoder ...')\n l_param = torch.load(args.pretrain_ligand_encoder, map_location='cpu')\n else:\n l_param = load_pretrained_ligand_encoder(args.pretrain_ligand_encoder)\n self.l_feat_encoder.load_state_dict(l_param['model_state_dict'], strict=True)\n del l_param\n else:\n pass\n # print('Skip loading pre-trained ligand encoder parameters')\n\n def pretrain(self, complex_graph):\n # pretrain\n cur_state = self.training\n self.train(False)\n with torch.no_grad():\n complex_graph = self.p_encoder(complex_graph)\n complex_graph = self.l_feat_encoder(complex_graph)\n self.train(cur_state)\n\n complex_graph.p_x_sca_vec_pretrained = complex_graph.p_x_sca_vec\n complex_graph.p_edge_sca_vec_pretrained = complex_graph.p_edge_sca_vec\n complex_graph.l_x_sca_pretrained = complex_graph.l_x_sca\n complex_graph.l_edge_sca_pretrained = complex_graph.l_edge_sca\n return complex_graph\n\n def init_embed(self, complex_graph):\n if self.use_pretrain:\n if not self.use_pregen_data:\n complex_graph = self.pretrain(complex_graph)\n\n # pocket\n p_x_sca, p_x_vec = complex_graph.p_x_sca_vec_pretrained\n p_edge_sca, p_edge_vec = complex_graph.p_edge_sca_vec_pretrained\n p_x_sca = torch.cat([p_x_sca, complex_graph.sc_in], dim=-1) # explicit torsion input\n p_x_sca = self.p_x_sca_embed(p_x_sca)\n if self.p_extra_embed:\n p_edge_sca = self.p_edge_sca_embed(p_edge_sca)\n p_x_vec = self.p_x_vec_embed(p_x_vec)\n 
p_edge_vec = self.p_edge_vec_embed(p_edge_vec)\n\n # ligand\n l_x_sca = complex_graph.l_x_sca_pretrained\n l_edge_sca = complex_graph.l_edge_sca_pretrained\n if self.l_extra_embed:\n l_x_sca = self.l_x_sca_embed(l_x_sca)\n if self.add_l_dismap:\n l_edge_sca = self.l_edge_sca_embed(torch.cat([l_edge_sca, complex_graph.l_dismap.unsqueeze(-1)], dim=-1))\n else:\n l_edge_sca = self.l_edge_sca_embed(l_edge_sca)\n l_x_vec = self.l_x_vec_embed(complex_graph.l_x_vec_init)\n l_edge_vec = self.l_edge_vec_embed(complex_graph.l_edge_vec_init)\n else:\n # pocket\n # explicit torsion input\n p_x_sca = torch.cat([complex_graph.p_x_sca_init, complex_graph.sc_in], dim=-1)\n p_x_sca = self.p_x_sca_embed(p_x_sca)\n p_edge_sca = self.p_edge_sca_embed(complex_graph.p_edge_sca_init)\n p_x_vec = self.p_x_vec_embed(complex_graph.p_x_vec_init)\n p_edge_vec = self.p_edge_vec_embed(complex_graph.p_edge_vec_init)\n\n # ligand\n l_x_sca = self.l_x_sca_embed(complex_graph.l_x_sca_init)\n if self.add_l_dismap:\n l_edge_sca = self.l_edge_sca_embed(torch.cat([complex_graph.l_edge_sca_init, complex_graph.l_dismap.unsqueeze(-1)], dim=-1))\n else:\n l_edge_sca = self.l_edge_sca_embed(complex_graph.l_edge_sca_init)\n l_x_vec = self.l_x_vec_embed(complex_graph.l_x_vec_init)\n l_edge_vec = self.l_edge_vec_embed(complex_graph.l_edge_vec_init)\n\n # merge\n complex_graph.x_sca_init = torch.cat([p_x_sca, l_x_sca], dim=1)\n complex_graph.x_vec_init = torch.cat([p_x_vec, l_x_vec], dim=1)\n complex_graph.x_sca_vec_init = (complex_graph.x_sca_init, complex_graph.x_vec_init)\n complex_graph.x_mask = torch.cat([complex_graph.p_x_mask, complex_graph.l_x_mask], dim=1)\n complex_graph.edge_sca_init = self.cat_edge(p_edge_sca, l_edge_sca)\n complex_graph.edge_vec_init = self.cat_edge(p_edge_vec, l_edge_vec)\n complex_graph.edge_sca_vec_init = (complex_graph.edge_sca_init, complex_graph.edge_vec_init)\n complex_graph.edge_mask = self.cat_edge(complex_graph.p_edge_mask, complex_graph.l_edge_mask)\n complex_graph.coor_init = torch.cat([complex_graph.p_coor_init, complex_graph.l_coor_init], dim=1)\n\n return complex_graph\n\n def run_single_cycle(self, complex_graph, cycle_i=0):\n if cycle_i == 0:\n complex_graph.x_sca_vec = complex_graph.x_sca_vec_init\n complex_graph.edge_sca_vec = complex_graph.edge_sca_vec_init\n complex_graph.coor = complex_graph.coor_init\n else:\n complex_graph.x_sca_vec = self.x_gate(complex_graph.x_sca_vec, complex_graph.x_sca_vec_init)\n complex_graph.edge_sca_vec = self.edge_gate(complex_graph.edge_sca_vec, complex_graph.edge_sca_vec_init)\n complex_graph = self.c_decoder(complex_graph)\n complex_graph.x_sca, complex_graph.x_vec = complex_graph.x_sca_vec\n complex_graph.edge_sca, complex_graph.edge_vec = complex_graph.edge_sca_vec\n return complex_graph\n\n def energy_min(self, complex_graph, loop=None, constraint=None, show_state=False, min_type='GD'):\n if self.use_min:\n coor_flat = rearrange(complex_graph.coor, 'b n c -> (b n) c')\n l_coor_pred = coor_flat[complex_graph.ligand_node_loc_in_complex_flat]\n l_coor_min = self.coor_min_object(l_coor_pred * self.coor_scale, complex_graph,\n loop=loop, constraint=constraint,\n show_state=show_state, min_type=min_type)\n coor_flat[complex_graph.ligand_node_loc_in_complex_flat] = l_coor_min / self.coor_scale\n complex_graph.coor = rearrange(coor_flat, '(b n) c -> b n c', b=complex_graph.coor.size(0))\n return complex_graph\n\n def cat_edge(self, edge_1, edge_2):\n d_1 = edge_1.size(1)\n d_2 = edge_2.size(1)\n if len(edge_1.size()) == 3:\n edge_1_pad = (0, d_2)\n 
edge_2_pad = (d_1, 0)\n elif len(edge_1.size()) == 4:\n edge_1_pad = (0, 0, 0, d_2)\n edge_2_pad = (0, 0, d_1, 0)\n elif len(edge_1.size()) == 5:\n edge_1_pad = (0, 0, 0, 0, 0, d_2)\n edge_2_pad = (0, 0, 0, 0, d_1, 0)\n else:\n assert len(edge_1.size()) in [3, 4, 5]\n edge_1 = F.pad(edge_1, edge_1_pad, 'constant', 0)\n edge_2 = F.pad(edge_2, edge_2_pad, 'constant', 0)\n edge = torch.cat([edge_1, edge_2], dim=1)\n return edge"
},
{
"identifier": "try_prepare_task",
"path": "FlexPose/preprocess/prepare_for_training.py",
"snippet": "def try_prepare_task(intup):\n f, task = intup\n try:\n f(task)\n return True\n except:\n return False"
},
{
"identifier": "pred_ens",
"path": "FlexPose/utils/APOPDBbind_data.py",
"snippet": "@torch.no_grad()\ndef pred_ens(coor_pred, dic_data, return_raw=False):\n # coor_pred: [ens, n_atom, 3]\n ens = coor_pred.shape[0]\n l_match = dic_data.l_match.reshape(ens, -1)[0]\n\n if ens > 1:\n ens_pred = coor_pred[0]\n first_pred = coor_pred[0]\n\n rest_pred = coor_pred[1:]\n\n rmsd_match_ens, tmp_pred, n_match = calc_rmsd(rest_pred,\n repeat(first_pred, 'n c -> e n c', e=rest_pred.size(0)),\n match=l_match) # return [match, ens]\n min_index = rmsd_match_ens.min(dim=0, keepdims=True)[1]\n rest_ens_matched_pred = torch.gather(tmp_pred, dim=0,\n index=repeat(min_index, 'm e -> m n e c', n=rest_pred.size(1),\n c=3)).squeeze(0) # to [n_atom, ens-1, 3]\n ens_pred = torch.cat([first_pred.unsqueeze(1), rest_ens_matched_pred], dim=1)\n if return_raw:\n return ens_pred\n else:\n ens_pred = ens_pred.mean(dim=1)\n else:\n ens_pred = coor_pred[0]\n\n return ens_pred"
},
{
"identifier": "MMFF_keys",
"path": "FlexPose/model/MMFF.py",
"snippet": "def pad_zero_param(index, param, index_dim, param_dim):\ndef get_BS_param(mol, props, dic_MMFF_param, dic_tmp):\ndef get_angle_by_bond(bond_list):\ndef get_AB_param(mol, props, dic_MMFF_param, dic_tmp):\ndef filter_SB(mol, props, SB_index):\ndef get_SB_param(mol, props, dic_MMFF_param, dic_tmp):\ndef get_dict_bond(bond_list):\ndef get_oop(mol, props, dic_bond, angle_list):\ndef get_OOP_param(mol, props, dic_MMFF_param, dic_tmp):\ndef get_torsion(mol, props, dic_bond, angle_list):\ndef get_TOR_param(mol, props, dic_MMFF_param, dic_tmp):\ndef get_14(mol):\ndef get_noncov_pair(mol, bond_list, angle_list, pair_14_list):\ndef get_VDW_param(mol, props, dic_MMFF_param, dic_tmp, select_pair_index=None):\ndef get_ELE_param(mol, props, dic_MMFF_param, dic_tmp):\ndef add_batch_info(dic_MMFF_param):\ndef get_MMFF_param(mol, props=None, strict=False):\ndef get_MMFF_param_for_complex(protein_mol, ligand_mol):\n def __init__(self, split_interact=False, warm=False):\n def __call__(self, coor, MMFF_param, return_sum=True):\n def get_BS(self, coor, BS_index, BS_param, BS_batch):\n def get_AB(self, coor, AB_index, AB_param, AB_batch):\n def get_SB(self, coor, SB_index, SB_param, SB_batch):\n def get_OOP(self, coor, OOP_index, OOP_param, OOP_batch):\n def get_TOR(self, coor, TOR_index, TOR_param, TOR_batch):\n def get_VDW(self, coor, VDW_index, VDW_param, VDW_batch):\n def get_ELE(self, coor, ELE_index, ELE_param, ELE_batch):\n def get_angle(self, coor, angle_index):\n def get_oop(self, coor, oop_index):\n def get_dihedral_angle(self, coor, dihedral_index):\n def get_cross(self, v1, v2):\n def split_to_single_dim(self, x):\n A = np.array([143.9325*kb/2 for kb, r0 in BS_param])\n B = np.array([1 for kb, r0 in BS_param])\n C = np.array([-2 for kb, r0 in BS_param])\n D = np.array([7/12*(-2)**2 for kb, r0 in BS_param])\n A = np.array([0.043844*ka/2 for lin, ka, theta0 in AB_param])\n B = np.array([1 for lin, ka, theta0 in AB_param])\n C = np.array([-0.006981317 for lin, ka, theta0 in AB_param])\n D = np.array([143.9325*ka for lin, ka, theta0 in AB_param])\n E = np.array([143.9325*ka for lin, ka, theta0 in AB_param])\n A = np.array([2.51210*kbaIJK for kbaIJK, kbaKJI, r0ij, r0kj, theta0 in SB_param])\n B = np.array([2.51210*kbaKJI for kbaIJK, kbaKJI, r0ij, r0kj, theta0 in SB_param])\n A = np.array([0.043844*koop/2 for koop in OOP_param])\n A = np.array([0.5*(V1+V2+V3) for V1, V2, V3 in TOR_param])\n B = np.array([0.5*V1 for V1, V2, V3 in TOR_param])\n C = np.array([0.5*(-V2) for V1, V2, V3 in TOR_param])\n D = np.array([0.5*V3 for V1, V2, V3 in TOR_param])\n A = np.array([epsilon*(1.07*R_ij_star)**7 for R_ij_star, epsilon in VDW_param])\n B = np.array([0.07*R_ij_star for R_ij_star, epsilon in VDW_param])\n C = np.array([1.12*R_ij_star**7 for R_ij_star, epsilon in VDW_param])\n D = np.array([0.12*R_ij_star**7 for R_ij_star, epsilon in VDW_param])\n E = np.array([-2 for R_ij_star, epsilon in VDW_param])\n A = np.array([332.07169*flag_14*qi*qj for flag_14, qi, qj in ELE_param])\n B = np.array([0.05 for flag_14, qi, qj in ELE_param])\n A = OOP_param\n A, B, C, D = TOR_param.split(1, dim=-1)\n A, B, C, D, E = VDW_param.split(1, dim=-1)\n A, B = ELE_param.split(1, dim=-1)\nclass MMFFLoss():"
}
] | import os
import shutil
import sys
import argparse
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
import pyrosetta
from biopandas.pdb import PandasPdb
from collections import defaultdict
from ray.util.multiprocessing import Pool
from rdkit import Chem
from rdkit.Chem import AllChem, Descriptors
from einops import rearrange, repeat
from torch_scatter import scatter_min, scatter_add
from FlexPose.model.layers import FlexPose
from FlexPose.utils.common import *
from FlexPose.preprocess.prepare_for_training import try_prepare_task
from FlexPose.utils.APOPDBbind_data import pred_ens
from FlexPose.utils.pdbbind_preprocess import *
from FlexPose.utils.data_utils import *
from FlexPose.model.MMFF import MMFF_keys, MMFF_pad_dim, get_MMFF_param
from tqdm.notebook import tqdm, trange
from tqdm import tqdm, trange
from modeller import Environ
from modeller.scripts import complete_pdb | 8,748 | opts = '-mute true -ignore_unrecognized_res true'
pyrosetta.distributed.init(opts)
pose = pyrosetta.io.pose_from_pdb(fixed_protein_path)
dic_tor = get_torsion_from_pose(pose)
ref_mol = read_rdkit_mol(ref_path, silence=True)
ref_coor = get_true_posi(ref_mol)
biodf_protein = PandasPdb().read_pdb(fixed_protein_path)
df_protein = biodf_protein.df['ATOM']
df_protein['chain_resi'] = df_protein['chain_id'].astype(str) + '_' + df_protein['residue_number'].astype(str)
df_pocket, sele_res = get_pocket(df_protein, ref_coor, max_len_protein=max_len_pocket)
SCtorsion_data = get_torsion(dic_tor, df_protein, df_pocket)
protein_data = encode_pocket(df_pocket) + [SCtorsion_data]
assert protein_data[0].shape[0] == SCtorsion_data[0].shape[0]
assert protein_data[0].shape[0] <= max_len_pocket, 'pocket must contain no more than 150 residues'
# os.remove(fixed_protein_path)
dic_data = dict(
ligand_data=ligand_data,
protein_data=protein_data,
protein_path=fixed_protein_path,
ligand_path=l_path,
sele_res=sele_res,
dic_MMFF_param=dic_MMFF_param,
)
pickle.dump(dic_data, open(cache_path + '/{}.pkl'.format(idx), 'wb'))
return True
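# Prepare every (protein, ligand, reference) triple in input_list; with Ray multiprocessing each task goes through try_prepare_task, so a single failure only increments the fail counter.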
def preprare_input_data(input_list, cache_path, prepare_data_with_multi_cpu):
delmkdir(cache_path)
tasks = []
for idx, f_name_list in enumerate(input_list):
tasks.append((prepare_single_input, (f_name_list, idx, cache_path)))
fail = 0
if prepare_data_with_multi_cpu:
pool = Pool()
print('Preparing input data...')
for r in pool.map(try_prepare_task, tasks):
if not r:
fail += 1
else:
for task in tqdm(tasks, desc='Preparing input data'):
r = try_prepare_task(task)
if not r:
fail += 1
print(f'Prepared data: {len(tasks) - fail}/{len(tasks)}, {(len(tasks) - fail) / len(tasks) * 100:.2f}%')
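# Collect inputs either from explicit arguments or from a batch CSV with 'protein', 'ligand' and 'ref_pocket_center' columns.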
def read_input(protein, ligand, ref_pocket_center, batch_csv):
if batch_csv is not None:
df_input = pd.read_csv(batch_csv)
protein_list = df_input['protein'].values
ligand_list = df_input['ligand'].values
ref_pocket_center_list = df_input['ref_pocket_center'].values
else:
assert protein is not None and ligand is not None and ref_pocket_center is not None
if not isinstance(protein, list):
protein_list = [protein]
else:
protein_list = protein
if not isinstance(ligand, list):
ligand_list = [ligand]
else:
ligand_list = ligand
if not isinstance(ref_pocket_center, list):
ref_pocket_center_list = [ref_pocket_center]
else:
ref_pocket_center_list = ref_pocket_center
input_list = [(i, j, k) for i, j, k in zip(protein_list, ligand_list, ref_pocket_center_list)]
return input_list
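# Dataset over the cached .pkl records produced above; when ens > 1 the same complex is sampled repeatedly and collated for ensemble inference.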
class InferDataset(torch.utils.data.Dataset):
def __init__(self, args, cache_path, ens=1):
self.data_path = cache_path
self.data_list = [i.split('.')[0] for i in os.listdir(cache_path) if i.endswith('.pkl')]
self.ens = ens
self.coor_scale = args.coor_scale
self.max_len_pocket = args.max_len_pocket
self.max_len_ligand = args.max_len_ligand
self.l_init_sigma = args.l_init_sigma / self.coor_scale
def __getitem__(self, i):
if self.ens > 1:
complex_graph = []
for e in range(self.ens):
complex_graph.append(self.get_complex(self.data_list[i]))
complex_graph = collate_input(complex_graph) # use collate_dummy in loader
else:
complex_graph = self.get_complex(self.data_list[i])
return complex_graph
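# Load one cached record and rebuild the complex graph (ligand features, pocket features, optional MMFF parameters).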
def get_complex(self, idx):
# =============== get dict data ===============
dic_data = pickle.load(open(f'{self.data_path}/{idx}.pkl', 'rb'))
ligand_data = dic_data['ligand_data']
protein_data = dic_data['protein_data']
protein_path = dic_data['protein_path']
ligand_path = dic_data['ligand_path']
sele_res = dic_data['sele_res']
dic_MMFF_param = dic_data['dic_MMFF_param']
# =============== ligand ===============
l_x_sca_init, l_edge_sca_init, l_coor_ref, l_match, l_dismap = ligand_data
l_match = l_match.reshape(-1)
n_match = len(l_match) // len(l_x_sca_init)
l_nomatch = repeat(torch.arange(0, len(l_x_sca_init)), 'm -> (n m)', n=n_match)
# get ligand MMFF (if exists)
if dic_MMFF_param is not None:
| sys.path.append('/'.join(os.path.abspath(__file__).split('/')[:-2]))
opts = '-mute true -ignore_unrecognized_res true'
pyrosetta.distributed.init(opts)
if is_notebook():
else:
def set_device(device):
if device == 'cpu':
torch.set_num_threads(16)
else:
torch.cuda.set_device(device)
def get_torsion_from_pose(pose):
bb_torsion = []
sc_torsion = []
for i in range(1, pose.size() + 1):
try:
res = pose.residue(i)
assert res.name3() in ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY', 'HIS', 'ILE',
'LEU', 'LYS', 'MET', 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL']
phi_psi = [pose.phi(i), pose.psi(i)]
chi = [c for c in res.chi()]
bb_torsion.append(phi_psi)
sc_torsion.append(chi)
except:
bb_torsion.append([None])
sc_torsion.append([None])
return {'bb_torsion': bb_torsion, 'sc_torsion': sc_torsion}
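# Encode a single (protein, ligand, reference) triple: ligand features via RDKit, pocket features from the MODELLER-completed structure, plus optional MMFF parameters.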
def prepare_single_input(tupin):
f_name_list, idx, cache_path = tupin
p_path, l_path, ref_path = f_name_list
max_len_ligand = 150
max_len_pocket = 150
# =========== ligand encoding ===========
ligand_mol = read_rdkit_mol(l_path)
if l_path.endswith('mol2'):
ligand_template = ligand_mol
else:
mol2 = '.'.join(l_path.split('.')[:-1]) + '.mol2'
if os.path.exists(mol2):
try:
ligand_template = Chem.MolFromMol2File(mol2)
ligand_mol = AllChem.AssignBondOrdersFromTemplate(ligand_template, ligand_mol)
print(f'Found mol2 {mol2} as input.')
except:
ligand_template = ligand_mol
else:
ligand_template = ligand_mol
if ligand_mol.GetNumConformers() == 0:
AllChem.EmbedMolecule(ligand_mol, maxAttempts=10, useRandomCoords=True, clearConfs=False)
ff = Chem.rdForceFieldHelpers.MMFFGetMoleculeForceField(
ligand_mol, Chem.rdForceFieldHelpers.MMFFGetMoleculeProperties(ligand_mol))
for atom_i in range(ligand_mol.GetNumAtoms()):
ff.MMFFAddPositionConstraint(atom_i, 1, 100) # maxDispl: maximum displacement
ff.Minimize(maxIts=20)
try:
dic_MMFF_param = get_MMFF_param(ligand_template)
except:
dic_MMFF_param = None
ligand_node_features = get_node_feature(ligand_template, 'ligand')
ligand_edge, ligand_edge_features = get_ligand_edge_feature(ligand_template)
ligand_match = get_ligand_match(ligand_template)
ligand_dismap = get_ligand_unrotable_distance(ligand_template) # not use in our model
ligand_coor_true = get_true_posi(ligand_mol)
ligand_coor_true = ligand_coor_true[get_ligand_match(ligand_mol, ligand_template)[0]]
ligand_data = [ligand_node_features, ligand_edge_features, ligand_coor_true, ligand_match, ligand_dismap]
assert len(ligand_node_features) <= max_len_ligand, 'ligand must contain no more than 150 atoms'
# =========== protein encoding ===========
# load modeller again for ray
with suppress_stdout_stderr():
env_ = Environ()
env_.libs.topology.read(file='$(LIB)/top_heav.lib')
env_.libs.parameters.read(file='$(LIB)/par.lib')
fixed_protein_path = cache_path + f'/{idx}_protein_tmp.pdb'
pdb_m = complete_pdb(env_, p_path)
pdb_m.write(fixed_protein_path)
opts = '-mute true -ignore_unrecognized_res true'
pyrosetta.distributed.init(opts)
pose = pyrosetta.io.pose_from_pdb(fixed_protein_path)
dic_tor = get_torsion_from_pose(pose)
ref_mol = read_rdkit_mol(ref_path, silence=True)
ref_coor = get_true_posi(ref_mol)
biodf_protein = PandasPdb().read_pdb(fixed_protein_path)
df_protein = biodf_protein.df['ATOM']
df_protein['chain_resi'] = df_protein['chain_id'].astype(str) + '_' + df_protein['residue_number'].astype(str)
df_pocket, sele_res = get_pocket(df_protein, ref_coor, max_len_protein=max_len_pocket)
SCtorsion_data = get_torsion(dic_tor, df_protein, df_pocket)
protein_data = encode_pocket(df_pocket) + [SCtorsion_data]
assert protein_data[0].shape[0] == SCtorsion_data[0].shape[0]
assert protein_data[0].shape[0] <= max_len_pocket, 'pocket must contain no more than 150 residues'
# os.remove(fixed_protein_path)
dic_data = dict(
ligand_data=ligand_data,
protein_data=protein_data,
protein_path=fixed_protein_path,
ligand_path=l_path,
sele_res=sele_res,
dic_MMFF_param=dic_MMFF_param,
)
pickle.dump(dic_data, open(cache_path + '/{}.pkl'.format(idx), 'wb'))
return True
def preprare_input_data(input_list, cache_path, prepare_data_with_multi_cpu):
delmkdir(cache_path)
tasks = []
for idx, f_name_list in enumerate(input_list):
tasks.append((prepare_single_input, (f_name_list, idx, cache_path)))
fail = 0
if prepare_data_with_multi_cpu:
pool = Pool()
print('Preparing input data...')
for r in pool.map(try_prepare_task, tasks):
if not r:
fail += 1
else:
for task in tqdm(tasks, desc='Preparing input data'):
r = try_prepare_task(task)
if not r:
fail += 1
print(f'Prepared data: {len(tasks) - fail}/{len(tasks)}, {(len(tasks) - fail) / len(tasks) * 100:.2f}%')
def read_input(protein, ligand, ref_pocket_center, batch_csv):
if batch_csv is not None:
df_input = pd.read_csv(batch_csv)
protein_list = df_input['protein'].values
ligand_list = df_input['ligand'].values
ref_pocket_center_list = df_input['ref_pocket_center'].values
else:
assert protein is not None and ligand is not None and ref_pocket_center is not None
if not isinstance(protein, list):
protein_list = [protein]
else:
protein_list = protein
if not isinstance(ligand, list):
ligand_list = [ligand]
else:
ligand_list = ligand
if not isinstance(ref_pocket_center, list):
ref_pocket_center_list = [ref_pocket_center]
else:
ref_pocket_center_list = ref_pocket_center
input_list = [(i, j, k) for i, j, k in zip(protein_list, ligand_list, ref_pocket_center_list)]
return input_list
class InferDataset(torch.utils.data.Dataset):
def __init__(self, args, cache_path, ens=1):
self.data_path = cache_path
self.data_list = [i.split('.')[0] for i in os.listdir(cache_path) if i.endswith('.pkl')]
self.ens = ens
self.coor_scale = args.coor_scale
self.max_len_pocket = args.max_len_pocket
self.max_len_ligand = args.max_len_ligand
self.l_init_sigma = args.l_init_sigma / self.coor_scale
def __getitem__(self, i):
if self.ens > 1:
complex_graph = []
for e in range(self.ens):
complex_graph.append(self.get_complex(self.data_list[i]))
complex_graph = collate_input(complex_graph) # use collate_dummy in loader
else:
complex_graph = self.get_complex(self.data_list[i])
return complex_graph
def get_complex(self, idx):
# =============== get dict data ===============
dic_data = pickle.load(open(f'{self.data_path}/{idx}.pkl', 'rb'))
ligand_data = dic_data['ligand_data']
protein_data = dic_data['protein_data']
protein_path = dic_data['protein_path']
ligand_path = dic_data['ligand_path']
sele_res = dic_data['sele_res']
dic_MMFF_param = dic_data['dic_MMFF_param']
# =============== ligand ===============
l_x_sca_init, l_edge_sca_init, l_coor_ref, l_match, l_dismap = ligand_data
l_match = l_match.reshape(-1)
n_match = len(l_match) // len(l_x_sca_init)
l_nomatch = repeat(torch.arange(0, len(l_x_sca_init)), 'm -> (n m)', n=n_match)
# get ligand MMFF (if exists)
if dic_MMFF_param is not None: | dic_MMFF_param = self.repad_MMFFparam(dic_MMFF_param, MMFF_keys, MMFF_pad_dim) | 3 | 2023-10-19 22:03:51+00:00 | 12k |
openvpi/SingingVocoders | training/univnet.py | [
{
"identifier": "UnivNet",
"path": "models/univnet/univnet.py",
"snippet": "class UnivNet(torch.nn.Module):\n \"\"\"Parallel WaveGAN Generator module.\"\"\"\n\n def __init__(self, h, use_weight_norm=True):\n\n super().__init__()\n\n in_channels = h['model_args']['cond_in_channels']\n out_channels = h['model_args']['out_channels']\n inner_channels = h['model_args']['cg_channels']\n cond_channels = h['audio_num_mel_bins']\n upsample_ratios = h['model_args']['upsample_rates']\n lvc_layers_each_block = h['model_args']['num_lvc_blocks']\n lvc_kernel_size = h['model_args']['lvc_kernels']\n kpnet_hidden_channels = h['model_args']['lvc_hidden_channels']\n kpnet_conv_size = h['model_args']['lvc_conv_size']\n dropout = h['model_args']['dropout']\n\n upmel=h['model_args'].get('upmel')\n self.upblocke=torch.nn.Sequential(*[Upspamper() for i in range(upmel//2)]) if upmel is not None or upmel==1 else torch.nn.Identity()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.cond_channels = cond_channels\n self.lvc_block_nums = len(upsample_ratios)\n\n # define first convolution\n self.first_conv = torch.nn.Conv1d(in_channels, inner_channels,\n kernel_size=7, padding=(7 - 1) // 2,\n dilation=1, bias=True)\n\n # define residual blocks\n self.lvc_blocks = torch.nn.ModuleList()\n cond_hop_length = 1\n for n in range(self.lvc_block_nums):\n cond_hop_length = cond_hop_length * upsample_ratios[n]\n lvcb = LVCBlock(\n in_channels=inner_channels,\n cond_channels=cond_channels,\n upsample_ratio=upsample_ratios[n],\n conv_layers=lvc_layers_each_block,\n conv_kernel_size=lvc_kernel_size,\n cond_hop_length=cond_hop_length,\n kpnet_hidden_channels=kpnet_hidden_channels,\n kpnet_conv_size=kpnet_conv_size,\n kpnet_dropout=dropout,\n )\n self.lvc_blocks += [lvcb]\n\n # define output layers\n self.last_conv_layers = torch.nn.ModuleList([\n torch.nn.Conv1d(inner_channels, out_channels, kernel_size=7, padding=(7 - 1) // 2,\n dilation=1, bias=True),\n\n ])\n\n # apply weight norm\n if use_weight_norm:\n self.apply_weight_norm()\n\n def forward(self, x, c):\n \"\"\"Calculate forward propagation.\n Args:\n x (Tensor): Input noise signal (B, 1, T).\n c (Tensor): Local conditioning auxiliary features (B, C ,T').\n Returns:\n Tensor: Output tensor (B, out_channels, T)\n \"\"\"\n\n x = self.first_conv(x)\n c=self.upblocke(c)\n\n for n in range(self.lvc_block_nums):\n x = self.lvc_blocks[n](x, c)\n\n # apply final layers\n for f in self.last_conv_layers:\n x = F.leaky_relu(x, LRELU_SLOPE)\n x = f(x)\n x = torch.tanh(x)\n return x\n\n def remove_weight_norm(self):\n \"\"\"Remove weight normalization module from all of the layers.\"\"\"\n def _remove_weight_norm(m):\n try:\n logging.debug(f\"Weight norm is removed from {m}.\")\n torch.nn.utils.remove_weight_norm(m)\n except ValueError: # this module didn't have weight norm\n return\n\n self.apply(_remove_weight_norm)\n\n def apply_weight_norm(self):\n \"\"\"Apply weight normalization module from all of the layers.\"\"\"\n def _apply_weight_norm(m):\n if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d):\n torch.nn.utils.weight_norm(m)\n logging.debug(f\"Weight norm is applied to {m}.\")\n\n self.apply(_apply_weight_norm)\n\n @staticmethod\n def _get_receptive_field_size(layers, stacks, kernel_size,\n dilation=lambda x: 2 ** x):\n assert layers % stacks == 0\n layers_per_cycle = layers // stacks\n dilations = [dilation(i % layers_per_cycle) for i in range(layers)]\n return (kernel_size - 1) * sum(dilations) + 1\n\n @property\n def receptive_field_size(self):\n \"\"\"Return receptive field size.\"\"\"\n return 
self._get_receptive_field_size(self.layers, self.stacks, self.kernel_size)\n\n def inference(self, c=None, x=None):\n \"\"\"Perform inference.\n Args:\n c (Union[Tensor, ndarray]): Local conditioning auxiliary features (T' ,C).\n x (Union[Tensor, ndarray]): Input noise signal (T, 1).\n Returns:\n Tensor: Output tensor (T, out_channels)\n \"\"\"\n if x is not None:\n if not isinstance(x, torch.Tensor):\n x = torch.tensor(x, dtype=torch.float).to(next(self.parameters()).device)\n x = x.transpose(1, 0).unsqueeze(0)\n else:\n assert c is not None\n x = torch.randn(1, 1, len(c) * self.upsample_factor).to(next(self.parameters()).device)\n if c is not None:\n if not isinstance(c, torch.Tensor):\n c = torch.tensor(c, dtype=torch.float).to(next(self.parameters()).device)\n c = c.transpose(1, 0).unsqueeze(0)\n c = torch.nn.ReplicationPad1d(self.aux_context_window)(c)\n return self.forward(x, c).squeeze(0).transpose(1, 0)"
},
{
"identifier": "univloss",
"path": "modules/loss/univloss.py",
"snippet": "class univloss(nn.Module):\n def __init__(self,config:dict):\n super().__init__()\n self.mel=PitchAdjustableMelSpectrogram( sample_rate=config['audio_sample_rate'],\n n_fft=config['fft_size'],\n win_length=config['win_size'],\n hop_length=config['hop_size'],\n f_min=config['fmin'],\n f_max=config['fmax_for_loss'],\n n_mels=config['audio_num_mel_bins'],)\n self.L1loss=nn.L1Loss()\n self.labauxloss=config.get('lab_aux_loss',45)\n # self.stft=warp_stft({'fft_sizes':[1024, 2048, 512,],'hop_sizes':[120, 240, 50,],'win_lengths':[600, 1200, 240,]})\n\n self.stft = warp_stft({'fft_sizes': config['loss_fft_sizes'], 'hop_sizes': config['loss_hop_sizes'],\n 'win_lengths': config['loss_win_lengths']})\n # fft_sizes = [2048, 4096, 1024, 512, 256, 128],\n # hop_sizes = [240, 480, 100, 50, 25, 12],\n # win_lengths = [1200, 2400, 480, 240, 120, 60]\n\n def discriminator_loss(self,disc_real_outputs, disc_generated_outputs):\n loss = 0\n rlosses=0\n glosses=0\n r_losses = []\n g_losses = []\n\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg ** 2)\n loss += r_loss + g_loss\n rlosses+=r_loss.item()\n glosses +=g_loss.item()\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, rlosses,glosses,r_losses, g_losses\n\n\n def Dloss(self,Dfake, Dtrue):\n\n (Fmrd_out, _), (Fmpd_out, _)=Dfake\n (Tmrd_out, _), (Tmpd_out, _)=Dtrue\n mrdloss, mrdrlosses, mrdglosses, _, _=self.discriminator_loss(Tmrd_out,Fmrd_out)\n mpdloss, mpdrlosses, mpdglosses, _, _ = self.discriminator_loss(Tmpd_out, Fmpd_out)\n loss=mrdloss+mpdloss\n return loss,{'DmrdlossF':mrdglosses,'DmrdlossT':mrdrlosses,'DmpdlossT':mpdrlosses,'DmpdlossF':mpdglosses}\n\n def feature_loss(self,fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2\n\n def GDloss(self,GDfake,GDtrue):\n loss = 0\n gen_losses = []\n mrd_losses=0\n mpd_losses = 0\n (mrd_out, Fmrd_feature), (mpd_out, Fmpd_feature)=GDfake\n (_, Tmrd_feature), (_, Tmpd_feature) = GDtrue\n for dg in mrd_out:\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l.item())\n # loss += l\n mrd_losses=l+mrd_losses\n\n for dg in mpd_out:\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l.item())\n # loss += l\n mpd_losses=l+mpd_losses\n\n mrd_feature_loss=self.feature_loss(Tmrd_feature,Fmrd_feature)\n mpd_feature_loss = self.feature_loss(Tmpd_feature, Fmpd_feature)\n # loss +=msd_feature_loss\n # loss +=mpd_feature_loss\n loss= mrd_feature_loss+mpd_feature_loss+mpd_losses+mrd_losses\n # (msd_losses, mpd_losses), (msd_feature_loss, mpd_feature_loss), gen_losses\n return loss, {'Gmrdloss':mrd_losses,'Gmpdloss':mpd_losses,'Gmrd_feature_loss':mrd_feature_loss,'Gmpd_feature_loss':mpd_feature_loss}\n\n # def Auxloss(self,Goutput, sample):\n #\n # Gmel=self.mel.dynamic_range_compression_torch(self.mel(Goutput['audio'].squeeze(1)))\n # # Rmel=sample['mel']\n # Rmel = self.mel.dynamic_range_compression_torch(self.mel(sample['audio'].squeeze(1)))\n # loss=self.L1loss(Gmel, Rmel)*self.labauxloss\n # return loss,{'auxloss':loss}\n\n def Auxloss(self,Goutput, sample):\n\n # Gmel=self.mel.dynamic_range_compression_torch(self.mel(Goutput['audio'].squeeze(1)))\n # # Rmel=sample['mel']\n # Rmel = self.mel.dynamic_range_compression_torch(self.mel(sample['audio'].squeeze(1)))\n sc_loss, mag_loss=self.stft.stft(Goutput['audio'].squeeze(1), sample['audio'].squeeze(1))\n loss=(sc_loss+ mag_loss)*self.labauxloss\n 
return loss,{'auxloss':loss,'auxloss_sc_loss':sc_loss,'auxloss_mag_loss':mag_loss}"
},
{
"identifier": "MultiPeriodDiscriminator",
"path": "modules/univ_D/discriminator.py",
"snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self,periods=[2,3,5,7,11]):\n super(MultiPeriodDiscriminator, self).__init__()\n # self.discriminators = nn.ModuleList([\n # DiscriminatorP(2),\n # DiscriminatorP(3),\n # DiscriminatorP(5),\n # DiscriminatorP(7),\n # DiscriminatorP(11),\n # ])\n self.discriminators = nn.ModuleList([\n DiscriminatorP(i) for i in periods\n\n ])\n\n def forward(self, y,):\n y_d_rs = []\n\n fmap_rs = []\n\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n\n\n return y_d_rs, fmap_rs"
},
{
"identifier": "MultiResSpecDiscriminator",
"path": "modules/univ_D/discriminator.py",
"snippet": "class MultiResSpecDiscriminator(torch.nn.Module):\n\n def __init__(self,\n fft_sizes=[1024, 2048, 512],\n hop_sizes=[120, 240, 50],\n win_lengths=[600, 1200, 240],\n window=\"hann_window\"):\n\n super(MultiResSpecDiscriminator, self).__init__()\n # self.discriminators = nn.ModuleList([\n # SpecDiscriminator(fft_sizes[0], hop_sizes[0], win_lengths[0], window),\n # SpecDiscriminator(fft_sizes[1], hop_sizes[1], win_lengths[1], window),\n # SpecDiscriminator(fft_sizes[2], hop_sizes[2], win_lengths[2], window)\n # ])\n self.discriminators = nn.ModuleList([\n SpecDiscriminator(i[0], i[1], i[2], window) for i in zip(fft_sizes,hop_sizes,win_lengths)\n\n ])\n\n def forward(self, y,):\n y_d_rs = []\n\n fmap_rs = []\n\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n\n return y_d_rs, fmap_rs"
},
{
"identifier": "GanBaseTask",
"path": "training/base_task_gan.py",
"snippet": "class GanBaseTask(pl.LightningModule):\n \"\"\"\n Base class for training tasks.\n 1. *load_ckpt*:\n load checkpoint;\n 2. *training_step*:\n record and log the loss;\n 3. *optimizer_step*:\n run backwards step;\n 4. *start*:\n load training configs, backup code, log to tensorboard, start training;\n 5. *configure_ddp* and *init_ddp_connection*:\n start parallel training.\n\n Subclasses should define:\n 1. *build_model*, *build_optimizer*, *build_scheduler*:\n how to build the model, the optimizer and the training scheduler;\n 2. *_training_step*:\n one training step of the model;\n 3. *on_validation_end* and *_on_validation_end*:\n postprocess the validation output.\n \"\"\"\n\n def __init__(self, config: dict, *args, **kwargs):\n # dataset configs\n super().__init__(*args, **kwargs)\n self.dataset_cls = None\n self.config = config\n # self.max_batch_frames = self.config['max_batch_frames']\n # self.max_batch_size = self.config['max_batch_size']\n # self.max_val_batch_frames = self.config['max_val_batch_frames']\n # self.max_val_batch_size = self.config['max_val_batch_size']\n\n # self.accumulate_grad_batches = self.config['accumulate_grad_batches']\n self.clip_grad_norm = self.config['clip_grad_norm']\n\n self.training_sampler = None\n self.model = None\n self.generator = None\n self.discriminator = None\n self.skip_immediate_validation = False\n self.skip_immediate_ckpt_save = False\n\n self.valid_losses: Dict[str, Metric] = {\n 'total_loss': MeanMetric()\n }\n self.valid_metric_names = set()\n self.mix_loss = None\n\n self.automatic_optimization = False\n self.skip_immediate_validations = 0\n\n self.aux_step = self.config.get('aux_step')\n self.train_dataset = None\n self.valid_dataset = None\n\n ###########\n\n # Training, validation and testing\n ###########\n def setup(self, stage):\n self.model = self.build_model()\n self.unfreeze_all_params()\n if self.config['freezing_enabled']:\n self.freeze_params()\n if self.config['finetune_enabled'] and get_latest_checkpoint_path(\n pathlib.Path(self.config['work_dir'])) is None:\n self.load_finetune_ckpt(self.load_pre_train_model())\n self.print_arch()\n self.build_losses_and_metrics()\n self.build_dataset()\n # self.train_dataset = self.dataset_cls(\n # config=self.config, data_dir=self.config['binary_data_dir'],\n # prefix=self.config['train_set_name'], allow_aug=True\n # )\n # self.valid_dataset = self.dataset_cls(\n # config=self.config, data_dir=self.config['binary_data_dir'],\n # prefix=self.config['valid_set_name'], allow_aug=False\n # )\n\n def build_dataset(self):\n raise NotImplementedError()\n\n def get_need_freeze_state_dict_key(self, model_state_dict) -> list:\n key_list = []\n for i in self.config['frozen_params']:\n for j in model_state_dict:\n if j.startswith(i):\n key_list.append(j)\n return list(set(key_list))\n\n def freeze_params(self) -> None:\n model_state_dict = self.state_dict().keys()\n freeze_key = self.get_need_freeze_state_dict_key(model_state_dict=model_state_dict)\n\n for i in freeze_key:\n params = self.get_parameter(i)\n\n params.requires_grad = False\n\n def unfreeze_all_params(self) -> None:\n for i in self.parameters():\n i.requires_grad = True\n\n def load_finetune_ckpt(\n self, state_dict\n ) -> None:\n\n adapt_shapes = self.config['finetune_strict_shapes']\n if not adapt_shapes:\n cur_model_state_dict = self.state_dict()\n unmatched_keys = []\n for key, param in state_dict.items():\n if key in cur_model_state_dict:\n new_param = cur_model_state_dict[key]\n if new_param.shape != 
param.shape:\n unmatched_keys.append(key)\n print('| Unmatched keys: ', key, new_param.shape, param.shape)\n for key in unmatched_keys:\n del state_dict[key]\n self.load_state_dict(state_dict, strict=False)\n\n def load_pre_train_model(self):\n\n pre_train_ckpt_path = self.config.get('finetune_ckpt_path')\n blacklist = self.config.get('finetune_ignored_params')\n if blacklist is None:\n blacklist = []\n # if whitelist is None:\n # raise RuntimeError(\"\")\n\n if pre_train_ckpt_path is not None:\n ckpt = torch.load(pre_train_ckpt_path)\n\n state_dict = {}\n for i in ckpt['state_dict']:\n # if 'diffusion' in i:\n # if i in rrrr:\n # continue\n skip = False\n for b in blacklist:\n if i.startswith(b):\n skip = True\n break\n\n if skip:\n continue\n\n state_dict[i] = ckpt['state_dict'][i]\n print(i)\n return state_dict\n else:\n raise RuntimeError(\"\")\n\n def build_model(self):\n raise NotImplementedError()\n\n @rank_zero_only\n def print_arch(self):\n utils.print_arch(self)\n\n def build_losses_and_metrics(self):\n raise NotImplementedError()\n\n def register_metric(self, name: str, metric: Metric):\n assert isinstance(metric, Metric)\n setattr(self, name, metric)\n self.valid_metric_names.add(name)\n\n # def run_model(self, sample, infer=False):\n # \"\"\"\n # steps:\n # 1. run the full model\n # 2. calculate losses if not infer\n # \"\"\"\n # raise NotImplementedError()\n\n def Gforward(self, sample, infer=False):\n \"\"\"\n steps:\n 1. run the full model\n 2. calculate losses if not infer\n \"\"\"\n raise NotImplementedError()\n\n def Dforward(self, Goutput):\n \"\"\"\n steps:\n 1. run the full model\n 2. calculate losses if not infer\n \"\"\"\n raise NotImplementedError()\n\n # def on_train_epoch_start(self):\n # if self.training_sampler is not None:\n # self.training_sampler.set_epoch(self.current_epoch)\n\n def _training_step(self, sample, batch_idx):\n \"\"\"\n :return: total loss: torch.Tensor, loss_log: dict, other_log: dict\n\n \"\"\"\n aux_only = False\n if self.aux_step is not None:\n if self.aux_step > self.global_step:\n aux_only = True\n\n log_diet = {}\n opt_g, opt_d = self.optimizers()\n Goutput = self.Gforward(sample=sample)\n if not aux_only:\n Dfake = self.Dforward(Goutput=Goutput['audio'].detach())\n Dtrue = self.Dforward(Goutput=sample['audio'])\n Dloss, Dlog = self.mix_loss.Dloss(Dfake=Dfake, Dtrue=Dtrue)\n log_diet.update(Dlog)\n # if self.clip_grad_norm is not None:\n # self.manual_backward(Dloss/self.clip_grad_norm)\n # else:\n opt_d.zero_grad()\n self.manual_backward(Dloss)\n if self.clip_grad_norm is not None:\n self.clip_gradients(opt_d, gradient_clip_val=self.clip_grad_norm, gradient_clip_algorithm=\"norm\")\n opt_d.step()\n opt_d.zero_grad()\n if not aux_only:\n GDfake = self.Dforward(Goutput=Goutput['audio'])\n GDtrue = self.Dforward(Goutput=sample['audio'])\n GDloss, GDlog = self.mix_loss.GDloss(GDfake=GDfake,GDtrue=GDtrue)\n log_diet.update(GDlog)\n Auxloss, Auxlog = self.mix_loss.Auxloss(Goutput=Goutput, sample=sample)\n\n log_diet.update(Auxlog)\n if not aux_only:\n Gloss=GDloss + Auxloss\n else:\n Gloss=Auxloss\n\n # if self.clip_grad_norm is not None:\n # self.manual_backward(Gloss / self.clip_grad_norm)\n # else:\n # self.manual_backward(Gloss)\n # if (batch_idx + 1) % self.accumulate_grad_batches == 0:\n opt_g.zero_grad()\n self.manual_backward(Gloss)\n if self.clip_grad_norm is not None:\n self.clip_gradients(opt_g, gradient_clip_val=self.clip_grad_norm, gradient_clip_algorithm=\"norm\")\n opt_g.step()\n\n\n\n return log_diet\n\n def 
training_step(self, sample, batch_idx, ): # todo\n log_outputs = self._training_step(sample, batch_idx)\n\n # logs to progress bar\n self.log_dict({'loss':sum(log_outputs.values())}, prog_bar=True, logger=False, on_step=True, on_epoch=False)\n # self.log('lr', self.lr_schedulers().get_last_lr()[0], prog_bar=True, logger=False, on_step=True, on_epoch=False)\n # logs to tensorboard\n if self.global_step % self.config['log_interval'] == 0:\n tb_log = {f'training/{k}': v for k, v in log_outputs.items()}\n # tb_log['training/lr'] = self.lr_schedulers().get_last_lr()[0]\n self.logger.log_metrics(tb_log, step=self.global_step)\n #\n # return total_loss\n\n # def on_before_optimizer_step(self, *args, **kwargs):\n # self.log_dict(grad_norm(self, norm_type=2))\n\n def _on_validation_start(self):\n pass\n\n def on_validation_start(self):\n self._on_validation_start()\n for metric in self.valid_losses.values():\n metric.to(self.device)\n metric.reset()\n\n def _validation_step(self, sample, batch_idx):\n \"\"\"\n\n :param sample:\n :param batch_idx:\n :return: loss_log: dict, weight: int\n \"\"\"\n raise NotImplementedError()\n\n def validation_step(self, sample, batch_idx):\n \"\"\"\n\n :param sample:\n :param batch_idx:\n\n \"\"\"\n\n # if self.skip_immediate_validations == 0 and self.global_step != 0:\n # self.skip_immediate_validation = True\n # self.skip_immediate_validations = 1\n # if self.global_step == 0:\n # self.skip_immediate_validations = 1\n\n if self.skip_immediate_validation:\n rank_zero_debug(f\"Skip validation {batch_idx}\")\n return {}\n with torch.autocast(self.device.type, enabled=False):\n losses, weight = self._validation_step(sample, batch_idx)\n losses = {\n 'total_loss': sum(losses.values()),\n **losses\n }\n for k, v in losses.items():\n if k not in self.valid_losses:\n self.valid_losses[k] = MeanMetric().to(self.device)\n self.valid_losses[k].update(v, weight=weight) # weight=1\n return losses\n\n def on_validation_epoch_end(self):\n if self.skip_immediate_validation:\n self.skip_immediate_validation = False\n self.skip_immediate_ckpt_save = True\n return\n loss_vals = {k: v.compute() for k, v in self.valid_losses.items()}\n self.log('val_loss', loss_vals['total_loss'], on_epoch=True, prog_bar=True, logger=False, sync_dist=True)\n self.logger.log_metrics({f'validation/{k}': v for k, v in loss_vals.items()}, step=self.global_step)\n for metric in self.valid_losses.values():\n metric.reset()\n metric_vals = {k: getattr(self, k).compute() for k in self.valid_metric_names}\n self.logger.log_metrics({f'metrics/{k}': v for k, v in metric_vals.items()}, step=self.global_step)\n for metric_name in self.valid_metric_names:\n getattr(self, metric_name).reset()\n\n # noinspection PyMethodMayBeStatic\n def build_scheduler(self, optimizer):\n from utils import build_lr_scheduler_from_config\n\n scheduler_args = self.config['lr_scheduler_args']\n assert scheduler_args['scheduler_cls'] != ''\n scheduler = build_lr_scheduler_from_config(optimizer, scheduler_args)\n return scheduler\n\n # noinspection PyMethodMayBeStatic\n def build_optimizer(self, model, optimizer_args):\n from utils import build_object_from_class_name\n\n assert optimizer_args['optimizer_cls'] != ''\n if 'beta1' in optimizer_args and 'beta2' in optimizer_args and 'betas' not in optimizer_args:\n optimizer_args['betas'] = (optimizer_args['beta1'], optimizer_args['beta2'])\n\n if isinstance(model, nn.ModuleList):\n parameterslist = []\n for i in model:\n parameterslist = parameterslist + list(i.parameters())\n optimizer = 
build_object_from_class_name(\n optimizer_args['optimizer_cls'],\n torch.optim.Optimizer,\n parameterslist,\n **optimizer_args\n )\n elif isinstance(model, nn.ModuleDict):\n parameterslist = []\n for i in model:\n # parameterslist = parameterslist + list(model[i].parameters())\n parameterslist.append({'params': model[i].parameters()})\n optimizer = build_object_from_class_name(\n optimizer_args['optimizer_cls'],\n torch.optim.Optimizer,\n parameterslist,\n **optimizer_args\n )\n elif isinstance(model, nn.Module):\n\n optimizer = build_object_from_class_name(\n optimizer_args['optimizer_cls'],\n torch.optim.Optimizer,\n model.parameters(),\n **optimizer_args\n )\n else:\n raise RuntimeError(\"\")\n\n return optimizer\n\n def configure_optimizers(self):\n optG = self.build_optimizer(self.generator, optimizer_args=self.config['generater_optimizer_args'])\n optD = self.build_optimizer(self.discriminator, optimizer_args=self.config['discriminate_optimizer_args'])\n\n return [optG, optD]\n # scheduler = self.build_scheduler(optm)\n # if scheduler is None:\n # return optm\n # return {\n # \"optimizer\": optm,\n # \"lr_scheduler\": {\n # \"scheduler\": scheduler,\n # \"interval\": \"step\",\n # \"frequency\": 1\n # }\n # }\n\n def train_dataloader(self):\n # self.training_sampler = DsBatchSampler(\n # self.train_dataset,\n # max_batch_frames=self.max_batch_frames,\n # max_batch_size=self.max_batch_size,\n # num_replicas=(self.trainer.distributed_sampler_kwargs or {}).get('num_replicas', 1),\n # rank=(self.trainer.distributed_sampler_kwargs or {}).get('rank', 0),\n # sort_by_similar_size=self.config['sort_by_len'],\n # required_batch_count_multiple=self.config['accumulate_grad_batches'],\n # frame_count_grid=self.config['sampler_frame_count_grid'],\n # shuffle_sample=True,\n # shuffle_batch=False,\n # seed=self.config['seed']\n # )\n return torch.utils.data.DataLoader(self.train_dataset,\n collate_fn=self.train_dataset.collater,\n batch_size=self.config['batch_size'],\n # batch_sampler=self.training_sampler,\n num_workers=self.config['ds_workers'],\n prefetch_factor=self.config['dataloader_prefetch_factor'],\n pin_memory=True,\n persistent_workers=True)\n\n def val_dataloader(self):\n # sampler = DsEvalBatchSampler(\n # self.valid_dataset,\n # max_batch_frames=self.max_val_batch_frames,\n # max_batch_size=self.max_val_batch_size,\n # rank=(self.trainer.distributed_sampler_kwargs or {}).get('rank', 0),\n # batch_by_size=False\n # )\n return torch.utils.data.DataLoader(self.valid_dataset,\n collate_fn=self.valid_dataset.collater,\n batch_size=1,\n # batch_sampler=sampler,\n num_workers=self.config['ds_workers'],\n prefetch_factor=self.config['dataloader_prefetch_factor'],\n shuffle=False)\n\n def test_dataloader(self):\n return self.val_dataloader()\n\n def on_test_start(self):\n self.on_validation_start()\n\n def test_step(self, sample, batch_idx):\n return self.validation_step(sample, batch_idx)\n\n def on_test_end(self):\n return self.on_validation_end()\n\n def on_save_checkpoint(self, checkpoint):\n pass\n # checkpoint['trainer_stage'] = self.trainer.state.stage.value\n\n # def on_load_checkpoint(self, checkpoint):\n # # from lightning.pytorch.trainer.states import RunningStage\n # from utils import simulate_lr_scheduler\n # # if checkpoint.get('trainer_stage', '') == RunningStage.VALIDATING.value:\n # # self.skip_immediate_validation = True\n #\n # optimizer_args = self.config['optimizer_args']\n # scheduler_args = self.config['lr_scheduler_args']\n #\n # if 'beta1' in optimizer_args and 
'beta2' in optimizer_args and 'betas' not in optimizer_args:\n # optimizer_args['betas'] = (optimizer_args['beta1'], optimizer_args['beta2'])\n #\n # if checkpoint.get('optimizer_states', None):\n # opt_states = checkpoint['optimizer_states']\n # assert len(opt_states) == 1 # only support one optimizer\n # opt_state = opt_states[0]\n # for param_group in opt_state['param_groups']:\n # for k, v in optimizer_args.items():\n # if k in param_group and param_group[k] != v:\n # if 'lr_schedulers' in checkpoint and checkpoint['lr_schedulers'] and k == 'lr':\n # continue\n # rank_zero_info(f'| Overriding optimizer parameter {k} from checkpoint: {param_group[k]} -> {v}')\n # param_group[k] = v\n # if 'initial_lr' in param_group and param_group['initial_lr'] != optimizer_args['lr']:\n # rank_zero_info(\n # f'| Overriding optimizer parameter initial_lr from checkpoint: {param_group[\"initial_lr\"]} -> {optimizer_args[\"lr\"]}'\n # )\n # param_group['initial_lr'] = optimizer_args['lr']\n #\n # if checkpoint.get('lr_schedulers', None):\n # assert checkpoint.get('optimizer_states', False)\n # assert len(checkpoint['lr_schedulers']) == 1 # only support one scheduler\n # checkpoint['lr_schedulers'][0] = simulate_lr_scheduler(\n # optimizer_args, scheduler_args,\n # step_count=checkpoint['global_step'],\n # num_param_groups=len(checkpoint['optimizer_states'][0]['param_groups'])\n # )\n # for param_group, new_lr in zip(\n # checkpoint['optimizer_states'][0]['param_groups'],\n # checkpoint['lr_schedulers'][0]['_last_lr'],\n # ):\n # if param_group['lr'] != new_lr:\n # rank_zero_info(\n # f'| Overriding optimizer parameter lr from checkpoint: {param_group[\"lr\"]} -> {new_lr}')\n # param_group['lr'] = new_lr"
},
{
"identifier": "PitchAdjustableMelSpectrogram",
"path": "utils/wav2mel.py",
"snippet": "class PitchAdjustableMelSpectrogram:\n def __init__(\n self,\n sample_rate=44100,\n n_fft=2048,\n win_length=2048,\n hop_length=512,\n f_min=40,\n f_max=16000,\n n_mels=128,\n center=False,\n ):\n self.sample_rate = sample_rate\n self.n_fft = n_fft\n self.win_size = win_length\n self.hop_length = hop_length\n self.f_min = f_min\n self.f_max = f_max\n self.n_mels = n_mels\n self.center = center\n\n self.mel_basis = {}\n self.hann_window = {}\n\n def __call__(self, y, key_shift=0, speed=1.0):\n factor = 2 ** (key_shift / 12)\n n_fft_new = int(np.round(self.n_fft * factor))\n win_size_new = int(np.round(self.win_size * factor))\n hop_length = int(np.round(self.hop_length * speed))\n\n # if torch.min(y) < -1.0:\n # logger.warning(f\"min value is {torch.min(y)}\")\n # if torch.max(y) > 1.0:\n # logger.warning(f\"max value is {torch.max(y)}\")\n\n mel_basis_key = f\"{self.f_max}_{y.device}\"\n if mel_basis_key not in self.mel_basis:\n mel = librosa_mel_fn(\n sr=self.sample_rate,\n n_fft=self.n_fft,\n n_mels=self.n_mels,\n fmin=self.f_min,\n fmax=self.f_max,\n )\n self.mel_basis[mel_basis_key] = torch.from_numpy(mel).float().to(y.device)\n\n hann_window_key = f\"{key_shift}_{y.device}\"\n if hann_window_key not in self.hann_window:\n self.hann_window[hann_window_key] = torch.hann_window(\n win_size_new, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (\n int((win_size_new - hop_length) // 2),\n int((win_size_new - hop_length+1) // 2),\n ),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft_new,\n hop_length=hop_length,\n win_length=win_size_new,\n window=self.hann_window[hann_window_key],\n center=self.center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=True,\n ).abs()\n # spec = torch.view_as_real(spec)\n # spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9))\n\n if key_shift != 0:\n size = self.n_fft // 2 + 1\n resize = spec.size(1)\n if resize < size:\n spec = F.pad(spec, (0, 0, 0, size - resize))\n\n spec = spec[:, :size, :] * self.win_size / win_size_new\n\n spec = torch.matmul(self.mel_basis[mel_basis_key], spec)\n\n return spec\n\n def dynamic_range_compression_torch(self,x, C=1, clip_val=1e-5):\n return torch.log(torch.clamp(x, min=clip_val) * C)"
}
] | import logging
import os
import pathlib
import random
import sys
import lightning.pytorch as pl
import matplotlib
import numpy as np
import torch.utils.data
import utils
from typing import Dict
from lightning.pytorch.utilities.rank_zero import rank_zero_debug, rank_zero_info, rank_zero_only
from matplotlib import pyplot as plt
from torch import nn
from torch.utils.data import Dataset
from torchmetrics import Metric, MeanMetric
from models.univnet.univnet import UnivNet
from modules.loss.univloss import univloss
from modules.univ_D.discriminator import MultiPeriodDiscriminator, MultiResSpecDiscriminator
from training.base_task_gan import GanBaseTask
from utils.wav2mel import PitchAdjustableMelSpectrogram | 10,709 | cty=(len(record['spectrogram'].T) * samples_per_frame)
record['audio'] = record['audio'][:cty]
record['audio'] = np.pad(record['audio'], (
0, (len(record['spectrogram'].T) * samples_per_frame) - len(record['audio'])),
mode='constant')
pass
else:
# record['spectrogram'] = record['spectrogram'][start:end].T
record['audio'] = record['audio'][start:end]
record['audio'] = np.pad(record['audio'], (0, (end - start) - len(record['audio'])),
mode='constant')
if self.volume_aug:
for record in minibatch:
if random.random() < self.volume_aug_prob:
audio = record['audio']
audio_mel = record['spectrogram']
max_amp = float(np.max(np.abs(audio))) + 1e-5
max_shift = min(3, np.log(1 / max_amp))
log_mel_shift = random.uniform(-3, max_shift)
# audio *= (10 ** log_mel_shift)
audio *= np.exp(log_mel_shift)
audio_mel += log_mel_shift
audio_mel = torch.clamp(torch.from_numpy(audio_mel), min=np.log(1e-5)).numpy()
record['audio'] = audio
record['spectrogram'] = audio_mel
audio = np.stack([record['audio'] for record in minibatch if 'audio' in record])
spectrogram = np.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record])
f0 = np.stack([record['f0'] for record in minibatch if 'f0' in record])
return {
'audio': torch.from_numpy(audio).unsqueeze(1),
'mel': torch.from_numpy(spectrogram), 'f0': torch.from_numpy(f0),
}
class stftlog:
def __init__(self,
n_fft=2048,
win_length=2048,
hop_length=512,
center=False,):
self.hop_length=hop_length
self.win_size=win_length
self.n_fft = n_fft
self.win_size = win_length
self.center = center
self.hann_window = {}
def exc(self,y):
hann_window_key = f"{y.device}"
if hann_window_key not in self.hann_window:
self.hann_window[hann_window_key] = torch.hann_window(
self.win_size, device=y.device
)
y = torch.nn.functional.pad(
y.unsqueeze(1),
(
int((self.win_size - self.hop_length) // 2),
int((self.win_size - self.hop_length+1) // 2),
),
mode="reflect",
)
y = y.squeeze(1)
spec = torch.stft(
y,
self.n_fft,
hop_length=self.hop_length,
win_length=self.win_size,
window=self.hann_window[hann_window_key],
center=self.center,
pad_mode="reflect",
normalized=False,
onesided=True,
return_complex=True,
).abs()
return spec
class univnet_task(GanBaseTask):
def __init__(self, config):
super().__init__(config)
self.TF = PitchAdjustableMelSpectrogram( f_min=0,
f_max=None,
n_mels=256,)
self.logged_gt_wav = set()
self.stft=stftlog()
upmel = config['model_args'].get('upmel')
self.upmel=upmel
# if upmel is not None:
# self.noisec=config['model_args']['cond_in_channels']*upmel
# else:
self.noisec = config['model_args']['cond_in_channels']
def build_dataset(self):
self.train_dataset = nsf_HiFigan_dataset(config=self.config,
data_dir=pathlib.Path(self.config['DataIndexPath']) / self.config[
'train_set_name'])
self.valid_dataset = nsf_HiFigan_dataset(config=self.config,
data_dir=pathlib.Path(self.config['DataIndexPath']) / self.config[
'valid_set_name'], infer=True)
def build_model(self):
# cfg=self.config['model_args']
# cfg.update({'sampling_rate':self.config['audio_sample_rate'],'num_mels':self.config['audio_num_mel_bins'],'hop_size':self.config['hop_size']})
# h=AttrDict(cfg)
self.generator=UnivNet(self.config,use_weight_norm=self.config['model_args'].get('use_weight_norm',True))
self.discriminator=nn.ModuleDict({'mrd':MultiResSpecDiscriminator(fft_sizes=self.config['model_args'].get('mrd_fft_sizes',[1024, 2048, 512]),
hop_sizes=self.config['model_args'].get('mrd_hop_sizes',[120, 240, 50]),
win_lengths= self.config['model_args'].get('mrd_win_lengths',[600, 1200, 240]),), 'mpd':MultiPeriodDiscriminator(periods=self.config['model_args']['discriminator_periods'])})
def build_losses_and_metrics(self):
|
# from models.lvc_ddspgan.lvc_ddspgan import DDSPgan
# from models.nsf_HiFigan.models import Generator, AttrDict, MultiScaleDiscriminator, MultiPeriodDiscriminator
def spec_to_figure(spec, vmin=None, vmax=None):
if isinstance(spec, torch.Tensor):
spec = spec.cpu().numpy()
fig = plt.figure(figsize=(12, 9),dpi=100)
plt.pcolor(spec.T, vmin=vmin, vmax=vmax)
plt.tight_layout()
return fig
class nsf_HiFigan_dataset(Dataset):
def __init__(self, config: dict, data_dir, infer=False):
super().__init__()
self.config = config
self.data_dir = data_dir if isinstance(data_dir, pathlib.Path) else pathlib.Path(data_dir)
with open(self.data_dir, 'r', encoding='utf8') as f:
fills = f.read().strip().split('\n')
self.data_index = fills
self.infer = infer
self.volume_aug = self.config['volume_aug']
self.volume_aug_prob = self.config['volume_aug_prob'] if not infer else 0
def __getitem__(self, index):
data_path = self.data_index[index]
data = np.load(data_path)
return {'f0':data['f0'],'spectrogram':data['mel'],'audio':data['audio']}
def __len__(self):
return len(self.data_index)
def collater(self, minibatch):
samples_per_frame = self.config['hop_size']
if self.infer:
crop_mel_frames = 0
else:
crop_mel_frames = self.config['crop_mel_frames']
for record in minibatch:
# Filter out records that aren't long enough.
if len(record['spectrogram']) < crop_mel_frames:
del record['spectrogram']
del record['audio']
del record['f0']
continue
start = random.randint(0, record['spectrogram'].shape[0] - 1 - crop_mel_frames)
end = start + crop_mel_frames
if self.infer:
record['spectrogram'] = record['spectrogram'].T
record['f0'] = record['f0']
else:
record['spectrogram'] = record['spectrogram'][start:end].T
record['f0'] = record['f0'][start:end]
start *= samples_per_frame
end *= samples_per_frame
if self.infer:
cty=(len(record['spectrogram'].T) * samples_per_frame)
record['audio'] = record['audio'][:cty]
record['audio'] = np.pad(record['audio'], (
0, (len(record['spectrogram'].T) * samples_per_frame) - len(record['audio'])),
mode='constant')
pass
else:
# record['spectrogram'] = record['spectrogram'][start:end].T
record['audio'] = record['audio'][start:end]
record['audio'] = np.pad(record['audio'], (0, (end - start) - len(record['audio'])),
mode='constant')
if self.volume_aug:
for record in minibatch:
if random.random() < self.volume_aug_prob:
audio = record['audio']
audio_mel = record['spectrogram']
max_amp = float(np.max(np.abs(audio))) + 1e-5
max_shift = min(3, np.log(1 / max_amp))
log_mel_shift = random.uniform(-3, max_shift)
# audio *= (10 ** log_mel_shift)
audio *= np.exp(log_mel_shift)
audio_mel += log_mel_shift
audio_mel = torch.clamp(torch.from_numpy(audio_mel), min=np.log(1e-5)).numpy()
record['audio'] = audio
record['spectrogram'] = audio_mel
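# Note (added for clarity, not part of the original file): the mel features here appear to be
# natural-log compressed (dynamic_range_compression_torch in utils.wav2mel uses torch.log), so a
# gain change of log_mel_shift on the mel must be paired with multiplying the waveform by
# np.exp(log_mel_shift); the commented-out `10 ** log_mel_shift` variant would only be correct
# for a log10-compressed mel. The clamp keeps the shifted mel above log(1e-5), matching the
# compression floor.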
audio = np.stack([record['audio'] for record in minibatch if 'audio' in record])
spectrogram = np.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record])
f0 = np.stack([record['f0'] for record in minibatch if 'f0' in record])
return {
'audio': torch.from_numpy(audio).unsqueeze(1),
'mel': torch.from_numpy(spectrogram), 'f0': torch.from_numpy(f0),
}
class stftlog:
def __init__(self,
n_fft=2048,
win_length=2048,
hop_length=512,
center=False,):
self.hop_length=hop_length
self.win_size=win_length
self.n_fft = n_fft
self.win_size = win_length
self.center = center
self.hann_window = {}
def exc(self,y):
hann_window_key = f"{y.device}"
if hann_window_key not in self.hann_window:
self.hann_window[hann_window_key] = torch.hann_window(
self.win_size, device=y.device
)
y = torch.nn.functional.pad(
y.unsqueeze(1),
(
int((self.win_size - self.hop_length) // 2),
int((self.win_size - self.hop_length+1) // 2),
),
mode="reflect",
)
y = y.squeeze(1)
spec = torch.stft(
y,
self.n_fft,
hop_length=self.hop_length,
win_length=self.win_size,
window=self.hann_window[hann_window_key],
center=self.center,
pad_mode="reflect",
normalized=False,
onesided=True,
return_complex=True,
).abs()
return spec
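# Note (added for clarity, not part of the original file): with center=False and the default
# n_fft == win_length, reflect-padding by (win_size - hop_length) // 2 on the left and
# (win_size - hop_length + 1) // 2 on the right makes torch.stft return exactly
# len(y) // hop_length frames, so the magnitude spectrogram produced here lines up with the
# mel/f0 frame count used by the rest of the task.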
class univnet_task(GanBaseTask):
def __init__(self, config):
super().__init__(config)
self.TF = PitchAdjustableMelSpectrogram( f_min=0,
f_max=None,
n_mels=256,)
self.logged_gt_wav = set()
self.stft=stftlog()
upmel = config['model_args'].get('upmel')
self.upmel=upmel
# if upmel is not None:
# self.noisec=config['model_args']['cond_in_channels']*upmel
# else:
self.noisec = config['model_args']['cond_in_channels']
def build_dataset(self):
self.train_dataset = nsf_HiFigan_dataset(config=self.config,
data_dir=pathlib.Path(self.config['DataIndexPath']) / self.config[
'train_set_name'])
self.valid_dataset = nsf_HiFigan_dataset(config=self.config,
data_dir=pathlib.Path(self.config['DataIndexPath']) / self.config[
'valid_set_name'], infer=True)
def build_model(self):
# cfg=self.config['model_args']
# cfg.update({'sampling_rate':self.config['audio_sample_rate'],'num_mels':self.config['audio_num_mel_bins'],'hop_size':self.config['hop_size']})
# h=AttrDict(cfg)
self.generator=UnivNet(self.config,use_weight_norm=self.config['model_args'].get('use_weight_norm',True))
self.discriminator=nn.ModuleDict({'mrd':MultiResSpecDiscriminator(fft_sizes=self.config['model_args'].get('mrd_fft_sizes',[1024, 2048, 512]),
hop_sizes=self.config['model_args'].get('mrd_hop_sizes',[120, 240, 50]),
win_lengths= self.config['model_args'].get('mrd_win_lengths',[600, 1200, 240]),), 'mpd':MultiPeriodDiscriminator(periods=self.config['model_args']['discriminator_periods'])})
def build_losses_and_metrics(self): | self.mix_loss=univloss(self.config) | 1 | 2023-10-17 13:45:09+00:00 | 12k |
OllieBoyne/FOUND | FOUND/utils/eval_utils.py | [
{
"identifier": "modified_chamf",
"path": "FOUND/utils/pytorch3d.py",
"snippet": "def modified_chamf(x,y, x_lengths=None, y_lengths=None,\n x_normals=None, y_normals=None,\n norm: int = 2):\n \"\"\"\n \tA modified version of pytorch3d.loss.chamfer_distance\n \tto allow for no point or batch reduction and some other changes\n \"\"\"\n\n if not ((norm == 1) or (norm == 2)):\n raise ValueError(\"Support for 1 or 2 norm.\")\n\n x, x_lengths, x_normals = _handle_pointcloud_input(x, x_lengths, x_normals)\n y, y_lengths, y_normals = _handle_pointcloud_input(y, y_lengths, y_normals)\n\n return_normals = x_normals is not None and y_normals is not None\n\n N, P1, D = x.shape\n P2 = y.shape[1]\n\n # Check if inputs are heterogeneous and create a lengths mask.\n is_x_heterogeneous = (x_lengths != P1).any()\n is_y_heterogeneous = (y_lengths != P2).any()\n x_mask = (\n torch.arange(P1, device=x.device)[None] >= x_lengths[:, None]\n ) # shape [N, P1]\n y_mask = (\n torch.arange(P2, device=y.device)[None] >= y_lengths[:, None]\n ) # shape [N, P2]\n\n if y.shape[0] != N or y.shape[2] != D:\n raise ValueError(\"y does not have the correct shape.\")\n\n cham_norm_x = x.new_zeros(())\n cham_norm_y = x.new_zeros(())\n\n x_nn = knn_points(x, y, lengths1=x_lengths, lengths2=y_lengths, norm=norm, K=1)\n y_nn = knn_points(y, x, lengths1=y_lengths, lengths2=x_lengths, norm=norm, K=1)\n\n cham_x = x_nn.dists[..., 0] # (N, P1)\n cham_y = y_nn.dists[..., 0] # (N, P2)\n\n if is_x_heterogeneous:\n cham_x[x_mask] = 0.0\n if is_y_heterogeneous:\n cham_y[y_mask] = 0.0\n\n\n # Gather the normals using the indices and keep only value for k=0\n x_normals_near = knn_gather(y_normals, x_nn.idx, y_lengths)[..., 0, :]\n y_normals_near = knn_gather(x_normals, y_nn.idx, x_lengths)[..., 0, :]\n\n cham_norm_x = torch.abs(\n F.cosine_similarity(x_normals, x_normals_near, dim=2, eps=1e-6)\n )\n cham_norm_y = torch.abs(\n F.cosine_similarity(y_normals, y_normals_near, dim=2, eps=1e-6)\n )\n\n return dict(cham_x=cham_x, cham_y=cham_y, cham_norm_x = cham_norm_x, cham_norm_y=cham_norm_y)"
},
{
"identifier": "modified_sample",
"path": "FOUND/utils/pytorch3d.py",
"snippet": "def modified_sample(meshes: Meshes, \n num_samples: int = 10000,\n return_normals: bool = False,\n return_textures: bool = False,):\n\n \"\"\"Modified version of pytorch3d.ops.sample_points_from_meshes\n that returns references to the faces sampled from\"\"\"\n\n if meshes.isempty():\n raise ValueError(\"Meshes are empty.\")\n\n verts = meshes.verts_packed()\n if not torch.isfinite(verts).all():\n raise ValueError(\"Meshes contain nan or inf.\")\n\n if return_textures and meshes.textures is None:\n raise ValueError(\"Meshes do not contain textures.\")\n\n faces = meshes.faces_packed()\n mesh_to_face = meshes.mesh_to_faces_packed_first_idx()\n num_meshes = len(meshes)\n num_valid_meshes = torch.sum(meshes.valid) # Non empty meshes.\n\n # Initialize samples tensor with fill value 0 for empty meshes.\n samples = torch.zeros((num_meshes, num_samples, 3), device=meshes.device)\n\n # Only compute samples for non empty meshes\n with torch.no_grad():\n areas, _ = mesh_face_areas_normals(verts, faces) # Face areas can be zero.\n max_faces = meshes.num_faces_per_mesh().max().item()\n areas_padded = packed_to_padded(\n areas, mesh_to_face[meshes.valid], max_faces\n ) # (N, F)\n\n # TODO (gkioxari) Confirm multinomial bug is not present with real data.\n sample_face_idxs = areas_padded.multinomial(\n num_samples, replacement=True\n ) # (N, num_samples)\n sample_face_idxs += mesh_to_face[meshes.valid].view(num_valid_meshes, 1)\n\n # Get the vertex coordinates of the sampled faces.\n face_verts = verts[faces]\n v0, v1, v2 = face_verts[:, 0], face_verts[:, 1], face_verts[:, 2]\n\n # Randomly generate barycentric coords.\n w0, w1, w2 = _rand_barycentric_coords(\n num_valid_meshes, num_samples, verts.dtype, verts.device\n )\n\n # Use the barycentric coords to get a point on each sampled face.\n a = v0[sample_face_idxs] # (N, num_samples, 3)\n b = v1[sample_face_idxs]\n c = v2[sample_face_idxs]\n samples[meshes.valid] = w0[:, :, None] * a + w1[:, :, None] * b + w2[:, :, None] * c\n\n if return_normals:\n # Initialize normals tensor with fill value 0 for empty meshes.\n # Normals for the sampled points are face normals computed from\n # the vertices of the face in which the sampled point lies.\n normals = torch.zeros((num_meshes, num_samples, 3), device=meshes.device)\n vert_normals = (v1 - v0).cross(v2 - v1, dim=1)\n vert_normals = vert_normals / vert_normals.norm(dim=1, p=2, keepdim=True).clamp(\n min=sys.float_info.epsilon\n )\n vert_normals = vert_normals[sample_face_idxs]\n normals[meshes.valid] = vert_normals\n\n if return_textures:\n # fragment data are of shape NxHxWxK. Here H=S, W=1 & K=1.\n pix_to_face = sample_face_idxs.view(len(meshes), num_samples, 1, 1) # NxSx1x1\n bary = torch.stack((w0, w1, w2), dim=2).unsqueeze(2).unsqueeze(2) # NxSx1x1x3\n # zbuf and dists are not used in `sample_textures` so we initialize them with dummy\n dummy = torch.zeros(\n (len(meshes), num_samples, 1, 1), device=meshes.device, dtype=torch.float32\n ) # NxSx1x1\n fragments = MeshFragments(\n pix_to_face=pix_to_face, zbuf=dummy, bary_coords=bary, dists=dummy\n )\n textures = meshes.sample_textures(fragments) # NxSx1x1xC\n textures = textures[:, :, 0, 0, :] # NxSxC\n\n out = {}\n\n out['verts'] = samples\n if return_normals: out['normals'] = normals\n if return_textures: out['textures'] = textures\n\n # return original faces\n out['face_idxs'] = sample_face_idxs\n\n return out"
},
{
"identifier": "Renderer",
"path": "FOUND/utils/renderer.py",
"snippet": "class Renderer(nn.Module):\n\n\tdef __init__(self, device='cuda', image_size=(256, 256),\n\t\t\t\t bin_size=None, z_clip_value=None,\n\t\t\t\t max_faces_per_bin=None, cam_params: dict = None,\n\t\t\t\t MAX_BATCH_SIZE=10,\n\t\t\t\t **kwargs):\n\n\t\tsuper().__init__()\n\n\t\tself.MAX_BATCH_SIZE = MAX_BATCH_SIZE\n\n\t\tif isinstance(image_size, int):\n\t\t\timage_size = (image_size, image_size)\n\n\t\tself.image_size = image_size\n\n\t\tself.img_raster_settings = RasterizationSettings(\n\t\t\timage_size=image_size, blur_radius=0.,\n\t\t\tfaces_per_pixel=1, max_faces_per_bin=max_faces_per_bin,\n\t\t\tbin_size=bin_size, z_clip_value=z_clip_value)\n\n\t\t# Rasterization settings for silhouette rendering\n\t\tsigma = 1e-6\n\t\tself.raster_settings_silhouette = RasterizationSettings(\n\t\t\timage_size=image_size,\n\t\t\tblur_radius=np.log(1. / 1e-4 - 1.) * sigma,\n\t\t\tfaces_per_pixel=10, max_faces_per_bin=max_faces_per_bin,\n\t\t\tbin_size=bin_size\n\t\t)\n\n\t\tself.rasterizer = MeshRasterizer(raster_settings=self.img_raster_settings)\n\t\tself.sil_rasterizer = MeshRasterizer(raster_settings=self.raster_settings_silhouette)\n\n\t\t# Shaders\n\t\tself.img_shader = SoftPhongShader(device=device)\n\t\tself.norm_shader = NormalShader()\n\t\tself.sil_shader = SoftSilhouetteShader()\n\n\t\t# default lighting\n\t\tself.lights = AmbientLights(device=device)\n\n\t\tself.camera_params = {}\n\t\tif cam_params is not None:\n\t\t\t# Multiple camera intrinsics not currently supported\n\t\t\tf = torch.tensor([[cam_params['focal_length']]]).to(device) # [N x 1]\n\t\t\tpp = torch.tensor(cam_params['principal_point']).unsqueeze(0).to(device) # [N x 2]\n\t\t\tself.camera_params = dict(focal_length=f, principal_point=pp,\n\t\t\t\t\t\t\t\t\t in_ndc=False, image_size=torch.tensor(image_size).unsqueeze(0).to(device))\n\n\tdef forward(self, meshes, R: torch.Tensor, T: torch.Tensor, keypoints=None,\n\t\t\t\trender_normals=True, render_rgb=True, render_sil=True,\n\t\t\t\tmask_out_faces=None, return_cameras=False, camera_params=None,\n\t\t\t\tnormals_fmt='blender', one_view_per_mesh=False):\n\t\t\"\"\"\n\t\tCan receive various number of 'views' (size of R) and meshes (size of 'meshes')\n\t\tN input views, 1 mesh -> render N views of 1 mesh\n\t\tN input views, N mesh -> render one view per mesh (only if one_view_per_mesh is True)\n\t\tN input views, M mesh -> render N views of M meshes\n\n\t\tRender modes:\n\t\t\t- render_rgb: render RGB image\n\t\t\t- render_normals: render surface normals\n\t\t\t- render_sil: render silhouette\n\t\t\t- keypoints: project 3D keypoints onto image\n\n\t\t:param R: [N x 4 x 4]\n\t\t:param T: [N x 4 x 4]\n\t\t:param keypoints: optional [M x P x 3] keypoints to render\n\t\t:param mask_out_faces: [M x F] faces per mesh to optionally remove from seg & normal\n\t\t:param camera_params: Optional per-camera focal length & principal point\n\t\t:return:\n\n\t\tCurrently does not support M > 1 rendering to M images.\n\t\t\"\"\"\n\n\t\tif camera_params is None:\n\t\t\tcamera_params = self.camera_params\n\n\t\tN = R.shape[0] # number of views\n\t\tM = len(meshes) # number of meshes\n\n\t\tif M > 1 and (N == M):\n\t\t\tassert one_view_per_mesh, \"For N == M, M > 1, requires one_view_per_mesh=True parameter.\"\n\n\t\t\tout_shape_rgb = (N, *self.image_size, 3)\n\t\t\tout_shape_single = (N, *self.image_size)\n\t\t\tbatch_size = N\n\n\t\t# in the case M != N for M > 1, want to render all N views for each mesh\n\t\telif M != N and M > 1:\n\t\t\tmeshes = meshes.extend(N) # produce a mesh for each 
view\n\t\t\tR = torch.cat([R] * M, dim=0)\n\t\t\tT = torch.cat([T] * M, dim=0) # produce R, T for each mesh\n\n\t\t\tout_shape_rgb = (N, M, *self.image_size, 3)\n\t\t\tout_shape_single = (N, M, *self.image_size)\n\t\t\tbatch_size = N * M\n\n\t\t# in the case M = 1, N >= 1, render N views of 1 mesh\n\t\telse:\n\t\t\tmeshes = meshes.extend(N) # produce a mesh for each view\n\t\t\tout_shape_rgb = (N, *self.image_size, 3)\n\t\t\tout_shape_single = (N, *self.image_size)\n\n\t\tcameras = PerspectiveCameras(device=meshes.device, R=R, T=T, **camera_params)\n\n\t\tout = dict()\n\t\t_frags = None\n\t\tnormals = None\n\t\tif render_rgb or render_normals:\n\t\t\tfragments = self.rasterizer(meshes, cameras=cameras)\n\t\t\t_frags = fragments # Store fragments for mask out faces\n\n\t\t\tif render_rgb:\n\t\t\t\tout['rgb'] = self.img_shader(fragments, meshes, cameras=cameras, lights=self.lights)[..., :3].reshape(\n\t\t\t\t\tout_shape_rgb)\n\n\t\t\tif render_normals:\n\t\t\t\tnormals = self.norm_shader(fragments, meshes, cameras=cameras)\n\n\t\tif render_sil:\n\t\t\tfragments_sil = self.sil_rasterizer(meshes, cameras=cameras)\n\t\t\tif _frags is None: _frags = fragments_sil # Store fragments for mask out faces\n\n\t\t\tsil = self.sil_shader(fragments_sil, meshes, cameras=cameras)\n\t\t\tout['sil'] = sil[..., -1].reshape(out_shape_single) # return just alpha channel (silhouette)\n\n\t\t# Apply face masking of FIND model\n\t\tif (render_rgb or render_sil or render_normals) and mask_out_faces is not None:\n\t\t\t# get foremost face for each pixel in correct format\n\t\t\tpix_to_face = get_padded_pix_to_face(_frags.pix_to_face[..., 0], meshes).reshape(out_shape_single)\n\n\t\t\tfor n in range(N):\n\t\t\t\tmask_pix = torch.isin(pix_to_face[n], mask_out_faces)\n\n\t\t\t\tif render_rgb:\n\t\t\t\t\tout['rgb'][n][mask_pix] = 1. # set pixels to white\n\n\t\t\t\tif render_sil:\n\t\t\t\t\tout['sil'][n, mask_pix] = 0.\n\n\t\t\t\tif render_normals:\n\t\t\t\t\tnormals.mask[n] *= ~mask_pix # does not work for certain batch types\n\n\t\tif render_normals:\n\t\t\t# Also return rgb and xyz of normals\n\t\t\tout['norm_rgb'] = normals.to_rgb(format=normals_fmt, mask_value=.5).reshape(out_shape_rgb)\n\t\t\tout['norm_xyz'] = normals.to_xyz(format=normals_fmt).reshape(out_shape_rgb)\n\n\t\tif keypoints is not None:\n\t\t\tkps_2d = cameras.transform_points_screen(keypoints, image_size=self.image_size)[..., :2]\n\t\t\tout['kps'] = kps_2d\n\n\t\tif return_cameras:\n\t\t\tout['cameras'] = cameras\n\n\t\treturn out"
},
{
"identifier": "view_from",
"path": "FOUND/utils/renderer.py",
"snippet": "def view_from(view_kw='topdown', dist=.35):\n\tkws = ['topdown', 'side1', 'side2', 'toes', '45', '60']\n\n\tif isinstance(view_kw, str):\n\t\tview_kw = [view_kw]\n\n\tN = len(view_kw)\n\tR, T = torch.empty((N, 3, 3)), torch.empty((N, 3))\n\tfor n, v in enumerate(view_kw):\n\t\tassert v in kws or isinstance(v, int), f\"View description `{view_kw}` not understood\"\n\n\t\tdist, elev, azim, point = dist, 0, 0, ((0, 0, 0),)\n\t\tif v == 'topdown': elev = 0\n\t\tif v == 'side1': elev = 90\n\t\tif v == 'side2': elev, azim = -90, 180\n\t\tif v == 'toes': point = ((0.1, 0, 0),); dist = 0.1\n\t\tif isinstance(v, int):\n\t\t\telev = v\n\n\t\t_R, _T = look_at_view_transform(dist=dist, elev=elev, azim=azim, up=((1, 0, 0),), at=point)\n\n\t\tR[n] = _R\n\t\tT[n] = _T\n\n\treturn R, T"
},
{
"identifier": "produce_grid",
"path": "FOUND/utils/vis.py",
"snippet": "def produce_grid(entries):\n\t\"\"\"Receives list of lists, containing several possible data types. Converts them all to the correct RGB uint8 format, combines into a single image, and returns.\n\n\tAccepted formats:\n\tTensor, any device, >= 2 dims (will take first element in all above last 3), >= 3 channels (will take first 3) OR 1 channel (segmentation)\n\tnp.ndarray (same rules as tensor)\n\tNone - fill with blank\n\n\tPads all rows with black images if not enough elements\n\t\"\"\"\n\n\tif not isinstance(entries[0], list):\n\t\tentries = [entries] # convert to 2D list of lists\n\n\tM = max(map(len, entries))\n\n\tH, W = None, None\n\n\trows = []\n\tfor j, raw_row in enumerate(entries):\n\t\trow = []\n\t\tfor i, entry in enumerate(raw_row):\n\t\t\tif entry is None:\n\t\t\t\tentry = np.zeros((H, W, 3), dtype=np.uint8)\n\n\t\t\tentry = tens2rgb(entry)\n\n\t\t\tassert entry.ndim >= 2, f\"Arrays for grid must have >= 2 dimensions. Entry ({i}, {j}) has shape {entry.shape}.\"\n\t\t\tentry = reduce_ax(entry, 3) # reduce dimensions to just get a single image\n\n\t\t\t# handle segmentations\n\t\t\tif entry.shape[-1] > 4: # if last axis is clearly a width/height axis\n\t\t\t\tentry = seg_to_rgb(reduce_ax(entry, 2))\n\n\t\t\tentry = entry[..., :3] # only take first 3 channels\n\n\t\t\tif i == j == 0:\n\t\t\t\tH, W, _ = entry.shape\n\n\t\t\tentry = entry.astype(np.uint8)\n\t\t\trow.append(entry)\n\n\t\tfor i in range(M - len(raw_row)):\n\t\t\trow.append(np.zeros((H, W, 3), dtype=np.uint8)) # pad each row with black images if not enough items\n\n\t\t# stack the row images together\n\t\ttry:\n\t\t\trows.append(np.hstack(row))\n\t\texcept:\n\t\t\traise ValueError(\n\t\t\t\tf\"Could not combine row {j}, of raw shapes: {[x.shape for x in raw_row]}. Attempted conversion to shapes: {[x.shape for x in row]}\")\n\n\treturn np.vstack(rows)"
},
{
"identifier": "put_text",
"path": "FOUND/utils/vis.py",
"snippet": "def put_text(img, string, x, y, width, height, backg=(0,0,0), scale=1, vertical=False):\n\t\"\"\"Place text on an image, with top left corner (x,y), and a given width height.\n\tWhite text, black background fixed.\n\tVertical flag used to rotate 90 degrees anticlockwise\"\"\"\n\n\tout = img.copy()\n\tout[y:y+height, x:x+width] = get_text(string.split('\\n'), width, height, scale=scale, backg=backg, vertical=vertical)\n\treturn out"
},
{
"identifier": "colourbar",
"path": "FOUND/utils/vis.py",
"snippet": "def colourbar(width, height, colours, points=(0, 1), orientation='vertical'):\n\t\"\"\"Produce a colour bar of size width x height.\n\tAt each point in `points`, the colour at point along the horizontal/vertical (depending on `orientation`)\n\tmust be the corresponding colour in `colour`. Between points, linearly interpolate.\"\"\"\n\n\tassert len(colours) == len(points), \"Colours to points must be 1-1 correspondence for colourbar\"\n\tcolours = np.array(colours)\n\n\timg = np.zeros((height, width, 3))\n\tfor (c0, p0, c1, p1) in zip(colours, points, colours[1:], points[1:]):\n\t\tif orientation == 'vertical':\n\t\t\tv0, v1 = int(p0*height), int(p1*height)\n\t\t\timg[v0: v1] = c0[None, None, :] + np.linspace(0, 1, v1-v0)[:, None, None] * (c1 - c0)[None, None, :]\n\n\t\telse:\n\t\t\th0, h1 = int(p0 * width), int(p1 * width)\n\t\t\timg[:, h0:h1] = c0 + np.linspace(0, 1, h1 - h0) * (c1 - c0)\n\n\treturn img.astype(np.uint8)"
}
] | from pytorch3d.renderer import TexturesVertex
from pytorch3d.structures import Meshes
from multiprocessing import Process
from prettytable import PrettyTable
from .pytorch3d import modified_chamf, modified_sample
from .renderer import Renderer, view_from
from .vis import produce_grid, put_text, colourbar
from matplotlib import pyplot as plt
import os
import trimesh
import cv2
import multiprocessing as mp
import torch
import torch.nn.functional as F
import numpy as np
import json | 8,570 | if settings.get('model', 'FIND') == 'FIND':
# apply masking here to not include errors for sole in pred -> real
# errs is computed on a sample of surface points, so it has to be indexed via the sampled face idxs
sampled_vertex_mask = FIND_sole_faces_mask[pred_sample_dict['face_idxs'].cpu().detach().numpy()[0]]
errs['cham_x'][:, sampled_vertex_mask] = np.nan
errs['cham_norm_x'][:, sampled_vertex_mask] = np.nan
# vis_errs is computed on every vertex of the mesh
vis_errs['cham_x'][:, FIND_sole_vertex_mask] = np.nan
vis_errs['cham_norm_x'][:, FIND_sole_vertex_mask] = np.nan
# visualisation info for each metric of error
vis_params = {
'cham': dict(vmin=0, vmax=1e-4, mag=1_000_000, units='um', cutoffs=np.array([5, 10, 15, 20, 25])*1e-6, xscale='log'),
'cham_norm': dict(vmin=0, vmax=60, mag=1, units='deg', cutoffs=[5, 7.5, 11.25, 22.5, 30], xscale='lin')
}
# define axes
fig, axs = plt.subplots(nrows=2, ncols=2, sharex='col')
axs[0, 0].set_title('Chamfer Error')
axs[0, 1].set_title('Normal Error')
axs[0, 0].set_ylabel('pred2real')
axs[1, 0].set_ylabel('real2pred')
axs[1, 0].set_xlabel('um')
axs[1, 1].set_xlabel('Degrees')
axs[1,1].set_xlim(0, 90)
axs[1, 1].set_yticks([0, 30, 60, 90])
with Reporter(os.path.join(eval_dir, 'report.txt')) as report:
report(f"Experiment: {exp_dir}")
i = 0
for L in ['cham', 'cham_norm']:
report(L)
table = PrettyTable()
cutoffs = vis_params[L]['cutoffs']
mag = vis_params[L]['mag']
table.field_names = ['Desc', 'Mean', 'Median', 'RMSE'] + [f'% < {round(x*mag)}' for x in cutoffs]
for desc, x in zip(['pred2real', 'real2pred'], ['x', 'y']):
e = errs[f'{L}_{x}'].cpu().detach().numpy()
e = e[~np.isnan(e)] # filter out nan values
metrics = eval_metrics(e, cutoffs=cutoffs)
table.add_row([desc] +
[f'{metrics[k] * mag:.2f}' for k in ['mean', 'median', 'rmse']] +
[f'{i * 100:.1f}' for i in metrics['cutoffs']]
)
# plot distribution of errors
ax = axs[i%2, i//2]
if vis_params[L]['xscale'] == 'log':
ax.hist(**get_loghist(np.ravel(e)*mag, 100), density=True)
ax.set_xscale('log')
else:
ax.hist(np.ravel(e) * mag, bins=100, density=True)
i+=1
results[f'{L}_{desc}'] = {**{k: metrics[k] * mag for k in ['mean', 'median', 'rmse']},
**{f'% < {round(c*mag)}': i * 100 for c, i in zip(cutoffs, metrics['cutoffs'])}}
report(table.get_string())
report("")
plt.savefig(os.path.join(eval_dir, 'err_dist.png'))
plt.close()
# Set up rendering
if render:
renderer: Renderer = Renderer(image_size=256, max_faces_per_bin=100_000, device=device)
R, T = view_from(['side1', 'topdown', 'side2'])
nviews = len(R)
vis_elements = []
# render chamf & norm err on GT mesh and pred mesh
for i, (mesh, err_key) in enumerate(zip([gt_mesh, pred_mesh, gt_mesh, pred_mesh],
['cham_y', 'cham_x', 'cham_norm_y', 'cham_norm_x'])):
vis_type = 'cham_norm' if 'norm' in err_key else 'cham'
# set texture according to error
this_error = vis_errs[err_key]
colours = err_to_colour(this_error, vmin=vis_params[vis_type]['vmin'], vmax=vis_params[vis_type]['vmax'])
mesh.textures = TexturesVertex(colours)
res = renderer(mesh, R, T, render_normals=False, render_sil=False) # render mesh
# add to plot
vis_elements.append([res['rgb'][n] for n in range(nviews)])
grid = produce_grid(vis_elements)
gridH, gridW, _ = grid.shape
left_size = gridH // 8 # size of left padding in pix
right_size = gridH // 8 # right size padding for colourbar
out = np.zeros((gridH, left_size + gridW + right_size, 3), dtype=np.uint8)
out[:, left_size:-right_size] = grid
# write row names
row_names = 'Chamf\nGT', 'Chamf\nFIND', 'Norm\nGT', 'Norm\nFIND'
for i in range(4):
out = put_text(out, row_names[i],
x=0, y=int(gridH*i/4), width=int(left_size), height=int(gridH//4), scale=left_size / 100,
vertical=True)
# add colourbars
# width, height, colours, points = (0, 1), orientation = 'vertical'
cW, cH = right_size//2, int(gridH*0.3)
cbar_x = left_size + gridW + (right_size - cW) // 2
cbar_ys = [int(0.1 * gridH), int(0.6*gridH)]
for key, y in zip(['cham', 'cham_norm'], cbar_ys):
params = vis_params[key]
| """Evaluate the performance of a fitted mesh"""
device = 'cuda'
def eval_metrics(arr, cutoffs=[5, 7.5, 11.25, 22.5, 30]):
"""Given a 1d array, return mean, median, rmse,
and % of values less than each in `cutoffs`"""
assert arr.ndim == 1, "eval_metrics requires 1D array"
out = dict(mean = arr.mean(), median = np.median(arr), rmse = (arr ** 2).mean() **.5,
cutoffs = [(arr < i).mean() for i in cutoffs])
return out
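# Worked example (illustrative, not part of the original file): for arr = np.array([4., 6., 10.])
# and cutoffs=[5, 7.5], eval_metrics returns mean ~6.67, median 6.0, rmse = sqrt(152 / 3) ~ 7.12,
# and cutoffs = [1/3, 2/3] (the fraction of values below 5 and below 7.5 respectively).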
def err_to_colour(err: torch.Tensor, vmin:float=None, vmax:float=None, colmin=(0, 1, 0), colmax=(1, 0, 0), nan_colour=(0.3, 0.3, 0.3)):
"""Convert a tensor of errors (...) to an RGB colour scale (..., 3).
Linearly interpolate so that err of vmin -> colmin, err of vmax -> colmax
if vmin and vmax not given, take min and max of err
If any nan's given, set their colour to nan_colour
"""
ndim = err.ndim
colmin = torch.tensor(colmin)[(None,)*ndim].to(err.device) # expand colmin to [..., 3]
colmax = torch.tensor(colmax)[(None,)*ndim].to(err.device)
colnan = torch.tensor(nan_colour)[(None,)*ndim].to(err.device)
vmin = err.nanmin() if vmin is None else vmin
vmax = err.nanmax() if vmax is None else vmax
fracs = (err - vmin) / (vmax - vmin)
rgba = (colmin + fracs.unsqueeze(-1) * (colmax - colmin)).to(err.device)
rgba = torch.clip(rgba, min=0, max=1)
rgba[torch.any(torch.isnan(rgba), dim=-1)] = colnan
return rgba
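# Illustration (not part of the original file): with the default colours, an error of vmin maps to
# green (0, 1, 0), vmax to red (1, 0, 0) and the midpoint to (0.5, 0.5, 0); values outside
# [vmin, vmax] are clipped to the end colours, and NaN errors are painted nan_colour (grey).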
class Reporter:
"""Receive statements, on exit print all and save all to file"""
def __init__(self, out_file_loc):
self.lines = []
self.out_file_loc = out_file_loc
def __call__(self, line):
self.lines.append(line)
def __enter__(self, *args):
return self
def __exit__(self, *args):
[*map(print, self.lines)]
with open(self.out_file_loc, 'w') as outfile:
outfile.writelines([s + '\n' for s in self.lines])
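# Usage sketch (mirrors how Reporter is used in eval_exp below):
# with Reporter('report.txt') as report:
#     report("some line")
# # on exit, every logged line is printed and written to report.txt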
def get_max_fit(exp_dir):
"""Search in an experiment directory for the fit_xx.obj with the highest value"""
f = lambda s: -1 if 'fit_' not in s else int(s.split('fit_')[1].split('.obj')[0])
return max(os.listdir(exp_dir), key=f)
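# Illustration (not part of the original file): for a directory containing
# ['opts.json', 'fit_2.obj', 'fit_10.obj'], the key maps the entries to -1, 2 and 10,
# so get_max_fit returns 'fit_10.obj', i.e. the latest saved fit.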
def cutoff_slice_FIND(mesh, max_heel_height = 0.04, cutoff_height = 0.1):
"""Similar mesh slicing method to FIND: identify the heel keypoint and slice off everything more than cutoff_height above it"""
X, Y, Z = mesh.vertices.T
Xma = np.ma.array(X, mask= Z >= max_heel_height)
heel_idx = np.ma.argmin(Xma)
slice_height = min(Z[heel_idx] + cutoff_height, Z.max() - 5e-3)
return mesh.slice_plane([0, 0, slice_height], [0, 0, -1], cap=False)
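# Note (added for clarity): the heel is the vertex with minimum X among vertices lower than
# max_heel_height; slice_plane with normal [0, 0, -1] keeps the part of the mesh with
# Z <= slice_height, so everything more than cutoff_height above the heel is removed
# (capped 5 mm below the top of the mesh, assuming metre units).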
def get_loghist(x, nbins):
hist, bins = np.histogram(x, bins=nbins)
logbins = np.logspace(np.log10(bins[0]),np.log10(bins[-1]),len(bins))
return dict(x=x, bins=logbins)
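# Note (added for clarity): get_loghist returns keyword arguments for Axes.hist with log-spaced
# bin edges spanning the data range (it assumes the data minimum is positive); it is used below
# as ax.hist(**get_loghist(...), density=True).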
def eval_exp(exp_dir, render=True):
results = {} # return results as errors
if not any('fit_' in f for f in os.listdir(exp_dir)):
print(f"No fits for {exp_dir}, skipping...")
return
pred_obj_loc = os.path.join(exp_dir, get_max_fit(exp_dir))
# load settings to get folder
opts_loc = os.path.join(exp_dir, 'opts.json')
if not os.path.isfile(opts_loc):
print(f"No opts for {exp_dir}, skipping...")
return
with open(opts_loc) as infile:
settings = json.load(infile)
# assume GT OBJ loc is
# (1) saved in <data_folder>/mesh.obj if <data_folder> given
if 'data_folder' in settings:
gt_obj_loc = os.path.join(settings['data_folder'], 'mesh.obj')
# (2) saved in <exp_dir>/gt_mesh.obj otherwise
else:
gt_obj_loc = os.path.join(exp_dir, 'gt_mesh.obj')
eval_dir = os.path.join(exp_dir, 'eval')
os.makedirs(eval_dir, exist_ok=True)
with open(gt_obj_loc) as infile:
d = trimesh.exchange.obj.load_obj(infile, process=False)
gt_mesh_trimesh = trimesh.Trimesh(**d)
with open(pred_obj_loc) as infile:
d = trimesh.exchange.obj.load_obj(infile, process=False)
pred_mesh_trimesh = trimesh.Trimesh(**d)
# pre-process meshes, w/ cutoff
# Same method as used for Foot3D here for slicing GT
gt_mesh_trimesh = cutoff_slice_FIND(gt_mesh_trimesh)
if settings.get('model', 'FIND') == 'FIND':
# slice FIND faces
FIND_cutoff_surface = np.load(os.path.join(settings['find_pth'], 'templ_masked_faces.npy'))
FIND_sole_faces = np.load(os.path.join(settings['find_pth'], 'templ_sole_faces.npy'))
FIND_sole_verts = np.unique(np.ravel(pred_mesh_trimesh.faces[FIND_sole_faces])) # all vertices considered part of the sole
sole_vert_positions = pred_mesh_trimesh.vertices[FIND_sole_verts] # save sole vertex positions to refind them after mesh pre-processing
pred_mesh_trimesh.update_faces(~np.isin(np.arange(pred_mesh_trimesh.faces.shape[0]), FIND_cutoff_surface))
pred_mesh_trimesh = cutoff_slice_FIND(pred_mesh_trimesh)
# define a mask
# want to be able to define a mask on the FIND model, so that errors of verts in this mask aren't considered real -> pred, but are considered in reverse
# (for sole verts, unfair to count the error on them, but likewise incorrect to just remove them all, especially at the boundary)
# recalculate sole vertices
FIND_sole_vert_idxs = np.argwhere(np.all(pred_mesh_trimesh.vertices[:, None, :] == sole_vert_positions[None, ...], axis=-1))[:, 0]
FIND_sole_vertex_mask = np.isin(np.arange(pred_mesh_trimesh.vertices.shape[0]), FIND_sole_vert_idxs) # mask of which vertices correspond to the sole
FIND_sole_faces_mask = np.any(FIND_sole_vertex_mask[pred_mesh_trimesh.faces], axis=-1) # mask of which faces are in sole
else:
pred_mesh_trimesh = cutoff_slice_FIND(pred_mesh_trimesh)
# Convert to PyTorch3D
p3d_from_trimesh = lambda mesh: Meshes(verts=torch.from_numpy(np.asarray(mesh.vertices)[None, ...]).float(),
faces=torch.from_numpy(np.asarray(mesh.faces)[None, ...])).to(device)
gt_mesh = p3d_from_trimesh(gt_mesh_trimesh)
pred_mesh = p3d_from_trimesh(pred_mesh_trimesh)
# Sample vertices uniformly from mesh, returning vertex position, normal, and original face/vert idxs
gt_sample_dict = modified_sample(gt_mesh, num_samples=10_000, return_normals=True)
pred_sample_dict = modified_sample(pred_mesh, num_samples=10_000, return_normals=True)
# Calculate errors for reporting - by considering samples over the surface
errs = modified_chamf(pred_sample_dict['verts'], gt_sample_dict['verts'],
x_normals=pred_sample_dict['normals'], y_normals=gt_sample_dict['normals'])
# Calculate errors for visualisation - by considering every vertex
vis_errs = modified_chamf(pred_mesh.verts_padded(), gt_mesh.verts_padded(),
x_normals=pred_mesh.verts_normals_padded(), y_normals=gt_mesh.verts_normals_padded())
# convert from cosine similarity to error in degrees
errs['cham_norm_x'] = torch.rad2deg(torch.acos(errs['cham_norm_x']))
errs['cham_norm_y'] = torch.rad2deg(torch.acos(errs['cham_norm_y']))
vis_errs['cham_norm_x'] = torch.rad2deg(torch.acos(vis_errs['cham_norm_x']))
vis_errs['cham_norm_y'] = torch.rad2deg(torch.acos(vis_errs['cham_norm_y']))
if settings.get('model', 'FIND') == 'FIND':
# apply masking here to not include errors for sole in pred -> real
# errs is computed on a sample of surface points, so it has to be indexed via the sampled face idxs
sampled_vertex_mask = FIND_sole_faces_mask[pred_sample_dict['face_idxs'].cpu().detach().numpy()[0]]
errs['cham_x'][:, sampled_vertex_mask] = np.nan
errs['cham_norm_x'][:, sampled_vertex_mask] = np.nan
# vis_errs is computed on every vertex of the mesh
vis_errs['cham_x'][:, FIND_sole_vertex_mask] = np.nan
vis_errs['cham_norm_x'][:, FIND_sole_vertex_mask] = np.nan
# visualisation info for each metric of error
vis_params = {
'cham': dict(vmin=0, vmax=1e-4, mag=1_000_000, units='um', cutoffs=np.array([5, 10, 15, 20, 25])*1e-6, xscale='log'),
'cham_norm': dict(vmin=0, vmax=60, mag=1, units='deg', cutoffs=[5, 7.5, 11.25, 22.5, 30], xscale='lin')
}
# define axes
fig, axs = plt.subplots(nrows=2, ncols=2, sharex='col')
axs[0, 0].set_title('Chamfer Error')
axs[0, 1].set_title('Normal Error')
axs[0, 0].set_ylabel('pred2real')
axs[1, 0].set_ylabel('real2pred')
axs[1, 0].set_xlabel('um')
axs[1, 1].set_xlabel('Degrees')
axs[1,1].set_xlim(0, 90)
axs[1, 1].set_yticks([0, 30, 60, 90])
with Reporter(os.path.join(eval_dir, 'report.txt')) as report:
report(f"Experiment: {exp_dir}")
i = 0
for L in ['cham', 'cham_norm']:
report(L)
table = PrettyTable()
cutoffs = vis_params[L]['cutoffs']
mag = vis_params[L]['mag']
table.field_names = ['Desc', 'Mean', 'Median', 'RMSE'] + [f'% < {round(x*mag)}' for x in cutoffs]
for desc, x in zip(['pred2real', 'real2pred'], ['x', 'y']):
e = errs[f'{L}_{x}'].cpu().detach().numpy()
e = e[~np.isnan(e)] # filter out nan values
metrics = eval_metrics(e, cutoffs=cutoffs)
table.add_row([desc] +
[f'{metrics[k] * mag:.2f}' for k in ['mean', 'median', 'rmse']] +
[f'{i * 100:.1f}' for i in metrics['cutoffs']]
)
# plot distribution of errors
ax = axs[i%2, i//2]
if vis_params[L]['xscale'] == 'log':
ax.hist(**get_loghist(np.ravel(e)*mag, 100), density=True)
ax.set_xscale('log')
else:
ax.hist(np.ravel(e) * mag, bins=100, density=True)
i+=1
results[f'{L}_{desc}'] = {**{k: metrics[k] * mag for k in ['mean', 'median', 'rmse']},
**{f'% < {round(c*mag)}': i * 100 for c, i in zip(cutoffs, metrics['cutoffs'])}}
report(table.get_string())
report("")
plt.savefig(os.path.join(eval_dir, 'err_dist.png'))
plt.close()
# Set up rendering
if render:
renderer: Renderer = Renderer(image_size=256, max_faces_per_bin=100_000, device=device)
R, T = view_from(['side1', 'topdown', 'side2'])
nviews = len(R)
vis_elements = []
# render chamf & norm err on GT mesh and pred mesh
for i, (mesh, err_key) in enumerate(zip([gt_mesh, pred_mesh, gt_mesh, pred_mesh],
['cham_y', 'cham_x', 'cham_norm_y', 'cham_norm_x'])):
vis_type = 'cham_norm' if 'norm' in err_key else 'cham'
# set texture according to error
this_error = vis_errs[err_key]
colours = err_to_colour(this_error, vmin=vis_params[vis_type]['vmin'], vmax=vis_params[vis_type]['vmax'])
mesh.textures = TexturesVertex(colours)
res = renderer(mesh, R, T, render_normals=False, render_sil=False) # render mesh
# add to plot
vis_elements.append([res['rgb'][n] for n in range(nviews)])
grid = produce_grid(vis_elements)
gridH, gridW, _ = grid.shape
left_size = gridH // 8 # size of left padding in pix
right_size = gridH // 8 # right size padding for colourbar
out = np.zeros((gridH, left_size + gridW + right_size, 3), dtype=np.uint8)
out[:, left_size:-right_size] = grid
# write row names
row_names = 'Chamf\nGT', 'Chamf\nFIND', 'Norm\nGT', 'Norm\nFIND'
for i in range(4):
out = put_text(out, row_names[i],
x=0, y=int(gridH*i/4), width=int(left_size), height=int(gridH//4), scale=left_size / 100,
vertical=True)
# add colourbars
# width, height, colours, points = (0, 1), orientation = 'vertical'
cW, cH = right_size//2, int(gridH*0.3)
cbar_x = left_size + gridW + (right_size - cW) // 2
cbar_ys = [int(0.1 * gridH), int(0.6*gridH)]
for key, y in zip(['cham', 'cham_norm'], cbar_ys):
params = vis_params[key] | cbar = colourbar(cW, cH, colours=((255, 0, 0), (0, 255, 0))) | 6 | 2023-10-24 11:46:42+00:00 | 12k |
RobertCsordas/moe | tasks/simple/language_model/enwik8_transformer.py | [
{
"identifier": "TransformerLanguageModel",
"path": "models/transformer_language_model.py",
"snippet": "class TransformerLanguageModel(LoggingLayer, torch.nn.Module):\n def __init__(self, voc_size: int, embedding_size: Optional[int], state_size: int, dropout: float,\n tied_embedding: bool, layers: List[torch.nn.Module], n_prev_states: int,\n n_prev_states_test: Optional[int] = None, adaptive_cutoffs: List[int] = [],\n same_length_eval: bool = True, norm_before_output: bool = False,\n p_drop_layer: float = 0.0, use_last_state: bool = False, same_length: bool = False,\n output_mode: str = \"normal\"):\n\n super().__init__()\n\n self.embedding = torch.nn.Embedding(voc_size, embedding_size or state_size)\n # with torch.no_grad():\n # self.embedding.weight.uniform_(-0.1, 0.1)\n\n torch.nn.init.xavier_uniform_(self.embedding.weight)\n\n self.shared_layers = all([la is layers[0] for la in layers])\n\n if embedding_size is None:\n self.embedding_adapter = lambda x: x\n else:\n self.embedding_adapter = torch.nn.Linear(embedding_size, state_size)\n\n self.dropout = torch.nn.Dropout(dropout)\n self.layers = torch.nn.ModuleList(layers)\n self.output_adapter = lambda x: x\n self.n_prev_states = n_prev_states\n self.n_prev_states_test = n_prev_states_test or n_prev_states\n self.same_length_eval = same_length_eval\n self.embedding_scale = math.sqrt(state_size)\n self.p_drop_layer = p_drop_layer\n self.use_last_state = use_last_state\n self.same_length = same_length\n self.iter = 0\n self.output_mode = output_mode\n\n assert self.output_mode in {\"normal\", \"sum\", \"geometric\", \"sigmoid\"}\n\n if self.output_mode in {\"geometric\", \"sigmoid\"}:\n self.output_gate = torch.nn.Linear(state_size, 1)\n\n self.adaptive = bool(adaptive_cutoffs)\n\n out_proj_size = (embedding_size or state_size) if tied_embedding else state_size\n if self.adaptive:\n self.output = framework.layers.CustomAdaptiveLogSoftmaxWithLoss(\n out_proj_size, voc_size, adaptive_cutoffs, div_value=1,\n tied_to=self.embedding if tied_embedding else None)\n else:\n self.output = torch.nn.Linear(out_proj_size, voc_size)\n\n if norm_before_output or self.output_mode in {\"sum\", \"sigmoid\"}:\n self.out_norm = torch.nn.LayerNorm(state_size)\n else:\n self.out_norm = lambda x: x\n\n if tied_embedding:\n if not self.adaptive:\n self.output.weight = self.embedding.weight\n if embedding_size is not None:\n self.output_adapter = torch.nn.Linear(state_size, embedding_size)\n\n @staticmethod\n def generate_history_mask(sz: int, device: torch.device) -> torch.Tensor:\n return torch.tril(torch.ones(sz, sz, dtype=torch.bool, device=device), diagonal=-1)\n\n def gen_output(self, x: torch.Tensor, target: Optional[torch.Tensor]) -> torch.Tensor:\n net = self.out_norm(x)\n net = self.output_adapter(net)\n net = self.dropout(net)\n\n if self.adaptive:\n net = self.output(net.transpose(0, 1), target)\n else:\n net = self.output(net.transpose(0, 1))\n\n return net\n\n def accumulate_output(self, features: List[torch.Tensor]) -> torch.Tensor:\n if self.output_mode == \"sum\":\n return sum(features)\n elif self.output_mode in {\"geometric\", \"sigmoid\"}:\n # Must cast it to float16, otherwise pytorch will crash after a few hundred iterations with an\n # incomprehensible error in the gradient scaler\n gates = torch.sigmoid(torch.cat([self.output_gate(f).float() for f in features], -1))\n if self.output_mode == \"geometric\":\n ngates = torch.cumprod(1.0 - gates, -1)\n scores = torch.cat([gates[..., 0:1], gates[..., 1:] * ngates[..., :-1]], -1)\n else:\n scores = gates\n\n if self.iter % 100 == 0 and self.training:\n self.log(\"output_gate_mean\", 
framework.visualize.plot.Barplot(scores.flatten(end_dim=-2).mean(0)))\n # return sum(f * scores[..., i: i+1] for i, f in enumerate(features))\n f = scores.unsqueeze(-2) @ torch.stack(features, -2)\n return f.squeeze(-2)\n else:\n assert False, \"Invalid output mode\"\n\n def forward(self, x: torch.Tensor, target: Optional[torch.Tensor], state) -> Tuple[torch.Tensor, Any]:\n causality_mask = Transformer.generate_square_subsequent_mask(x.shape[0], x.device)\n\n net = self.dropout(self.embedding(x.T.long()))\n net = self.embedding_adapter(net)\n net = net * self.embedding_scale\n\n new_state = []\n features = [net]\n\n n_prev_states = self.n_prev_states if self.training else self.n_prev_states_test\n\n same_length = self.same_length or ((not self.training) and self.same_length_eval)\n if same_length and state is not None:\n causality_mask = [self.generate_history_mask(x.shape[0], x.device)] + \\\n [torch.zeros_like(causality_mask)] * (len(state[0]) - 1) + [causality_mask]\n causality_mask = torch.cat(causality_mask, -1)\n\n plot_cossim = (self.iter % 100 == 0 and self.training)\n for li, l in enumerate(self.layers):\n if n_prev_states > 0:\n if li == 0:\n # Pos offset should be constant for all layers\n pos_offset = sum(s.shape[1] for s in state[0]) if state is not None else 0\n\n # Concatenate the new state with the previous states\n li_r = 0 if self.use_last_state else li\n s = (state[li_r] + [net]) if state is not None else [net]\n attend_to = torch.cat(s, 1)\n\n if not self.use_last_state:\n s[-1] = s[-1].detach()\n new_state.append(s[-n_prev_states:])\n else:\n pos_offset = None\n attend_to = None\n\n net_o = l(net, mask=AttentionMask(None, causality_mask), attend_to=attend_to,\n pos_offset=pos_offset)\n\n if plot_cossim or self.output_mode != \"normal\":\n features.append(net_o)\n\n with torch.no_grad():\n ndiff = torch.norm(net_o - net, p=2, dim=-1)\n n_in = torch.norm(net, p=2, dim=-1)\n self.log(f\"activation_norm/abs_update_layer_{li}\", ndiff.mean())\n self.log(f\"activation_norm/in_layer_{li}\", n_in.mean())\n self.log(f\"activation_norm/rel_update_layer_{li}\", (ndiff/n_in.clamp(min=torch.finfo(n_in.dtype).eps)).mean())\n\n if self.training and self.p_drop_layer > 0.0:\n net = torch.where(torch.rand_like(net_o[..., 0:1]) < self.p_drop_layer, net, net_o)\n else:\n net = net_o\n\n if self.use_last_state and n_prev_states > 0:\n # If we carry over the last state, save it here\n new_state = [((state[0] if state is not None else []) + [net.detach()])[-n_prev_states:]]\n\n if self.output_mode != \"normal\":\n net = self.accumulate_output(features)\n\n if plot_cossim:\n with torch.no_grad():\n f_sample = [f.view(-1, f.shape[-1])[:1024] for f in features]\n f_sample_all = torch.stack(f_sample, -2)\n scores = framework.utils.cossim(f_sample_all, f_sample_all).mean(0)\n self.log(\"feature_cossim\", framework.visualize.plot.Heatmap(scores, range=(0, 1), textval=False))\n\n if self.output_mode != \"normal\":\n f_sample = [self.accumulate_output(f_sample[:i]) for i in range(1, len(f_sample)+1)]\n f_sample_all = torch.stack(f_sample, -2)\n\n outs = F.softmax(self.gen_output(f_sample_all, target).transpose(0, 1), -1)\n scores = framework.utils.cossim(outs, outs).mean(0)\n self.log(\"out_dist_cossim\", framework.visualize.plot.Heatmap(scores, range=(0, 1), textval=False))\n\n real_out = outs[:, -1]\n for i in range(outs.shape[-2] - 1):\n self.log(f\"out_diff_{i}\", (outs[:, i] - real_out).norm(dim=-1, p=1).mean())\n\n del outs\n del features\n\n net = self.gen_output(net, target)\n self.iter += 
1\n\n return net, new_state"
},
{
"identifier": "task",
"path": "tasks/task_db.py",
"snippet": "def task(name: Optional[str] = None):\n def wrapper(cls):\n n = TASK_PREFIX + (name or camel_to_snake(cls.__name__))\n assert n not in TASKS, f\"Task {n} already exists\"\n TASKS[n] = cls\n return cls\n return wrapper"
},
{
"identifier": "args",
"path": "tasks/task_db.py",
"snippet": "def args(fn):\n global ARGS_REGISTERS\n ARGS_REGISTERS.append(fn)\n return fn"
},
{
"identifier": "TransformerLMMixin",
"path": "tasks/simple/language_model/transformer_lm_mixin.py",
"snippet": "class TransformerLMMixin:\n helper: framework.helpers.TrainingHelper\n\n def is_preln(self) -> bool:\n return \"preln\" in self.helper.args.transformer.variant\n\n def topk_activation(self, x: torch.Tensor) -> torch.Tensor:\n nx = -x\n return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0)\n\n def get_layers(self) -> List[torch.nn.Module]:\n # pyright: reportOptionalMemberAccess=false\n if self.helper.args.transformer.activation == \"relu\":\n activation = F.relu\n elif self.helper.args.transformer.activation == \"topk\":\n activation = self.topk_activation\n elif self.helper.args.transformer.activation == \"identity\":\n activation = lambda x: x\n elif self.helper.args.transformer.activation == \"sigmoid\":\n activation = torch.sigmoid\n elif self.helper.args.transformer.activation == \"gelu\":\n activation = F.gelu\n elif self.helper.args.transformer.activation == \"softmax\":\n activation = lambda x: F.softmax(x, dim=-1)\n else:\n raise ValueError(f\"Invalid activation: {self.helper.args.transformer.activation}\")\n\n base_args = dict(\n d_model=self.helper.args.state_size,\n nhead=self.helper.args.transformer.n_heads,\n dim_feedforward=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier),\n dropout=self.helper.args.dropout,\n activation=activation\n )\n\n\n extra_args = {} if not self.helper.args.transformer.variant.endswith(\"_gelu\") else {\n \"activation\": F.gelu,\n \"drop_expand\": False\n }\n\n\n if self.helper.args.transformer.variant in {\"preln_relative\"}:\n mklayer = lambda: PrelnRelativeTransformerEncoderLayer(\n **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp,\n n_layers=self.helper.args.transformer.encoder_n_layers,\n head_projection_size=self.helper.args.transformer.head_projection_size,)\n elif self.helper.args.transformer.variant in {\"preln_topk\"}:\n mklayer = lambda: TopkTransformer(\n **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp,\n n_layers=self.helper.args.transformer.encoder_n_layers, k=self.helper.args.transformer.topk_value,\n use_norm=self.helper.args.transformer.topk_use_norm,\n head_projection_size=self.helper.args.transformer.head_projection_size,)\n elif self.helper.args.transformer.variant in {\"preln_kvmem\"}:\n mklayer = lambda: PrelnRelativeKVMemTransformerEncoderLayer(\n **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp,\n n_layers=self.helper.args.transformer.encoder_n_layers, n_keys=self.helper.args.pkm.n_keys,\n pkm_stochastic=self.helper.args.pkm.stochastic, pkm_heads=self.helper.args.pkm.n_heads,\n pkm_custom_init=self.helper.args.pkm.custom_init, pkm_slice_values=self.helper.args.pkm.slice_values,\n pkm_knn=self.helper.args.pkm.knn, linproj=self.helper.args.kvmem.linproj,\n head_merge_topk=self.helper.args.kvmem.head_merge_topk,\n load_balance=self.helper.args.kvmem.load_balance, kvmem_dropout=self.helper.args.kvmem.dropout,\n kvmem_randomize_indices=self.helper.args.kvmem.randomize_indices,\n kvmem_query_bias=self.helper.args.kvmem.query_bias,\n standard_parallel=self.helper.args.kvmem.standard_parallel,\n approx_topk=self.helper.args.kvmem.approx_topk,\n factorize=self.helper.args.kvmem.factorize,\n full_key=self.helper.args.kvmem.full_key,\n key_redundancy_factor=self.helper.args.kvmem.key_redundancy_factor,\n two_stage=self.helper.args.kvmem.two_stage,\n head_exclusive=self.helper.args.kvmem.head_exclusive,\n 
head_projection_size=self.helper.args.transformer.head_projection_size,)\n elif self.helper.args.transformer.variant in {\"preln_moe\", \"preln_moe_universal\", \"moe\", \"moe_universal\"}:\n # def __init__(self, d_model, nhead, n_bins: int, bin_size: int, n_layers: int, dim_feedforward=2048,\n mklayer = lambda: RelativeMoeTransformerEncoderLayer(\n **base_args, **extra_args, preln=self.is_preln(),\n test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp,\n n_layers=self.helper.args.transformer.encoder_n_layers,\n standard_parallel=self.helper.args.kvmem.standard_parallel,\n custom_init=self.helper.args.pkm.custom_init,\n n_experts=self.helper.args.moe.n_experts,\n expert_size=self.helper.args.moe.expert_size,\n dropout_mode=self.helper.args.kvmem.dropout,\n knn=self.helper.args.pkm.knn,\n selection_mode=self.helper.args.moe.selection_mode,\n perplexity_reg=self.helper.args.moe.perplexity_reg,\n key_mode=self.helper.args.moe.key_mode,\n half_key=self.helper.args.moe.half_key,\n n_heads=self.helper.args.pkm.n_heads,\n norm_keys=self.helper.args.moe.norm_keys,\n perplexity_reg_mode=self.helper.args.moe.perplexity_reg_mode,\n n_random=self.helper.args.moe.n_random,\n reg_type=self.helper.args.moe.reg_type,\n std_correction=self.helper.args.moe.std_correction,\n topk_mode=self.helper.args.moe.topk_mode,\n head_projection_size=self.helper.args.transformer.head_projection_size,\n activation_after_topk=self.helper.args.moe.activation_after_topk,\n weight_grouping=self.helper.args.moe.weight_grouping,\n kmeans_distance=self.helper.args.moe.kmeans_distance,\n drop_parallel=self.helper.args.moe.drop_parallel,\n block_expert_sel_in_grad=self.helper.args.moe.block_expert_sel_in_grad,\n mlp_selection=self.helper.args.moe.mlp_selection,\n classification_target=self.helper.args.moe.classification_target,\n norm_key_init=self.helper.args.moe.norm_key_init,\n normalize_expert_sel_init=self.helper.args.moe.norm_expert_sel_init,\n norm_value_init=self.helper.args.moe.norm_value_init,\n norm_standard_parallel_values=self.helper.args.moe.norm_standard_parallel_values,\n identical_init=self.helper.args.moe.identical_init,\n topological_sel_reg=self.helper.args.moe.topological_sel_reg,\n topological_expert_reg=self.helper.args.moe.topological_expert_reg,\n gumbel_select_only=self.helper.args.moe.gumbel_select_only,\n topk_value_norm_compensation=self.helper.args.moe.topk_value_norm_compensation,\n norm_expert_scores=self.helper.args.moe.norm_expert_scores,\n sel_input_cluster_init=self.helper.args.moe.sel_input_cluster_init,\n init_norm_mode=self.helper.args.moe.init_norm_mode,\n sel_bias=self.helper.args.moe.sel_bias,\n bias=self.helper.args.moe.bias,\n rescale_normed=self.helper.args.moe.rescale_normed,\n sel_norm=self.helper.args.moe.sel_norm,\n rescale_grads=self.helper.args.moe.rescale_grads,\n gumbel_decay=self.helper.args.moe.gumbel_decay,\n ln_affine=self.helper.args.transformer.ln_affine,\n sinkhorn_local=self.helper.args.moe.sinkhorn_local,\n sinkhorn_n_iters=self.helper.args.moe.sinkhron_n_iters,\n moe_dropout_factor=self.helper.args.moe.dropout_factor,\n drop_expert=self.helper.args.moe.drop_expert,\n expert_size_init=self.helper.args.moe.expert_size_init,\n sync_distributed=self.helper.args.moe.sync_distributed,\n modulation_amplitude=self.helper.args.moe.modulation_amplitude,\n invisible_selection=self.helper.args.moe.invisible_selection,\n slope_multiplier=self.helper.args.moe.slope_multiplier,\n moe_init_scale=self.helper.args.moe.init_scale)\n else:\n assert False, \"Invalid variant\"\n\n 
layers = [mklayer() for _ in range(self.helper.args.transformer.encoder_n_layers)]\n return layers\n\n\n def fix_init(self, model):\n init_std = 0.02\n\n torch.nn.init.normal_(model.embedding.weight, 0.0, init_std)\n # torch.nn.init.normal_(model.embedding_adapter.weight, 0.0, init_std)\n\n initialized = 0\n for m in model.modules():\n if isinstance(m, (torch.nn.Linear, torch.nn.Embedding)) and hasattr(m, \"weight\"):\n torch.nn.init.normal_(m.weight, 0.0, init_std)\n initialized += m.weight.numel()\n if isinstance(m, (torch.nn.Linear, torch.nn.LayerNorm)) and m.bias is not None:\n torch.nn.init.constant_(m.bias, 0)\n initialized += m.bias.numel()\n if isinstance(m, (torch.nn.LayerNorm)) and m.weight is not None:\n torch.nn.init.normal_(m.weight, 1.0, init_std)\n initialized += m.weight.numel()\n if isinstance(m, MoE):\n torch.nn.init.normal_(m.keys, 0.0, init_std)\n torch.nn.init.normal_(m.values, 0.0, init_std)\n if m.expert_sel is not None:\n torch.nn.init.normal_(m.expert_sel, 0.0, init_std)\n initialized += m.expert_sel.numel()\n initialized += m.keys.numel() + m.values.numel()\n\n print(f\"Reinitialized {initialized/self.n_weights*100:.3f}% weights\")\n\n def create_model(self) -> torch.nn.Module:\n # pyright: reportOptionalMemberAccess=false\n tlayers = self.get_layers()\n\n if self.helper.args.transformer.output_mode != \"normal\" and self.is_preln():\n raise ValueError(\"accumulated_output not supported with pre-ln\")\n\n model = TransformerLanguageModel(\n len(self.train_set.vocabulary), self.helper.args.embedding_size,\n self.helper.args.state_size, self.helper.args.dropout,\n tied_embedding=self.helper.args.tied_embedding,\n layers=tlayers, n_prev_states=self.helper.args.lm.trafo.context_blocks,\n n_prev_states_test=self.helper.args.lm.trafo.test_context_blocks,\n same_length_eval=self.helper.args.lm.trafo.same_length_eval,\n p_drop_layer=self.helper.args.transformer.p_drop_layer,\n same_length=self.helper.args.lm.trafo.same_length,\n use_last_state=self.helper.args.lm.trafo.last_layer_context,\n norm_before_output=self.is_preln(), output_mode=self.helper.args.transformer.output_mode,)\n\n self.n_weights = sum(p.numel() for p in model.parameters())\n\n with torch.no_grad():\n if self.is_preln():\n model.embedding_scale = 1.0\n elif self.helper.args.lm.trafo.xl_init:\n self.fix_init(model)\n elif self.helper.args.lm.trafo.embedding_mode_init==\"scale_to_sqrt_dmodel\":\n norm = model.embedding.weight.norm(dim=-1).mean()\n model.embedding_scale = math.sqrt(self.helper.args.state_size) / norm\n elif self.helper.args.lm.trafo.embedding_mode_init==\"one_and_scale_to_sqrt_dmodel\":\n norm = model.embedding.weight.norm(dim=-1).mean()\n model.embedding_scale = math.sqrt(self.helper.args.state_size)\n model.embedding.weight.mul_(1.0 / norm)\n elif self.helper.args.lm.trafo.embedding_mode_init==\"init_to_sqrt_dmodel\":\n norm = model.embedding.weight.norm(dim=-1, keepdim=True)\n model.embedding_scale=1.0\n model.embedding.weight.mul_(math.sqrt(self.helper.args.state_size) / norm)\n\n return model\n\n def moe_recluster(self):\n for n, m in self.model.named_modules():\n if isinstance(m, MoE):\n perm = m.regroup_weights()\n m.patch_optimizer_state(self.optimizer, perm)\n\n def train_step(self) -> Tuple[Result, Dict[str, Any]]:\n if self.helper.args.kvmem.norm_values:\n with torch.no_grad():\n for m in self.model.modules():\n if isinstance(m, torch.nn.EmbeddingBag):\n m.weight.div_(m.weight.norm(dim=-1, keepdim=True))\n if self.helper.args.moe.recluster_steps:\n if self.helper.state.iter in 
self.helper.args.moe.recluster_steps:\n self.moe_recluster()\n\n return super().train_step()\n\n def get_optimizer_param_list(self):\n params = list(self.model.parameters())\n sel_params = []\n expert_params = []\n\n if self.helper.args.moe.sel_lr_multipler != 1.0:\n for m in self.model.modules():\n if isinstance(m, MoE):\n sel_params += list(m.sel.parameters()) if m.mlp_selection else [m.expert_sel]\n\n if self.helper.args.moe.expert_lr_multipler != 1.0:\n for m in self.model.modules():\n if isinstance(m, MoE):\n expert_params += [m.keys, m.values]\n\n excluded_params = [id(p) for p in sel_params + expert_params]\n params = [p for p in params if id(p) not in excluded_params]\n\n if not excluded_params:\n return params\n\n return [\n {\"params\": params},\n {\"params\": sel_params, \"lr\": self.helper.args.lr * self.helper.args.moe.sel_lr_multipler},\n {\"params\": expert_params, \"lr\": self.helper.args.lr * self.helper.args.moe.expert_lr_multipler},\n ]"
},
{
"identifier": "SimpleTask",
"path": "tasks/simple/simple_task.py",
"snippet": "class SimpleTask(Task):\n MAX_LENGHT_PER_BATCH = None\n train_set: torch.utils.data.Dataset\n train_loader: torch.utils.data.DataLoader\n model: torch.nn.Module\n\n def create_datasets(self):\n raise NotImplementedError()\n\n def create_model_interface(self):\n raise NotImplementedError()\n\n def create_model(self) -> torch.nn.Module:\n raise NotImplementedError()\n\n def create_state(self):\n pass\n\n @property\n def amp_enabled(self):\n return torch.cuda.is_available() and self.helper.args.amp\n\n @property\n def time_dim(self) -> int:\n return 1 - self.batch_dim\n\n def __init__(self, helper: framework.helpers.TrainingHelper):\n super().__init__(helper)\n\n self.avg_num_chunks = framework.utils.Average()\n self.reg_loss_average = framework.utils.DictAverage()\n self.max_grad = 0\n self.time_sum = 0\n\n self.create_datasets()\n self.create_loaders()\n self.model = self.create_model()\n self.model = self.model.to(self.helper.device)\n\n self.create_model_interface()\n self.create_optimizer()\n self.create_lr_scheduler()\n\n self.regularizer = LayerRegularizer(\n self.model, self.helper.args.stop_after, self.helper.args.reg_scales, self.helper.args.reg_lin_decay)\n\n self.scaler = torch.cuda.amp.GradScaler(enabled=self.amp_enabled)\n self.helper.saver[\"scaler\"] = self.scaler\n\n print(f\"Total number of model parameters: {sum(p.numel() for p in self.model.parameters())}\")\n\n self.helper.saver[\"model\"] = self.model\n self.create_state()\n self.helper.restore()\n\n self.fetcher = None\n\n def fetch_thread(self):\n data = self.prepare_data(self.get_train_batch())\n n_chunks = self.get_n_chunks(data)\n d_chunks = self.chunk_batch_dim(data, n_chunks)\n\n return data, d_chunks\n\n def create_train_loader(self, loader: torch.utils.data.Dataset, seed: Optional[int] = None,\n batch_size: Optional[int] = None) -> torch.utils.data.DataLoader:\n\n return super().create_train_loader_bs(loader, batch_size or self.helper.args.batch_size, seed)\n\n def set_train_set(self, ds: torch.utils.data.Dataset, seed: Optional[int] = None):\n self.train_set = ds\n self.train_loader = self.create_train_loader(self.train_set, seed)\n self.data_iter = iter(self.train_loader)\n\n def create_loaders(self):\n self.train_loader = self.create_train_loader(self.train_set)\n self.valid_loaders = framework.data_structures.DotDict()\n self.valid_loaders.update({k: self.create_valid_loader(v) for k, v in self.valid_sets.items()})\n\n def get_optimizer_param_list(self):\n return self.model.parameters()\n\n def create_optimizer(self):\n if self.helper.args.optimizer in [\"adam\", \"adamw\"]:\n opt = torch.optim.Adam if self.helper.args.optimizer == \"adam\" else torch.optim.AdamW\n self.set_optimizer(opt(self.get_optimizer_param_list(), self.helper.args.lr,\n weight_decay=self.helper.args.wd, betas=self.helper.args.adam.betas,\n eps=self.helper.args.adam.eps))\n elif self.helper.args.optimizer == \"adagrad\":\n self.set_optimizer(torch.optim.Adagrad(self.get_optimizer_param_list(), self.helper.args.lr,\n weight_decay=self.helper.args.wd))\n elif self.helper.args.optimizer == \"sgd\":\n self.set_optimizer(torch.optim.SGD(self.get_optimizer_param_list(), self.helper.args.lr,\n weight_decay=self.helper.args.wd, momentum=0.9))\n else:\n assert False, f\"Unsupported optimizer: {self.helper.args.optimizer}\"\n\n def set_optimizer(self, optimizer: torch.optim.Optimizer):\n self.optimizer = optimizer\n self.helper.saver.register(\"optimizer\", self.optimizer, replace=True)\n\n def get_train_batch(self) -> Dict[str, 
Any]:\n return next(self.data_iter)\n\n def chunk_batch_dim(self, data: Dict[str, Any], n: int) -> List[Dict[str, Any]]:\n if n == 1:\n return [data]\n\n res = [{} for _ in range(n)]\n for k, v in data.items():\n assert torch.is_tensor(v), \"Only tensors are supported by autosplitting\"\n\n bd = self.batch_dim if self.batch_dim < v.ndimension() else 0\n assert v.shape[bd] % n == 0, f\"Batch (dim {bd} of input {k} of shape {v.shape} is not divisible by {n})\"\n\n for i, c in enumerate(v.chunk(n, dim=bd)):\n res[i][k] = c\n\n # Avoid unnecessary computation.\n if \"in\" in data and \"in_len\" in data:\n for r in res:\n r[\"in\"] = r[\"in\"].narrow(1 - self.batch_dim, 0, int(r[\"in_len\"].max().item()))\n\n if \"out\" in data and \"out_len\" in data and data[\"out\"].ndim > 1:\n for r in res:\n r[\"out\"] = r[\"out\"].narrow(1 - self.batch_dim, 0, int(r[\"out_len\"].max().item()))\n\n return res\n\n def is_seq2seq_task(self, data: Dict[str, Any]) -> bool:\n return \"in_len\" in data and \"out_len\" in data\n\n def get_seq_length(self, data: Dict[str, Any]) -> int:\n # This assumes separate encoder and decoder\n return max(data[\"in\"].shape[self.time_dim], data[\"out\"].shape[self.time_dim] if data[\"out\"].ndim > 1 else 0)\n\n def get_n_chunks(self, data: Dict[str, Any]) -> int:\n if self.helper.args.n_microbatch:\n return self.helper.args.n_microbatch\n\n max_length_per_batch = self.helper.args.max_length_per_batch or self.MAX_LENGHT_PER_BATCH\n if self.is_seq2seq_task(data) and max_length_per_batch:\n # The formula below assumes quadratic memory consumption\n return int(2**int(self.get_seq_length(data) / max_length_per_batch))\n return 1\n\n def post_backward(self) -> Dict[str, Any]:\n return {}\n\n def train_step(self) -> Tuple[Result, Dict[str, Any]]:\n plots = {}\n\n if self.helper.args.speedtest==\"iter\":\n torch.cuda.synchronize()\n\n with self.forward_time_meter:\n self.set_lr()\n self.optimizer.zero_grad(set_to_none=True)\n\n data, d_chunks = self.fetcher.get()\n\n res_list = []\n weights = []\n\n self.avg_num_chunks.add(len(d_chunks))\n\n total_out_len = data[\"out_len\"].sum() if \"out_len\" in data else 1\n\n profiler = None\n # if self.helper.state.iter == 3:\n # profiler = torch.profiler.profile(activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA], record_shapes=True)\n # profiler.__enter__()\n\n\n call_pre_iter(self.model)\n for d in d_chunks:\n with torch.cuda.amp.autocast(enabled=self.amp_enabled):\n res, custom_plots = self.run_model(d)\n call_before_loss(self.model)\n res_list.append(res)\n plots.update(custom_plots)\n weights.append((d[\"out_len\"].sum()/total_out_len) if \"out_len\" in d else 1)\n reg_loss, reg_log = self.regularizer.get(self.helper.state.iter)\n self.reg_loss_average.add(reg_log)\n total_loss = (res_list[-1].loss + reg_loss * self.helper.args.reg) * self.helper.get_loss_scaling()\n\n if not torch.isfinite(total_loss):\n for n, p in self.model.named_parameters():\n if not torch.isfinite(p).all():\n print(f\"Found non-finite weight {n}\")\n\n for n, p in self.model.named_buffers():\n if not torch.isfinite(p).all():\n print(f\"Found non-finite buffer {n}\")\n assert False, \"Loss not finite\"\n\n self.scaler.scale(total_loss * weights[-1]).backward()\n plots.update(self.post_backward())\n\n if self.helper.dist_env.is_distributed:\n aops = []\n for p in self.model.parameters():\n if p.grad is None:\n continue\n aops.append(torch.distributed.all_reduce(p.grad.contiguous(), async_op=True))\n\n for a in aops:\n a.wait()\n\n\n 
call_post_iter(self.model)\n\n self.scaler.unscale_(self.optimizer)\n\n if self.helper.args.grad_clip:\n gn = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.helper.args.grad_clip)\n self.max_grad = max(self.max_grad, gn)\n\n\n if self.helper.args.log_grad_norms:\n for n, p in self.model.named_parameters():\n plots[f\"grad_norms/{n}\"] = p.detach().norm().item()\n\n\n self.scaler.step(self.optimizer)\n self.scaler.update()\n\n self.helper.state.iter += 1\n res = res_list[0].__class__.merge(res_list, weights)\n\n if self.helper.args.speedtest in {\"iter\"}:\n torch.cuda.synchronize()\n\n if profiler is not None:\n profiler.__exit__(None, None, None)\n profiler.export_chrome_trace(\"trace_all.json\")\n assert False\n\n\n # if self.helper.state.iter % 20 == 0:\n\n if \"in_len\" in data:\n n_total_tokens = (data[\"in_len\"] + data[\"out_len\"]).sum()\n if self.helper.dist_env.is_distributed:\n torch.distributed.all_reduce(n_total_tokens)\n\n self.total_n_token_in_period += n_total_tokens\n\n return res, plots\n\n def plot(self, res: Result) -> Dict[str, Any]:\n res = super().plot(res)\n\n if self.helper.args.dump_logs and self.helper.dist_env.is_master():\n dump_logs(self.model, self.helper.get_storage_path(\"log_dumps\") + f\"/{self.helper.state.iter}\")\n\n if self.helper.state.iter % 20 == 1:\n res.update(get_logs(self.model))\n\n res[\"average_num_chunks\"] = self.avg_num_chunks.get()\n for k, v in self.reg_loss_average.get().items():\n res[f\"train/reg_loss/{k}\"] = v\n\n if self.helper.args.grad_clip:\n res[\"max_grad\"] = self.max_grad\n self.max_grad = 0\n\n\n return res\n\n def train(self):\n self.loss_average.reset()\n\n self.data_iter = iter(self.train_loader)\n self.fetcher = framework.helpers.StoppingParallelProducer(self.fetch_thread)\n\n try:\n while (self.helper.args.stop_after or 10e10) > self.helper.state.iter:\n self.load_time_meter.stop()\n\n res, plots = self.train_step()\n plots.update(self.plot(res))\n\n with self.plot_time_meter:\n self.helper.log(plots)\n\n self.load_time_meter.start()\n\n self.helper.tick()\n except self.fetcher.Stopped:\n pass"
},
{
"identifier": "LanguageModelInterface",
"path": "interfaces/language_model_interface.py",
"snippet": "class LanguageModelInterface(ModelInterface):\n def __init__(self, model: torch.nn.Module, batch_dim: int = 1, drop_state_prob: float = 0,\n dist_env: Optional[DistributedEnv] = None, save_state: bool = False):\n super().__init__()\n self.model = model\n self.state = None\n self.batch_dim = batch_dim\n self.drop_state_prob = drop_state_prob\n self.time_dim = 1 - self.batch_dim\n self.dist_env = dist_env\n self.save_state = save_state\n\n def create_input(self, data: Dict[str, torch.Tensor]) -> torch.Tensor:\n return data[\"data\"].narrow(self.time_dim, 0, data[\"data\"].shape[self.time_dim] - 1)\n\n def decode_outputs(self, outputs: RecurrentResult) -> Any:\n return outputs.outputs\n\n def reset_state(self):\n self.state = None\n\n def loss(self, net_out: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n assert net_out.shape[:-1] == target.shape\n return F.cross_entropy(net_out.flatten(0, -2), target.flatten().long())\n\n def create_target(self, data: Dict[str, torch.Tensor]) -> torch.Tensor:\n return data[\"data\"].narrow(self.time_dim, 1, data[\"data\"].shape[self.time_dim] - 1).contiguous()\n\n def __call__(self, data: Dict[str, torch.Tensor]) -> LanguageModelResult:\n if self.model.training and self.drop_state_prob > 0 and random.random() < self.drop_state_prob:\n self.state = None\n\n input = self.create_input(data)\n target = self.create_target(data)\n\n res, state = self.model(input, target, self.state)\n if isinstance(res, torch.nn.modules.adaptive._ASMoutput):\n loss = res.loss\n # res = res.outputs\n else:\n loss = self.loss(res, target)\n\n self.state = U.apply_to_tensors(state, lambda x: x.detach())\n return LanguageModelResult(res, loss)\n\n def state_dict(self) -> Dict[str, Any]:\n if not self.save_state:\n return {}\n\n if self.dist_env is not None and self.dist_env.is_distributed:\n # Collect the state from all workers\n alist = [None] * self.dist_env.world_size\n state = torch.distributed.all_gather(alist, self.state)\n state = torch.cat(state, self.batch_dim)\n return {\"state\": state}\n else:\n return {\"state\": self.state}\n\n def load_state_dict(self, state: Dict[str, Any]):\n if not self.save_state:\n self.state = None\n return\n\n if self.dist_env is not None and self.dist_env.is_distributed:\n state_bs = state[\"state\"].shape[self.batch_dim]\n if state_bs % self.dist_env.world_size != 0:\n print(f\"WARNING: State batch size ({state_bs}) is not divisible by the number of workers ({self.dist_env.world_size}). Resetting state.\")\n self.state = None\n else:\n bs_per_worker = state_bs // self.dist_env.world_size\n self.state = state[\"state\"].narrow(self.batch_dim, self.dist_env.local_rank * bs_per_worker, bs_per_worker)\n else:\n self.state = state[\"state\"]"
}
] | import framework
import torch
import torch.nn
import torch.utils.data
import dataset
import random
from models import TransformerLanguageModel
from ... import task, args
from .transformer_lm_mixin import TransformerLMMixin
from ..simple_task import SimpleTask
from typing import Tuple, Any, Dict, List, Union
from interfaces import LanguageModelInterface | 9,241 |
@args
def a(parser: framework.helpers.ArgumentParser):
parser.add_argument("-lm.state_drop_probability", default=0.0)
parser.add_argument("-lm.lstm_weight_drop", default=0.0)
parser.add_argument("-lm.unroll", default=100)
parser.add_argument("-lm.unroll_eval", default="none", parser=parser.int_or_none_parser)
parser.add_argument("-lm.example_context", default=100)
parser.add_argument("-lm.example_window", default=40)
@task()
|
@args
def a(parser: framework.helpers.ArgumentParser):
parser.add_argument("-lm.state_drop_probability", default=0.0)
parser.add_argument("-lm.lstm_weight_drop", default=0.0)
parser.add_argument("-lm.unroll", default=100)
parser.add_argument("-lm.unroll_eval", default="none", parser=parser.int_or_none_parser)
parser.add_argument("-lm.example_context", default=100)
parser.add_argument("-lm.example_window", default=40)
@task() | class Enwik8Transformer(TransformerLMMixin, SimpleTask): | 3 | 2023-10-16 11:26:45+00:00 | 12k |
blackgold3/SemanticBoost | mdm/model_util.py | [
{
"identifier": "MDM",
"path": "mdm/model/mdm.py",
"snippet": "class MDM(nn.Module):\n def __init__(self, njoints, nfeats, latent_dim=256, ff_size=1024, num_layers=8, num_heads=4, dropout=0.1,\n activation=\"gelu\", dataset='amass', clip_dim=512,\n arch='trans_enc', clip_version=None, **kargs):\n super().__init__()\n\n self.local = kargs[\"local\"]\n self.encode_full = kargs.get(\"encode_full\", 0) #### encode_full = 1 add tokens & encode_full = 2 model compress tokens\n self.txt_tokens = kargs.get(\"txt_tokens\", 0) #### txt_tokens = 1 add tokens & txt_tokens = 2 model compress tokens\n self.dataset = dataset\n self.condition_length = 77\n self.num_frames = kargs.get(\"num_frames\", 196)\n self.json_dict = kargs.get(\"json_dict\")\n\n if arch.endswith(\"static\"):\n self.position_type = \"static\" #### [static or rope] only for llama arch\n self.arch = arch.replace(\"_static\", \"\")\n elif arch.endswith(\"rope\"):\n self.position_type = \"rope\"\n self.arch = arch.replace(\"_rope\", \"\")\n else:\n self.position_type = \"static\"\n self.arch = arch\n\n if isinstance(self.num_frames, list) or isinstance(self.num_frames, tuple):\n self.num_frames = self.num_frames[0]\n\n self.njoints = njoints\n self.nfeats = nfeats\n\n self.latent_dim = latent_dim\n\n self.ff_size = ff_size\n self.num_layers = num_layers\n self.num_heads = num_heads\n self.dropout = dropout\n\n self.activation = activation\n self.clip_dim = clip_dim\n self.action_emb = kargs.get('action_emb', None)\n\n self.input_feats = self.njoints * self.nfeats\n\n self.cond_mode = kargs.get('cond_mode', 'no_cond')\n self.cond_mask_prob = kargs.get('cond_mask_prob', 0.)\n\n\n self.input_process = InputProcess(self.input_feats, self.latent_dim) #### 输入 x 的 linear\n self.output_process = OutputProcess(self.input_feats, self.latent_dim, self.njoints,\n self.nfeats)\n\n self.sequence_pos_encoder = PositionalEncoding(self.latent_dim, self.dropout)\n\n if self.arch == 'trans_enc':\n print(\"TRANS_ENC init\")\n seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim,\n nhead=self.num_heads,\n dim_feedforward=self.ff_size,\n dropout=self.dropout,\n activation=self.activation)\n self.seqTransEncoder = nn.TransformerEncoder(seqTransEncoderLayer, num_layers=self.num_layers)\n\n elif self.arch == \"llama_encoder\":\n TransLayer = RefinedLayer(self.latent_dim, self.num_heads, self.ff_size, self.dropout, self.activation, max_seq_len=self.num_frames, position_type=self.position_type, norm_type=\"rmsnorm\")\n self.seqTransEncoder = Refined_Transformer(TransLayer, self.num_layers)\n\n elif self.arch == \"llama_decoder\":\n TransLayer = RefinedLayer(self.latent_dim, self.num_heads, self.ff_size, self.dropout, self.activation, max_seq_len=self.num_frames, position_type=self.position_type, word_tokens=True, norm_type=\"rmsnorm\")\n self.seqTransEncoder = Refined_Transformer(TransLayer, self.num_layers)\n\n else:\n raise ValueError('Please choose correct architecture')\n\n self.embed_timestep = TimestepEmbedder(self.latent_dim, self.sequence_pos_encoder)\n\n if self.cond_mode != 'no_cond':\n if 'text' in self.cond_mode:\n self.embed_text = nn.Linear(self.clip_dim, self.latent_dim)\n print('EMBED TEXT')\n print('Loading CLIP...')\n self.clip_version = clip_version\n self.clip_model = self.load_and_freeze_clip(clip_version)\n\n if self.txt_tokens == 2:\n if self.arch in [\"trans_enc\", \"llama_encoder\"]:\n scale = 3\n elif self.arch in [\"llama_decoder\"]:\n scale = 2\n\n encode_compress_layer = RefinedLayer(d_model=self.latent_dim * scale,\n nhead=self.num_heads,\n 
dim_feedforward=self.ff_size,\n dropout=self.dropout,\n activation=self.activation, norm_type=\"rmsnorm\")\n self.condition_compress = nn.Sequential(\n Refined_Transformer(encode_compress_layer, num_layers=1),\n nn.Linear(self.latent_dim * scale, self.latent_dim, )\n ) \n\n if self.encode_full != 0: #### [1, bs, 512] -> [seq, bs, 1024] -> [seq, bs, 512]\n self.code_full = Encoder_Block(begin_channel=self.input_feats, latent_dim=self.latent_dim, num_layers=6, TN=1, bias=kargs[\"conv_bias\"], norm_type=kargs[\"conv_norm\"], activate_type=kargs[\"conv_activate\"]) \n\n if self.encode_full == 2:\n encode_compress_layer = RefinedLayer(d_model=self.latent_dim * 2,\n nhead=self.num_heads,\n dim_feedforward=self.ff_size,\n dropout=self.dropout,\n activation=self.activation, norm_type=\"rmsnorm\")\n\n self.encode_compress = nn.Sequential(\n Refined_Transformer(encode_compress_layer, num_layers=1),\n nn.Linear(self.latent_dim * 2, self.latent_dim, )\n )\n\n print(\" =========================\", self.cond_mode, \"===================================\")\n\n def parameters_wo_clip(self):\n return [p for name, p in self.named_parameters() if not name.startswith('clip_model.')]\n\n def load_and_freeze_clip(self, clip_version):\n clip_model, clip_preprocess = clip.load(clip_version, device='cpu', jit=False, download_root=self.json_dict[\"clip\"]) # Must set jit=False for training\n clip_model.float()\n \n # Freeze CLIP weights\n clip_model.eval()\n for p in clip_model.parameters():\n p.requires_grad = False\n\n return clip_model\n\n def mask_cond(self, cond, force_mask=False):\n bs = cond.shape[0]\n if force_mask:\n return torch.zeros_like(cond)\n elif self.training and self.cond_mask_prob > 0.:\n mask = torch.bernoulli(torch.ones(bs, device=cond.device) * self.cond_mask_prob) # 1-> use null_cond, 0-> use real cond\n if len(cond.shape) == 3:\n mask = mask.view(bs, 1, 1)\n else:\n mask = mask.view(bs, 1)\n return cond * (1. 
- mask)\n else:\n return cond\n\n def clip_text_embedding(self, raw_text):\n device = self.clip_model.ln_final.weight.device\n default_context_length = self.condition_length\n texts = clip.tokenize(raw_text, context_length=default_context_length, truncate=True).to(device) # [bs, context_length] # if n_tokens > context_length -> will truncate\n if self.txt_tokens == 0: \n clip_feature = self.clip_model.encode_text(texts)\n else:\n with torch.no_grad():\n x = self.clip_model.token_embedding(texts) # [batch_size, n_ctx, d_model]\n x = x + self.clip_model.positional_embedding\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.clip_model.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n x = self.clip_model.ln_final(x)\n clip_feature = x[torch.arange(x.shape[0]), texts.argmax(dim=-1)] @ self.clip_model.text_projection\n clip_feature = clip_feature.unsqueeze(1)\n clip_feature = torch.cat([clip_feature, x], dim=1) #### [bs, T, 512]\n return clip_feature\n \n def get_mask(self, sz1, sz2):\n mask = (torch.triu(torch.ones(sz1, sz2)) == 1).transpose(0, 1)\n mask = mask.float()\n mask = mask.masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n mask.requires_grad = False\n return mask\n\n def forward(self, x, timesteps, y=None):\n \"\"\"\n x: [batch_size, njoints, nfeats, max_frames], denoted x_t in the paper\n timesteps: [batch_size] (int)\n \"\"\"\n \n results = {}\n emb = self.embed_timestep(timesteps) # [1, bs, d]\n x = x.to(emb.dtype)\n\n real_length = x.shape[-1]\n\n if self.encode_full != 0 and x.shape[-1] < self.num_frames:\n extension = torch.zeros([x.shape[0], x.shape[1], x.shape[2], self.num_frames - x.shape[-1]], device=x.device, dtype=x.dtype)\n x = torch.cat([x, extension], dim=-1)\n\n if self.encode_full == 1:\n latent = self.code_full(x) ### [seq, bs, 512]\n current = self.input_process(x) \n latent = latent.repeat(current.shape[0], 1, 1)\n current = current + latent\n elif self.encode_full == 2:\n latent = self.code_full(x) ### [seq, bs, 512]\n current = self.input_process(x) #### [seq, bs, 512]\n latent = latent.repeat(current.shape[0], 1, 1)\n current = torch.cat([current, latent], dim=2)\n current = self.encode_compress(current)\n else:\n current = self.input_process(x) #### [seq, bs, 512]\n\n force_mask = y.get('uncond', False)\n if 'text' in self.cond_mode:\n enc_text = self.clip_text_embedding(y['text']).to(emb.dtype) ### MASK_COND 会按照一定的比例把 batch_size 中的一部分文本句整句换成 [0, 0, ... 
0]\n txt_emb = self.embed_text(enc_text)\n txt_emb = self.mask_cond(txt_emb, force_mask=force_mask)\n \n if len(txt_emb.shape) == 3:\n txt_emb = txt_emb.permute(1, 0, 2)\n else:\n txt_emb = txt_emb.unsqueeze(0)\n else:\n txt_emb = None\n\n if txt_emb is not None:\n all_emb = txt_emb\n else:\n all_emb = torch.zeros_like(emb)\n\n if self.arch in [\"trans_enc\", \"llama_encoder\"] and txt_emb is not None:\n if self.txt_tokens == 1:\n word_embedding = all_emb[1::, :, :]\n global_embedding = all_emb[0:1, :, :].repeat(word_embedding.shape[0], 1, 1)\n all_emb = word_embedding + global_embedding\n emb = emb.repeat(all_emb.shape[0], 1, 1)\n emb += all_emb\n elif self.txt_tokens == 2:\n word_embedding = all_emb[1::, :, :]\n global_embedding = all_emb[0:1, :, :].repeat(word_embedding.shape[0], 1, 1)\n emb = emb.repeat(word_embedding.shape[0], 1, 1)\n concat_embedding = torch.cat([emb, global_embedding, word_embedding], dim=2)\n emb = self.condition_compress(concat_embedding)\n else:\n emb += all_emb\n elif txt_emb is not None:\n if self.txt_tokens == 1:\n emb = emb.repeat(all_emb.shape[0], 1, 1)\n emb += all_emb\n elif self.txt_tokens == 2:\n emb = emb.repeat(all_emb.shape[0], 1, 1)\n concat_embedding = torch.cat([emb, all_emb], dim=2)\n emb = self.condition_compress(concat_embedding) \n else:\n emb += all_emb \n else:\n emb = emb.repeat(all_emb.shape[0], 1, 1)\n emb += all_emb\n\n if self.arch in [\"trans_enc\", \"llama_encoder\"]:\n real_token_length = emb.shape[0] ######### 用来截断输出,只保留真正的output\n elif self.arch in [\"llama_decoder\"]:\n real_token_length = 1\n\n if self.arch in [\"trans_enc\", \"llama_encoder\"]:\n xseq = torch.cat([emb, current], dim=0)\n\n if self.arch in [\"trans_enc\"] or self.position_type == \"static\":\n xseq = self.sequence_pos_encoder(xseq)\n\n output = self.seqTransEncoder(xseq)\n\n elif self.arch in [\"llama_decoder\"]:\n if emb.shape[0] == 1:\n emb = emb.repeat(1+self.condition_length, 1, 1)\n\n xseq = torch.cat([emb[0:1], current], dim=0)\n word_tokens = emb[1::]\n\n if self.position_type == \"static\":\n xseq = self.sequence_pos_encoder(xseq)\n \n output = self.seqTransEncoder(xseq, word_tokens=word_tokens)\n\n output = output[real_token_length:]\n output = self.output_process(output) # [bs, njoints, nfeats, nframes]\n output = output[:, :, :, :real_length]\n results[\"output\"] = output\n return results\n \n def _apply(self, fn):\n super()._apply(fn)\n\n def train(self, *args, **kwargs):\n super().train(*args, **kwargs)"
},
{
"identifier": "gaussian_diffusion",
"path": "mdm/diffusion/gaussian_diffusion.py",
"snippet": "def get_named_beta_schedule(schedule_name, num_diffusion_timesteps, scale_betas=1.):\ndef betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):\n def is_vb(self):\n def __init__(\n self,\n *,\n betas,\n model_mean_type,\n model_var_type,\n loss_type,\n rescale_timesteps=False,\n rep=\"t2m\"\n ):\n def masked_l2(self, a, b, mask, addition_rotate_mask):\n def q_mean_variance(self, x_start, t):\n def q_sample(self, x_start, t, noise=None, model_kwargs=None):\n def q_posterior_mean_variance(self, x_start, x_t, t):\n def p_mean_variance(\n self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None\n ):\n def process_xstart(x):\n def _predict_xstart_from_eps(self, x_t, t, eps):\n def _predict_xstart_from_xprev(self, x_t, t, xprev):\n def _scale_timesteps(self, t):\n def p_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n const_noise=False,\n ):\n def p_sample_loop(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n skip_timesteps=0,\n init_image=None,\n randomize_class=False,\n cond_fn_with_grad=False,\n dump_steps=None,\n const_noise=False,\n unfolding_handshake=0, # 0 means no unfolding\n eval_mask=None\n\n ):\n def p_sample_loop_progressive(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n skip_timesteps=0,\n init_image=None,\n randomize_class=False,\n cond_fn_with_grad=False,\n const_noise=False,\n eval_mask=None\n ):\n def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):\ndef _extract_into_tensor(arr, timesteps, broadcast_shape):\nclass ModelMeanType(enum.Enum):\nclass ModelVarType(enum.Enum):\nclass LossType(enum.Enum):\nclass GaussianDiffusion:\n PREVIOUS_X = enum.auto() # the model predicts x_{t-1}\n START_X = enum.auto() # the model predicts x_0\n EPSILON = enum.auto() # the model predicts epsilon\n LEARNED = enum.auto()\n FIXED_SMALL = enum.auto()\n FIXED_LARGE = enum.auto()\n LEARNED_RANGE = enum.auto()\n MSE = enum.auto() # use raw MSE loss (and KL when learning variances)\n RESCALED_MSE = (\n enum.auto()\n ) # use raw MSE loss (with RESCALED_KL when learning variances)\n KL = enum.auto() # use the variational lower-bound\n RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB\n B, C = x.shape[:2]"
},
{
"identifier": "SpacedDiffusion",
"path": "mdm/diffusion/respace.py",
"snippet": "class SpacedDiffusion(GaussianDiffusion):\n \"\"\"\n A diffusion process which can skip steps in a base diffusion process.\n\n :param use_timesteps: a collection (sequence or set) of timesteps from the\n original diffusion process to retain.\n :param kwargs: the kwargs to create the base diffusion process.\n \"\"\"\n\n def __init__(self, use_timesteps, **kwargs):\n self.use_timesteps = set(use_timesteps)\n self.timestep_map = []\n self.original_num_steps = len(kwargs[\"betas\"])\n\n base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa\n last_alpha_cumprod = 1.0\n new_betas = []\n for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):\n if i in self.use_timesteps:\n new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)\n last_alpha_cumprod = alpha_cumprod\n self.timestep_map.append(i)\n kwargs[\"betas\"] = np.array(new_betas)\n super().__init__(**kwargs)\n\n def p_mean_variance(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)\n\n def training_losses(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().training_losses(self._wrap_model(model), *args, **kwargs)\n\n def _wrap_model(self, model):\n if isinstance(model, _WrappedModel):\n return model\n return _WrappedModel(\n model, self.timestep_map, self.rescale_timesteps, self.original_num_steps\n )\n\n def _scale_timesteps(self, t):\n # Scaling is done by the wrapped model.\n return t"
},
{
"identifier": "space_timesteps",
"path": "mdm/diffusion/respace.py",
"snippet": "def space_timesteps(num_timesteps, section_counts):\n \"\"\"\n Create a list of timesteps to use from an original diffusion process,\n given the number of timesteps we want to take from equally-sized portions\n of the original process.\n\n For example, if there's 300 timesteps and the section counts are [10,15,20]\n then the first 100 timesteps are strided to be 10 timesteps, the second 100\n are strided to be 15 timesteps, and the final 100 are strided to be 20.\n\n If the stride is a string starting with \"ddim\", then the fixed striding\n from the DDIM paper is used, and only one section is allowed.\n\n :param num_timesteps: the number of diffusion steps in the original\n process to divide up.\n :param section_counts: either a list of numbers, or a string containing\n comma-separated numbers, indicating the step count\n per section. As a special case, use \"ddimN\" where N\n is a number of steps to use the striding from the\n DDIM paper.\n :return: a set of diffusion steps from the original process to use.\n \"\"\"\n if isinstance(section_counts, str):\n if section_counts.startswith(\"ddim\"):\n desired_count = int(section_counts[len(\"ddim\") :])\n for i in range(1, num_timesteps):\n if len(range(0, num_timesteps, i)) == desired_count:\n return set(range(0, num_timesteps, i))\n raise ValueError(\n f\"cannot create exactly {num_timesteps} steps with an integer stride\"\n )\n section_counts = [int(x) for x in section_counts.split(\",\")]\n size_per = num_timesteps // len(section_counts)\n extra = num_timesteps % len(section_counts)\n start_idx = 0\n all_steps = []\n for i, section_count in enumerate(section_counts):\n size = size_per + (1 if i < extra else 0)\n if size < section_count:\n raise ValueError(\n f\"cannot divide section of {size} steps into {section_count}\"\n )\n if section_count <= 1:\n frac_stride = 1\n else:\n frac_stride = (size - 1) / (section_count - 1)\n cur_idx = 0.0\n taken_steps = []\n for _ in range(section_count):\n taken_steps.append(start_idx + round(cur_idx))\n cur_idx += frac_stride\n all_steps += taken_steps\n start_idx += size\n return set(all_steps)"
},
{
"identifier": "InpaintingGaussianDiffusion",
"path": "mdm/diffusion/respace.py",
"snippet": "class InpaintingGaussianDiffusion(SpacedDiffusion):\n def q_sample(self, x_start, t, noise=None, model_kwargs=None):\n \"\"\"\n overrides q_sample to use the inpainting mask\n \n same usage as in GaussianDiffusion\n \"\"\"\n if noise is None:\n noise = th.randn_like(x_start)\n assert noise.shape == x_start.shape\n\n bs, feat, _, frames = noise.shape\n noise *= 1. - model_kwargs['y']['inpainting_mask']\n\n return (\n _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start\n + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)\n * noise\n )\n \n def p_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n const_noise=False,\n ):\n \"\"\"\n overrides p_sample to use the inpainting mask\n \n same usage as in GaussianDiffusion\n \"\"\"\n out = self.p_mean_variance(\n model,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n model_kwargs=model_kwargs,\n )\n noise = th.randn_like(x)\n if const_noise:\n noise = noise[[0]].repeat(x.shape[0], 1, 1, 1)\n noise *= 1. - model_kwargs['y']['inpainting_mask']\n\n nonzero_mask = (\n (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))\n ) # no noise when t == 0\n if cond_fn is not None:\n out[\"mean\"] = self.condition_mean(\n cond_fn, out, x, t, model_kwargs=model_kwargs\n )\n sample = out[\"mean\"] + nonzero_mask * th.exp(0.5 * out[\"log_variance\"]) * noise\n return {\"sample\": sample, \"pred_xstart\": out[\"pred_xstart\"]}"
},
{
"identifier": "TRT_MDM",
"path": "mdm/model/trt_model.py",
"snippet": "class TRT_MDM(nn.Module):\n def __init__(self, mode, json_dict, device=\"cuda\"):\n super(TRT_MDM, self).__init__()\n self.device = device\n self.json_dict = json_dict\n self.clip_model = DynamicModel(self.json_dict[f\"{mode}2\"], self.device)\n self.decoder = DynamicModel(self.json_dict[f\"{mode}1\"], self.device)\n self.num_frames = 196\n self.njoints = 269\n self.nfeats = 1\n self.condition_length = 77\n\n def mask_cond(self, cond, force_mask=False):\n bs = cond.shape[0]\n if force_mask:\n return torch.zeros_like(cond)\n else:\n return cond\n\n def clip_text_embedding(self, raw_text):\n default_context_length = self.condition_length\n texts = clip.tokenize(raw_text, context_length=default_context_length, truncate=True) # [bs, context_length] # if n_tokens > context_length -> will truncate\n texts = texts.to(self.device)\n\n if len(self.clip_model.inshape) == 0 or self.clip_model.inshape[0] != texts.shape:\n self.clip_model.set_shape([[*texts.shape]], [[texts.shape[0], self.condition_length+1, 512]]) \n\n clip_feature = self.clip_model(texts)\n return clip_feature\n\n @torch.no_grad()\n def forward(self, x, timesteps, y=None): \n force_mask = y.get('uncond', False)\n txt_emb = self.clip_text_embedding(y['text']) ### MASK_COND 会按照一定的比例把 batch_size 中的一部分文本句整句换成 [0, 0, ... 0]\n txt_emb = self.mask_cond(txt_emb, force_mask=force_mask)\n \n if len(txt_emb.shape) == 3:\n txt_emb = txt_emb.permute(1, 0, 2)\n else:\n txt_emb = txt_emb.unsqueeze(0)\n\n real_frame = x.shape[-1]\n if real_frame < self.num_frames:\n extension = torch.zeros([x.shape[0], x.shape[1], x.shape[2], self.num_frames - x.shape[-1]], device=x.device, dtype=x.dtype)\n x = torch.cat([x, extension], dim=-1)\n\n if len(self.decoder.inshape) == 0 or self.decoder.inshape[0] != x.shape:\n self.decoder.set_shape([[*x.shape], [*timesteps.shape], [*txt_emb.shape]], [[*x.shape]])\n\n output = self.decoder([x, timesteps, txt_emb])\n output = output[:, :, :, :real_frame]\n\n return {\"output\":output}"
}
] | from mdm.model.mdm import MDM
from mdm.diffusion import gaussian_diffusion as gd
from mdm.diffusion.respace import SpacedDiffusion, space_timesteps, InpaintingGaussianDiffusion
from mdm.model.trt_model import TRT_MDM | 7,562 |
def load_model_wo_clip(model, state_dict):
print("load model checkpoints without clip")
try:
new_state_dict = {}
for key, value in state_dict.items():
if "in_proj" in key:
keyq = key.replace("in_proj_weight", "wq.weight")
keyk = key.replace("in_proj_weight", "wk.weight")
keyv = key.replace("in_proj_weight", "wv.weight")
inshape = value.shape[0] // 3
valueq = value[:inshape]
valuek = value[inshape:inshape * 2]
valuev = value[inshape * 2:]
new_state_dict[keyq] = valueq
new_state_dict[keyk] = valuek
new_state_dict[keyv] = valuev
elif "out_proj" in key:
newkey = key.replace("out_proj", "wo")
new_state_dict[newkey] = value
else:
new_state_dict[key] = value
missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
except:
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
print(unexpected_keys)
other_miss = []
for key in missing_keys:
if not key.startswith('clip_model.'):
other_miss.append(key)
print(other_miss)
assert all([k.startswith('clip_model.') for k in missing_keys])
def load_ft_model_wo_clip(model, state_dict):
print("load model checkpoints without clip")
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
print(unexpected_keys)
# for name, value in model.named_parameters():
# if "seqTransEncoder" in name and "self_attn" in name:
# value.requires_grad = False
# if name.startswith("code_full") or name.startswith("encode_compress") or name.startswith("input_process"):
# value.requires_grad = False
assert all([k.startswith('clip_pose_encoder.') for k in unexpected_keys])
# assert all([k.startswith('clip_model.') or k.startswith('clip_pose_encoder.') or k.startswith('embed_text.') for k in missing_keys])
def create_model_and_diffusion(args, mode="text", json_dict=None):
    model = MDM(**get_model_args(args), json_dict=json_dict)
    diffusion = create_gaussian_diffusion(args, mode)
    return model, diffusion
def create_trt_model(args, model, mode="text", json_dict=None, device="cuda"):
    model = TRT_MDM(model, json_dict, device=device)
    diffusion = create_gaussian_diffusion(args, mode)
    return model, diffusion
def get_model_args(args):
    # default args
    clip_version = 'ViT-B/32'
    if args.unconstrained:
        cond_mode = 'no_cond'
    elif args.dataset in ['kit', 'humanml']:
        cond_mode = "text"
    activation = args.trans_activate if args.arch != "trans_enc" else "gelu"
    if args.dataset == 'humanml':
        njoints = 263
        nfeats = 1
    elif args.dataset == 'kit':
        njoints = 251
        nfeats = 1
    if args.rep == "smr":
        njoints += 6
        nfeats = 1
    return {'njoints': njoints, 'nfeats': nfeats, 'latent_dim': args.latent_dim, 'ff_size': args.ff_size, 'num_layers': args.layers, 'num_heads': args.heads,
            'dropout': 0.1, 'activation': activation, 'cond_mode': cond_mode, 'cond_mask_prob': args.cond_mask_prob, 'arch': args.arch,
            'clip_version': clip_version, 'dataset': args.dataset, "local":args.local, "encode_full":args.encode_full, "txt_tokens":args.txt_tokens,
            "dataset_path":args.dataset_path, "num_frames":args.num_frames, "conv_bias":args.conv_bias, "conv_activate":args.conv_activate,
            "conv_norm":args.conv_norm}
def create_gaussian_diffusion(args, mode="text"):
    # default params
    predict_xstart = True # we always predict x_start (a.k.a. x0), that's our deal!
    steps = 1000
    scale_beta = 1. # no scaling
    timestep_respacing = '' # can be used for ddim sampling, we don't use it.
    learn_sigma = False
    rescale_timesteps = False
    betas = gd.get_named_beta_schedule(args.noise_schedule, steps, scale_beta)
    loss_type = gd.LossType.MSE
    if not timestep_respacing:
        timestep_respacing = [steps]
    if mode is not None and (mode.startswith("finetune_control") or mode == "control_length"):
        print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> inpainting diffusion model")
|
def load_model_wo_clip(model, state_dict):
print("load model checkpoints without clip")
try:
new_state_dict = {}
for key, value in state_dict.items():
if "in_proj" in key:
keyq = key.replace("in_proj_weight", "wq.weight")
keyk = key.replace("in_proj_weight", "wk.weight")
keyv = key.replace("in_proj_weight", "wv.weight")
inshape = value.shape[0] // 3
valueq = value[:inshape]
valuek = value[inshape:inshape * 2]
valuev = value[inshape * 2:]
new_state_dict[keyq] = valueq
new_state_dict[keyk] = valuek
new_state_dict[keyv] = valuev
elif "out_proj" in key:
newkey = key.replace("out_proj", "wo")
new_state_dict[newkey] = value
else:
new_state_dict[key] = value
missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
except:
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
print(unexpected_keys)
other_miss = []
for key in missing_keys:
if not key.startswith('clip_model.'):
other_miss.append(key)
print(other_miss)
assert all([k.startswith('clip_model.') for k in missing_keys])
def load_ft_model_wo_clip(model, state_dict):
print("load model checkpoints without clip")
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
print(unexpected_keys)
# for name, value in model.named_parameters():
# if "seqTransEncoder" in name and "self_attn" in name:
# value.requires_grad = False
# if name.startswith("code_full") or name.startswith("encode_compress") or name.startswith("input_process"):
# value.requires_grad = False
assert all([k.startswith('clip_pose_encoder.') for k in unexpected_keys])
# assert all([k.startswith('clip_model.') or k.startswith('clip_pose_encoder.') or k.startswith('embed_text.') for k in missing_keys])
def create_model_and_diffusion(args, mode="text", json_dict=None):
    model = MDM(**get_model_args(args), json_dict=json_dict)
    diffusion = create_gaussian_diffusion(args, mode)
    return model, diffusion
def create_trt_model(args, model, mode="text", json_dict=None, device="cuda"):
    model = TRT_MDM(model, json_dict, device=device)
    diffusion = create_gaussian_diffusion(args, mode)
    return model, diffusion
def get_model_args(args):
    # default args
    clip_version = 'ViT-B/32'
    if args.unconstrained:
        cond_mode = 'no_cond'
    elif args.dataset in ['kit', 'humanml']:
        cond_mode = "text"
    activation = args.trans_activate if args.arch != "trans_enc" else "gelu"
    if args.dataset == 'humanml':
        njoints = 263
        nfeats = 1
    elif args.dataset == 'kit':
        njoints = 251
        nfeats = 1
    if args.rep == "smr":
        njoints += 6
        nfeats = 1
    return {'njoints': njoints, 'nfeats': nfeats, 'latent_dim': args.latent_dim, 'ff_size': args.ff_size, 'num_layers': args.layers, 'num_heads': args.heads,
            'dropout': 0.1, 'activation': activation, 'cond_mode': cond_mode, 'cond_mask_prob': args.cond_mask_prob, 'arch': args.arch,
            'clip_version': clip_version, 'dataset': args.dataset, "local":args.local, "encode_full":args.encode_full, "txt_tokens":args.txt_tokens,
            "dataset_path":args.dataset_path, "num_frames":args.num_frames, "conv_bias":args.conv_bias, "conv_activate":args.conv_activate,
            "conv_norm":args.conv_norm}
def create_gaussian_diffusion(args, mode="text"):
    # default params
    predict_xstart = True # we always predict x_start (a.k.a. x0), that's our deal!
    steps = 1000
    scale_beta = 1. # no scaling
    timestep_respacing = '' # can be used for ddim sampling, we don't use it.
    learn_sigma = False
    rescale_timesteps = False
    betas = gd.get_named_beta_schedule(args.noise_schedule, steps, scale_beta)
    loss_type = gd.LossType.MSE
    if not timestep_respacing:
        timestep_respacing = [steps]
    if mode is not None and (mode.startswith("finetune_control") or mode == "control_length"):
        print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> inpainting diffusion model") | diffusion = InpaintingGaussianDiffusion | 4 | 2023-10-20 14:53:26+00:00 | 12k
pythonlessons/FinRock | experiments/testing_ppo_sinusoid.py | [
{
"identifier": "PdDataFeeder",
"path": "finrock/data_feeder.py",
"snippet": "class PdDataFeeder:\n def __init__(\n self, \n df: pd.DataFrame,\n indicators: list = [],\n min: float = None,\n max: float = None\n ) -> None:\n self._df = df\n self._min = min\n self._max = max\n self._indicators = indicators\n self._cache = {}\n\n assert isinstance(self._df, pd.DataFrame) == True, \"df must be a pandas.DataFrame\"\n assert 'timestamp' in self._df.columns, \"df must have 'timestamp' column\"\n assert 'open' in self._df.columns, \"df must have 'open' column\"\n assert 'high' in self._df.columns, \"df must have 'high' column\"\n assert 'low' in self._df.columns, \"df must have 'low' column\"\n assert 'close' in self._df.columns, \"df must have 'close' column\"\n\n assert isinstance(self._indicators, list) == True, \"indicators must be an iterable\"\n assert all(isinstance(indicator, Indicator) for indicator in self._indicators) == True, \"indicators must be a list of Indicator objects\"\n\n @property\n def min(self) -> float:\n return self._min or self._df['low'].min()\n \n @property\n def max(self) -> float:\n return self._max or self._df['high'].max()\n\n def __len__(self) -> int:\n return len(self._df)\n \n def __getitem__(self, idx: int, args=None) -> State:\n # Use cache to speed up training\n if idx in self._cache:\n return self._cache[idx]\n\n indicators = []\n for indicator in self._indicators:\n results = indicator(idx)\n if results is None:\n self._cache[idx] = None\n return None\n \n indicators.append(results)\n\n data = self._df.iloc[idx]\n state = State(\n timestamp=data['timestamp'],\n open=data['open'],\n high=data['high'],\n low=data['low'],\n close=data['close'],\n volume=data.get('volume', 0.0),\n indicators=indicators\n )\n self._cache[idx] = state\n\n return state\n \n def __iter__(self) -> State:\n \"\"\" Create a generator that iterate over the Sequence.\"\"\"\n for index in range(len(self)):\n yield self[index]"
},
{
"identifier": "TradingEnv",
"path": "finrock/trading_env.py",
"snippet": "class TradingEnv:\n def __init__(\n self,\n data_feeder: PdDataFeeder,\n output_transformer: typing.Callable = None,\n initial_balance: float = 1000.0,\n max_episode_steps: int = None,\n window_size: int = 50,\n reward_function: typing.Callable = simpleReward,\n metrics: typing.List[typing.Callable] = []\n ) -> None:\n self._data_feeder = data_feeder\n self._output_transformer = output_transformer\n self._initial_balance = initial_balance\n self._max_episode_steps = max_episode_steps if max_episode_steps is not None else len(data_feeder)\n self._window_size = window_size\n self._reward_function = reward_function\n self._metrics = metrics\n\n self._observations = Observations(window_size=window_size)\n self._observation_space = np.zeros(self.reset()[0].shape)\n self.action_space = 3\n\n @property\n def observation_space(self):\n return self._observation_space\n\n def _get_obs(self, index: int, balance: float=None) -> State:\n next_state = self._data_feeder[index]\n if next_state is None:\n return None\n\n if balance is not None:\n next_state.balance = balance\n\n return next_state\n \n def _get_terminated(self):\n return False\n \n def _take_action(self, action: int, order_size: float) -> typing.Tuple[int, float]:\n # validate action is in range\n assert (action in list(range(self.action_space))) == True, f'action must be in range {self.action_space}, received: {action}'\n\n # get last state and next state\n last_state, next_state = self._observations[-2:]\n\n # modify action to hold (0) if we are out of balance\n if action == 2 and last_state.allocation_percentage == 1.0:\n action = 0\n\n # modify action to hold (0) if we are out of assets\n elif action == 1 and last_state.allocation_percentage == 0.0:\n action = 0\n\n if action == 2: # buy\n next_state.allocation_percentage = order_size\n next_state.assets = last_state.balance * order_size / last_state.close\n next_state.balance = last_state.balance - (last_state.balance * order_size)\n\n elif action == 1: # sell\n next_state.allocation_percentage = 0.0\n next_state.balance = last_state.assets * order_size * last_state.close\n next_state.assets = 0.0\n\n else: # hold\n next_state.allocation_percentage = last_state.allocation_percentage\n next_state.assets = last_state.assets\n next_state.balance = last_state.balance\n\n return action, order_size\n \n @property\n def metrics(self):\n return self._metrics\n\n def _metricsHandler(self, observation: State):\n metrics = {}\n # Loop through metrics and update\n for metric in self._metrics:\n metric.update(observation)\n metrics[metric.name] = metric.result\n\n return metrics\n\n def step(self, action: int) -> typing.Tuple[State, float, bool, bool, dict]:\n\n index = self._env_step_indexes.pop(0)\n\n observation = self._get_obs(index)\n # update observations object with new observation\n self._observations.append(observation)\n\n order_size = 1.0\n action, order_size = self._take_action(action, order_size)\n reward = self._reward_function(self._observations)\n terminated = self._get_terminated()\n truncated = False if self._env_step_indexes else True\n info = {\n \"states\": [observation],\n \"metrics\": self._metricsHandler(observation)\n }\n\n transformed_obs = self._output_transformer.transform(self._observations)\n\n if np.isnan(transformed_obs).any():\n raise ValueError(\"transformed_obs contains nan values, check your data\")\n\n return transformed_obs, reward, terminated, truncated, info\n\n def reset(self) -> typing.Tuple[State, dict]:\n \"\"\" Reset the environment and 
return the initial state\n \"\"\"\n size = len(self._data_feeder) - self._max_episode_steps\n self._env_start_index = np.random.randint(0, size) if size > 0 else 0\n self._env_step_indexes = list(range(self._env_start_index, self._env_start_index + self._max_episode_steps))\n\n # Initial observations are the first states of the window size\n self._observations.reset()\n while not self._observations.full:\n obs = self._get_obs(self._env_step_indexes.pop(0), balance=self._initial_balance)\n if obs is None:\n continue\n # update observations object with new observation\n self._observations.append(obs)\n\n info = {\n \"states\": self._observations.observations,\n \"metrics\": {}\n }\n \n # reset metrics with last state\n for metric in self._metrics:\n metric.reset(self._observations.observations[-1])\n\n transformed_obs = self._output_transformer.transform(self._observations)\n if np.isnan(transformed_obs).any():\n raise ValueError(\"transformed_obs contains nan values, check your data\")\n \n # return state and info\n return transformed_obs, info\n\n def render(self):\n raise NotImplementedError\n\n def close(self):\n raise NotImplementedError"
},
{
"identifier": "PygameRender",
"path": "finrock/render.py",
"snippet": "class PygameRender:\n def __init__(\n self,\n window_size: int=100,\n screen_width: int=1440,\n screen_height: int=1080,\n top_offset: int=25,\n bottom_offset: int=25,\n candle_spacing: int=1,\n color_theme = ColorTheme(),\n frame_rate: int=30,\n render_balance: bool=True,\n ):\n # pygame window settings\n self.screen_width = screen_width\n self.screen_height = screen_height\n self.top_offset = top_offset\n self.bottom_offset = bottom_offset\n self.candle_spacing = candle_spacing\n self.window_size = window_size\n self.color_theme = color_theme\n self.frame_rate = frame_rate\n self.render_balance = render_balance\n\n self.mainWindow = MainWindow(\n width=self.screen_width,\n height=self.screen_height,\n top_offset=self.top_offset,\n bottom_offset=self.bottom_offset,\n window_size=self.window_size,\n candle_spacing=self.candle_spacing,\n font_ratio=self.color_theme.font_ratio\n )\n\n self._states = []\n\n try:\n import pygame\n self.pygame = pygame\n except ImportError:\n raise ImportError('Please install pygame (pip install pygame)')\n \n self.pygame.init()\n self.pygame.display.init()\n self.window = self.pygame.display.set_mode(self.mainWindow.screen_shape, self.pygame.RESIZABLE)\n self.clock = self.pygame.time.Clock()\n\n def reset(self):\n self._states = []\n \n def _prerender(func):\n \"\"\" Decorator for input data validation and pygame window rendering\"\"\"\n def wrapper(self, info: dict, rgb_array: bool=False):\n self._states += info.get('states', [])\n\n if not self._states or not bool(self.window._pixels_address):\n return\n\n for event in self.pygame.event.get():\n if event.type == self.pygame.QUIT:\n self.pygame.quit()\n return\n\n if event.type == self.pygame.VIDEORESIZE:\n self.mainWindow.screen_shape = (event.w, event.h)\n\n # pause if spacebar is pressed\n if event.type == self.pygame.KEYDOWN:\n if event.key == self.pygame.K_SPACE:\n print('Paused')\n while True:\n event = self.pygame.event.wait()\n if event.type == self.pygame.KEYDOWN:\n if event.key == self.pygame.K_SPACE:\n print('Unpaused')\n break\n if event.type == self.pygame.QUIT:\n self.pygame.quit()\n return\n \n self.mainWindow.screen_shape = self.pygame.display.get_surface().get_size()\n\n\n canvas = func(self, info)\n canvas = self.pygame.transform.scale(canvas, self.mainWindow.screen_shape)\n # The following line copies our drawings from `canvas` to the visible window\n self.window.blit(canvas, canvas.get_rect())\n self.pygame.display.update()\n self.clock.tick(self.frame_rate)\n\n if rgb_array:\n return self.pygame.surfarray.array3d(canvas)\n\n return wrapper\n \n def render_indicators(self, state: State, canvas: object, candle_offset: int, max_low: float, max_high: float):\n # connect last 2 points with a line\n for i, indicator in enumerate(state.indicators):\n for name, render_option in indicator[\"render_options\"].items():\n\n index = self._states.index(state)\n if not index:\n return\n last_state = self._states[index - 1]\n\n if render_option.render_type == RenderType.LINE:\n prev_render_option = last_state.indicators[i][\"render_options\"][name]\n if render_option.window_type == WindowType.MAIN:\n\n cur_value_map = self.mainWindow.map_price_to_window(render_option.value, max_low, max_high)\n prev_value_map = self.mainWindow.map_price_to_window(prev_render_option.value, max_low, max_high)\n\n elif render_option.window_type == WindowType.SEPERATE:\n\n cur_value_map = self.mainWindow.map_to_seperate_window(render_option.value, render_option.min, render_option.max)\n prev_value_map = 
self.mainWindow.map_to_seperate_window(prev_render_option.value, prev_render_option.min, prev_render_option.max)\n\n self.pygame.draw.line(canvas, render_option.color, \n (candle_offset - self.mainWindow.candle_width / 2, prev_value_map), \n (candle_offset + self.mainWindow.candle_width / 2, cur_value_map))\n \n elif render_option.render_type == RenderType.DOT:\n if render_option.window_type == WindowType.MAIN:\n self.pygame.draw.circle(canvas, render_option.color,\n (candle_offset, self.mainWindow.map_price_to_window(render_option.value, max_low, max_high)), 2)\n elif render_option.window == WindowType.SEPERATE:\n raise NotImplementedError('Seperate window for indicators is not implemented yet')\n \n def render_candle(self, state: State, canvas: object, candle_offset: int, max_low: float, max_high: float, font: object):\n assert isinstance(state, State) == True # check if state is a State object\n\n # Calculate candle coordinates\n candle_y_open = self.mainWindow.map_price_to_window(state.open, max_low, max_high)\n candle_y_close = self.mainWindow.map_price_to_window(state.close, max_low, max_high)\n candle_y_high = self.mainWindow.map_price_to_window(state.high, max_low, max_high)\n candle_y_low = self.mainWindow.map_price_to_window(state.low, max_low, max_high)\n\n # Determine candle color\n if state.open < state.close:\n # up candle\n candle_color = self.color_theme.up_candle\n candle_body_y = candle_y_close\n candle_body_height = candle_y_open - candle_y_close\n else:\n # down candle\n candle_color = self.color_theme.down_candle\n candle_body_y = candle_y_open\n candle_body_height = candle_y_close - candle_y_open\n\n # Draw candlestick wicks\n self.pygame.draw.line(canvas, self.color_theme.wick, \n (candle_offset + self.mainWindow.candle_width // 2, candle_y_high), \n (candle_offset + self.mainWindow.candle_width // 2, candle_y_low))\n\n # Draw candlestick body\n self.pygame.draw.rect(canvas, candle_color, (candle_offset, candle_body_y, self.mainWindow.candle_width, candle_body_height))\n\n # Compare with previous state to determine whether buy or sell action was taken and draw arrow\n index = self._states.index(state)\n if index > 0:\n last_state = self._states[index - 1]\n\n if last_state.allocation_percentage < state.allocation_percentage:\n # buy\n candle_y_low = self.mainWindow.map_price_to_window(last_state.low, max_low, max_high)\n self.pygame.draw.polygon(canvas, self.color_theme.buy, [\n (candle_offset - self.mainWindow.candle_width / 2, candle_y_low + self.mainWindow.spacing / 2), \n (candle_offset - self.mainWindow.candle_width * 0.1, candle_y_low + self.mainWindow.spacing), \n (candle_offset - self.mainWindow.candle_width * 0.9, candle_y_low + self.mainWindow.spacing)\n ])\n \n # add account_value label bellow candle\n if self.render_balance:\n text = str(int(last_state.account_value))\n buy_label = font.render(text, True, self.color_theme.text)\n label_width, label_height = font.size(text)\n canvas.blit(buy_label, (candle_offset - (self.mainWindow.candle_width + label_width) / 2, candle_y_low + self.mainWindow.spacing))\n\n elif last_state.allocation_percentage > state.allocation_percentage:\n # sell\n candle_y_high = self.mainWindow.map_price_to_window(last_state.high, max_low, max_high)\n self.pygame.draw.polygon(canvas, self.color_theme.sell, [\n (candle_offset - self.mainWindow.candle_width / 2, candle_y_high - self.mainWindow.spacing / 2), \n (candle_offset - self.mainWindow.candle_width * 0.1, candle_y_high - self.mainWindow.spacing), \n (candle_offset - 
self.mainWindow.candle_width * 0.9, candle_y_high - self.mainWindow.spacing)\n ])\n\n # add account_value label above candle\n if self.render_balance:\n text = str(int(last_state.account_value))\n sell_label = font.render(text, True, self.color_theme.text)\n label_width, label_height = font.size(text)\n canvas.blit(sell_label, (candle_offset - (self.mainWindow.candle_width + label_width) / 2, candle_y_high - self.mainWindow.spacing - label_height))\n\n @_prerender\n def render(self, info: dict):\n canvas = self.pygame.Surface(self.mainWindow.screen_shape)\n canvas.fill(self.color_theme.background)\n \n max_high = max([state.high for state in self._states[-self.window_size:]])\n max_low = min([state.low for state in self._states[-self.window_size:]])\n\n candle_offset = self.candle_spacing\n\n # Set font for labels\n font = self.pygame.font.SysFont(self.color_theme.font, self.mainWindow.font_size)\n\n for state in self._states[-self.window_size:]:\n\n # draw indicators\n self.render_indicators(state, canvas, candle_offset, max_low, max_high)\n\n # draw candle\n self.render_candle(state, canvas, candle_offset, max_low, max_high, font)\n\n # Move to the next candle\n candle_offset += self.mainWindow.candle_width + self.candle_spacing\n\n # Draw max and min ohlc values on the chart\n label_width, label_height = font.size(str(max_low))\n label_y_low = font.render(str(max_low), True, self.color_theme.text)\n canvas.blit(label_y_low, (self.candle_spacing + 5, self.mainWindow.height - label_height * 2))\n\n label_width, label_height = font.size(str(max_low))\n label_y_high = font.render(str(max_high), True, self.color_theme.text)\n canvas.blit(label_y_high, (self.candle_spacing + 5, label_height))\n\n return canvas"
},
{
"identifier": "MinMaxScaler",
"path": "finrock/scalers.py",
"snippet": "class MinMaxScaler:\n def __init__(self, min: float, max: float):\n self._min = min\n self._max = max\n \n def transform(self, observations: Observations) -> np.ndarray:\n\n assert isinstance(observations, Observations) == True, \"observations must be an instance of Observations\"\n\n transformed_data = []\n for state in observations:\n data = []\n for name in ['open', 'high', 'low', 'close']:\n value = getattr(state, name)\n transformed_value = (value - self._min) / (self._max - self._min)\n data.append(transformed_value)\n \n data.append(state.allocation_percentage)\n\n # append scaled indicators\n for indicator in state.indicators:\n for value in indicator[\"values\"].values():\n transformed_value = (value - indicator[\"min\"]) / (indicator[\"max\"] - indicator[\"min\"])\n data.append(transformed_value)\n\n transformed_data.append(data)\n\n return np.array(transformed_data)\n \n def __call__(self, observations) -> np.ndarray:\n return self.transform(observations)"
},
{
"identifier": "simpleReward",
"path": "finrock/reward.py",
"snippet": "def simpleReward(observations: Observations) -> float:\n \n assert isinstance(observations, Observations) == True, \"observations must be an instance of Observations\"\n\n last_state, next_state = observations[-2:]\n\n # buy\n if next_state.allocation_percentage > last_state.allocation_percentage:\n # check whether it was good or bad to buy\n order_size = next_state.allocation_percentage - last_state.allocation_percentage\n reward = (next_state.close - last_state.close) / last_state.close * order_size\n\n # sell\n elif next_state.allocation_percentage < last_state.allocation_percentage:\n # check whether it was good or bad to sell\n order_size = last_state.allocation_percentage - next_state.allocation_percentage\n reward = -1 * (next_state.close - last_state.close) / last_state.close * order_size\n\n # hold\n else:\n # check whether it was good or bad to hold\n ratio = -1 if not last_state.allocation_percentage else last_state.allocation_percentage\n reward = (next_state.close - last_state.close) / last_state.close * ratio\n \n return reward"
},
{
"identifier": "DifferentActions",
"path": "finrock/metrics.py",
"snippet": "class DifferentActions(Metric):\n def __init__(self, name: str=\"different_actions\") -> None:\n super().__init__(name=name)\n\n def update(self, state: State):\n super().update(state)\n\n if not self.prev_state:\n self.prev_state = state\n else:\n if state.allocation_percentage != self.prev_state.allocation_percentage:\n self.different_actions += 1\n\n self.prev_state = state\n\n @property\n def result(self):\n return self.different_actions\n \n def reset(self, prev_state: State=None):\n super().reset(prev_state)\n\n self.prev_state = prev_state\n self.different_actions = 0"
},
{
"identifier": "AccountValue",
"path": "finrock/metrics.py",
"snippet": "class AccountValue(Metric):\n def __init__(self, name: str=\"account_value\") -> None:\n super().__init__(name=name)\n\n def update(self, state: State):\n super().update(state)\n\n self.account_value = state.account_value\n\n @property\n def result(self):\n return self.account_value\n \n def reset(self, prev_state: State=None):\n super().reset(prev_state)\n \n self.account_value = prev_state.account_value if prev_state else 0.0"
},
{
"identifier": "MaxDrawdown",
"path": "finrock/metrics.py",
"snippet": "class MaxDrawdown(Metric):\n \"\"\" The Maximum Drawdown (MDD) is a measure of the largest peak-to-trough decline in the \n value of a portfolio or investment during a specific period\n\n The Maximum Drawdown Ratio represents the proportion of the peak value that was lost during \n the largest decline. It is a measure of the risk associated with a particular investment or \n portfolio. Investors and fund managers use the Maximum Drawdown and its ratio to assess the \n historical downside risk and potential losses that could be incurred.\n \"\"\"\n def __init__(self, name: str=\"max_drawdown\") -> None:\n super().__init__(name=name)\n\n def update(self, state: State):\n super().update(state)\n\n # Use min to find the trough value\n self.max_account_value = max(self.max_account_value, state.account_value)\n\n # Calculate drawdown\n drawdown = (state.account_value - self.max_account_value) / self.max_account_value\n\n # Update max drawdown if the current drawdown is greater\n self.max_drawdown = min(self.max_drawdown, drawdown)\n\n @property\n def result(self):\n return self.max_drawdown\n \n def reset(self, prev_state: State=None):\n super().reset(prev_state)\n\n self.max_account_value = prev_state.account_value if prev_state else 0.0\n self.max_drawdown = 0.0"
},
{
"identifier": "SharpeRatio",
"path": "finrock/metrics.py",
"snippet": "class SharpeRatio(Metric):\n \"\"\" The Sharpe Ratio, is a measure of the risk-adjusted performance of an investment or a portfolio. \n It helps investors evaluate the return of an investment relative to its risk.\n\n A higher Sharpe Ratio indicates a better risk-adjusted performance. Investors and portfolio managers \n often use the Sharpe Ratio to compare the risk-adjusted returns of different investments or portfolios. \n It allows them to assess whether the additional return earned by taking on additional risk is justified.\n \"\"\"\n def __init__(self, ratio_days=365.25, name: str='sharpe_ratio'):\n self.ratio_days = ratio_days\n super().__init__(name=name)\n\n def update(self, state: State):\n super().update(state)\n time_difference_days = (state.date - self.prev_state.date).days\n if time_difference_days >= 1:\n self.daily_returns.append((state.account_value - self.prev_state.account_value) / self.prev_state.account_value)\n self.account_values.append(state.account_value)\n self.prev_state = state\n \n @property\n def result(self):\n if len(self.daily_returns) == 0:\n return 0.0\n\n mean = np.mean(self.daily_returns)\n std = np.std(self.daily_returns)\n if std == 0:\n return 0.0\n \n sharpe_ratio = mean / std * np.sqrt(self.ratio_days)\n \n return sharpe_ratio\n \n def reset(self, prev_state: State=None):\n super().reset(prev_state)\n self.prev_state = prev_state\n self.account_values = []\n self.daily_returns = []"
},
{
"identifier": "BolingerBands",
"path": "finrock/indicators.py",
"snippet": "class BolingerBands(Indicator):\n \"\"\" Volatility indicator\n\n Bollinger Bands are a type of price envelope developed by John BollingerOpens in a new window. (Price envelopes define \n upper and lower price range levels.) Bollinger Bands are envelopes plotted at a standard deviation level above and \n below a simple moving average of the price. Because the distance of the bands is based on standard deviation, they \n adjust to volatility swings in the underlying price.\n\n Bollinger Bands use 2 parameters, Period and Standard Deviations, StdDev. The default values are 20 for period, and 2 \n for standard deviations, although you may customize the combinations.\n\n Bollinger bands help determine whether prices are high or low on a relative basis. They are used in pairs, both upper\n and lower bands and in conjunction with a moving average. Further, the pair of bands is not intended to be used on its own. \n Use the pair to confirm signals given with other indicators.\n \"\"\"\n def __init__(\n self, \n data: pd.DataFrame, \n period: int=20, \n std: int=2,\n target_column: str='close',\n render_options: dict={}\n ):\n self._period = period\n self._std = std\n self._names = ['SMA', 'BB_up', 'BB_dn']\n super().__init__(data, target_column, render_options)\n\n @property\n def min(self):\n return self._data['BB_dn'].min()\n \n @property\n def max(self):\n return self._data['BB_up'].max()\n\n def compute(self):\n self._data['SMA'] = self._data[self.target_column].rolling(self._period).mean()\n self._data['BB_up'] = self._data['SMA'] + self._data[self.target_column].rolling(self._period).std() * self._std\n self._data['BB_dn'] = self._data['SMA'] - self._data[self.target_column].rolling(self._period).std() * self._std\n\n def default_render_options(self):\n return {name: RenderOptions(\n name=name,\n color=(100, 100, 255),\n window_type=WindowType.MAIN,\n render_type=RenderType.LINE,\n min=self.min,\n max=self.max\n ) for name in self._names}"
},
{
"identifier": "RSI",
"path": "finrock/indicators.py",
"snippet": "class RSI(Indicator):\n \"\"\" Momentum indicator\n\n The Relative Strength Index (RSI), developed by J. Welles Wilder, is a momentum oscillator that measures the speed and \n change of price movements. The RSI oscillates between zero and 100. Traditionally the RSI is considered overbought when \n above 70 and oversold when below 30. Signals can be generated by looking for divergences and failure swings. \n RSI can also be used to identify the general trend.\n \"\"\"\n def __init__(\n self, \n data: pd.DataFrame, \n period: int=14, \n target_column: str='close',\n render_options: dict={}\n ):\n self._period = period\n self._names = ['RSI']\n super().__init__(data, target_column, render_options)\n\n @property\n def min(self):\n return 0.0\n \n @property\n def max(self):\n return 100.0\n\n def compute(self):\n delta = self._data[self.target_column].diff()\n up = delta.clip(lower=0)\n down = -1 * delta.clip(upper=0)\n ema_up = up.ewm(com=self._period-1, adjust=True, min_periods=self._period).mean()\n ema_down = down.ewm(com=self._period-1, adjust=True, min_periods=self._period).mean()\n rs = ema_up / ema_down\n self._data['RSI'] = 100 - (100 / (1 + rs))\n\n def default_render_options(self):\n custom_options = {\n \"RSI0\": 0,\n \"RSI30\": 30,\n \"RSI70\": 70,\n \"RSI100\": 100\n }\n options = {name: RenderOptions(\n name=name,\n color=(100, 100, 255),\n window_type=WindowType.SEPERATE,\n render_type=RenderType.LINE,\n min=self.min,\n max=self.max\n ) for name in self._names}\n\n for name, value in custom_options.items():\n options[name] = RenderOptions(\n name=name,\n color=(192, 192, 192),\n window_type=WindowType.SEPERATE,\n render_type=RenderType.LINE,\n min=self.min,\n max=self.max,\n value=value\n )\n return options"
},
{
"identifier": "PSAR",
"path": "finrock/indicators.py",
"snippet": "class PSAR(Indicator):\n \"\"\" Parabolic Stop and Reverse (Parabolic SAR)\n\n The Parabolic Stop and Reverse, more commonly known as the\n Parabolic SAR,is a trend-following indicator developed by\n J. Welles Wilder. The Parabolic SAR is displayed as a single\n parabolic line (or dots) underneath the price bars in an uptrend,\n and above the price bars in a downtrend.\n\n https://school.stockcharts.com/doku.php?id=technical_indicators:parabolic_sar\n \"\"\"\n def __init__(\n self, \n data: pd.DataFrame, \n step: float=0.02, \n max_step: float=0.2,\n target_column: str='close',\n render_options: dict={}\n ):\n self._names = ['PSAR']\n self._step = step\n self._max_step = max_step\n super().__init__(data, target_column, render_options)\n\n @property\n def min(self):\n return self._data['PSAR'].min()\n \n @property\n def max(self):\n return self._data['PSAR'].max()\n\n def default_render_options(self):\n return {name: RenderOptions(\n name=name,\n color=(100, 100, 255),\n window_type=WindowType.MAIN,\n render_type=RenderType.DOT,\n min=self.min,\n max=self.max\n ) for name in self._names}\n\n def compute(self):\n high = self._data['high']\n low = self._data['low']\n close = self._data[self.target_column]\n\n up_trend = True\n acceleration_factor = self._step\n up_trend_high = high.iloc[0]\n down_trend_low = low.iloc[0]\n\n self._psar = close.copy()\n self._psar_up = pd.Series(index=self._psar.index, dtype=\"float64\")\n self._psar_down = pd.Series(index=self._psar.index, dtype=\"float64\")\n\n for i in range(2, len(close)):\n reversal = False\n\n max_high = high.iloc[i]\n min_low = low.iloc[i]\n\n if up_trend:\n self._psar.iloc[i] = self._psar.iloc[i - 1] + (\n acceleration_factor * (up_trend_high - self._psar.iloc[i - 1])\n )\n\n if min_low < self._psar.iloc[i]:\n reversal = True\n self._psar.iloc[i] = up_trend_high\n down_trend_low = min_low\n acceleration_factor = self._step\n else:\n if max_high > up_trend_high:\n up_trend_high = max_high\n acceleration_factor = min(\n acceleration_factor + self._step, self._max_step\n )\n\n low1 = low.iloc[i - 1]\n low2 = low.iloc[i - 2]\n if low2 < self._psar.iloc[i]:\n self._psar.iloc[i] = low2\n elif low1 < self._psar.iloc[i]:\n self._psar.iloc[i] = low1\n else:\n self._psar.iloc[i] = self._psar.iloc[i - 1] - (\n acceleration_factor * (self._psar.iloc[i - 1] - down_trend_low)\n )\n\n if max_high > self._psar.iloc[i]:\n reversal = True\n self._psar.iloc[i] = down_trend_low\n up_trend_high = max_high\n acceleration_factor = self._step\n else:\n if min_low < down_trend_low:\n down_trend_low = min_low\n acceleration_factor = min(\n acceleration_factor + self._step, self._max_step\n )\n\n high1 = high.iloc[i - 1]\n high2 = high.iloc[i - 2]\n if high2 > self._psar.iloc[i]:\n self._psar[i] = high2\n elif high1 > self._psar.iloc[i]:\n self._psar.iloc[i] = high1\n\n up_trend = up_trend != reversal # XOR\n\n if up_trend:\n self._psar_up.iloc[i] = self._psar.iloc[i]\n else:\n self._psar_down.iloc[i] = self._psar.iloc[i]\n\n # calculate psar indicator\n self._data['PSAR'] = self._psar"
},
{
"identifier": "SMA",
"path": "finrock/indicators.py",
"snippet": "class SMA(Indicator):\n \"\"\" Trend indicator\n\n A simple moving average (SMA) calculates the average of a selected range of prices, usually closing prices, by the number \n of periods in that range.\n\n The SMA is a technical indicator for determining if an asset price will continue or reverse a bull or bear trend. It is \n calculated by summing up the closing prices of a stock over time and then dividing that total by the number of time periods \n being examined. Short-term averages respond quickly to changes in the price of the underlying, while long-term averages are \n slow to react.\n\n https://www.investopedia.com/terms/s/sma.asp\n \"\"\"\n def __init__(\n self, \n data: pd.DataFrame, \n period: int=20, \n target_column: str='close',\n render_options: dict={}\n ):\n self._period = period\n self._names = [f'SMA{period}']\n super().__init__(data, target_column, render_options)\n\n @property\n def min(self):\n return self._data[self.names[0]].min()\n \n @property\n def max(self):\n return self._data[self.names[0]].max()\n \n def default_render_options(self):\n return {name: RenderOptions(\n name=name,\n color=(100, 100, 255),\n window_type=WindowType.MAIN,\n render_type=RenderType.LINE,\n min=self.min,\n max=self.max\n ) for name in self._names}\n\n def compute(self):\n self._data[self.names[0]] = self._data[self.target_column].rolling(self._period).mean()"
}
] | import numpy as np
import pandas as pd
import tensorflow as tf
from finrock.data_feeder import PdDataFeeder
from finrock.trading_env import TradingEnv
from finrock.render import PygameRender
from finrock.scalers import MinMaxScaler
from finrock.reward import simpleReward
from finrock.metrics import DifferentActions, AccountValue, MaxDrawdown, SharpeRatio
from finrock.indicators import BolingerBands, RSI, PSAR, SMA | 9,010 | tf.get_logger().setLevel('ERROR')
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
df = pd.read_csv('Datasets/random_sinusoid.csv')
df = df[-1000:]
pd_data_feeder = PdDataFeeder(
df,
indicators = [
BolingerBands(data=df, period=20, std=2),
RSI(data=df, period=14),
PSAR(data=df),
SMA(data=df, period=7),
SMA(data=df, period=25),
SMA(data=df, period=99),
]
)
env = TradingEnv(
data_feeder = pd_data_feeder,
| tf.get_logger().setLevel('ERROR')
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
df = pd.read_csv('Datasets/random_sinusoid.csv')
df = df[-1000:]
pd_data_feeder = PdDataFeeder(
df,
indicators = [
BolingerBands(data=df, period=20, std=2),
RSI(data=df, period=14),
PSAR(data=df),
SMA(data=df, period=7),
SMA(data=df, period=25),
SMA(data=df, period=99),
]
)
env = TradingEnv(
data_feeder = pd_data_feeder, | output_transformer = MinMaxScaler(min=pd_data_feeder.min, max=pd_data_feeder.max), | 3 | 2023-10-23 07:44:54+00:00 | 12k |
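The `simpleReward` snippet quoted in this record's context scales the relative close-price change by the size of the position change (with the sign flipped for sells, and an inverse bonus/penalty when holding out of the market). Below is a minimal standalone sketch of that logic; the stand-in `FakeState` class and the function name are assumptions for illustration only, not part of finrock:

from dataclasses import dataclass

@dataclass
class FakeState:
    # stand-in for finrock's State; only the fields the reward logic reads
    close: float
    allocation_percentage: float

def simple_reward_sketch(last: FakeState, nxt: FakeState) -> float:
    # re-implementation of the quoted simpleReward branches for two consecutive states
    price_change = (nxt.close - last.close) / last.close
    if nxt.allocation_percentage > last.allocation_percentage:   # buy
        return price_change * (nxt.allocation_percentage - last.allocation_percentage)
    if nxt.allocation_percentage < last.allocation_percentage:   # sell
        return -price_change * (last.allocation_percentage - nxt.allocation_percentage)
    # hold: follow the price when invested, otherwise reward falling prices
    ratio = last.allocation_percentage if last.allocation_percentage else -1
    return price_change * ratio

# buying the full position before a 2% rise yields a reward of 0.02
print(simple_reward_sketch(FakeState(100.0, 0.0), FakeState(102.0, 1.0)))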
hitlic/deepepochs | deepepochs/trainer.py | [
{
"identifier": "StopLoopException",
"path": "deepepochs/loops.py",
"snippet": "class StopLoopException(Exception):\r\n pass\r"
},
{
"identifier": "LoopException",
"path": "deepepochs/loops.py",
"snippet": "class LoopException(Exception):\r\n pass\r"
},
{
"identifier": "TensorTuple",
"path": "deepepochs/loops.py",
"snippet": "class TensorTuple(tuple):\r\n \"\"\"\r\n tuple of tensors\r\n \"\"\"\r\n def __new__(cls, tensors):\r\n if isinstance(tensors, torch.Tensor):\r\n tensors=(tensors,)\r\n return tuple.__new__(cls, tensors)\r\n\r\n @property\r\n def device(self):\r\n if len(self) > 0:\r\n return self[0].device\r\n else:\r\n return torch.device(type='cpu')\r\n\r\n def to(self, device, **kwargs):\r\n return TensorTuple(t.to(device, **kwargs) if isinstance(t, torch.Tensor) else t for t in self)\r\n\r\n def cpu(self):\r\n return TensorTuple(t.cpu() if isinstance(t, torch.Tensor) else t for t in self)\r\n\r\n def clone(self):\r\n return TensorTuple(t.clone() if isinstance(t, torch.Tensor) else t for t in self)\r\n\r\n def detach(self):\r\n return TensorTuple(t.detach() if isinstance(t, torch.Tensor) else t for t in self)\r\n\r\n @property\r\n def data(self):\r\n return TensorTuple(t.data if isinstance(t, torch.Tensor) else t for t in self)\r\n\r\n def float(self):\r\n return TensorTuple(t.float() if isinstance(t, torch.Tensor) else t for t in self)\r\n\r\n def long(self):\r\n return TensorTuple(t.long() if isinstance(t, torch.Tensor) else t for t in self)\r\n\r\n def int(self):\r\n return TensorTuple(t.int() if isinstance(t, torch.Tensor) else t for t in self)\r"
},
{
"identifier": "flatten_dict",
"path": "deepepochs/loops.py",
"snippet": "def flatten_dict(d, parent_key='', sep='.'):\r\n \"\"\"flatten a dict with dict as values\"\"\"\r\n items = []\r\n for k, v in d.items():\r\n new_key = f'{parent_key}{sep}{k}' if parent_key else k\r\n if isinstance(v, dict):\r\n items.extend(flatten_dict(v, new_key, sep).items())\r\n else:\r\n items.append((new_key, v))\r\n return dict(items)\r"
},
{
"identifier": "default_loss",
"path": "deepepochs/loops.py",
"snippet": "def default_loss(preds, targets):\r\n \"\"\"默认损失函数,直接返回模型预测结果,适用于模型直接返回损失值的情况。\"\"\"\r\n return preds\r"
},
{
"identifier": "concat_dicts",
"path": "deepepochs/loops.py",
"snippet": "def concat_dicts(dicts, to_np=True):\r\n if to_np:\r\n return {k: [to_numpy(d.get(k, 0)) for d in dicts] for k in keyset(dicts)}\r\n else:\r\n return {k: [d.get(k, 0) for d in dicts] for k in keyset(dicts)}\r"
},
{
"identifier": "to_numpy",
"path": "deepepochs/loops.py",
"snippet": "def to_numpy(data):\r\n \"\"\"将torch.Tensor或Tensor列表、Tensor字典转为numpy数组\"\"\"\r\n def to(d):\r\n if isinstance(d, torch.Tensor):\r\n return d.detach().cpu().numpy()\r\n else:\r\n return np.array(d, dtype=float)\r\n if isinstance(data, (list, tuple)):\r\n return [to(d) for d in data]\r\n elif isinstance(data, dict):\r\n return {k: to(v) for k, v in data.items()}\r\n else:\r\n return to(data)\r"
},
{
"identifier": "listify",
"path": "deepepochs/loops.py",
"snippet": "def listify(obj):\r\n if obj is None:\r\n return []\r\n if isinstance(obj, list):\r\n return obj\r\n if isinstance(obj, tuple):\r\n return list(obj)\r\n if isinstance(obj, (dict, str)):\r\n return [obj]\r\n if isinstance(obj, Iterable):\r\n return list(obj)\r\n return [obj]\r"
},
{
"identifier": "batch_size",
"path": "deepepochs/loops.py",
"snippet": "def batch_size(data):\r\n if isinstance(data, (list, tuple)):\r\n return data[0].shape[0]\r\n elif isinstance(data, torch.Tensor):\r\n return data.shape[0]\r\n elif hasattr(data, '__len__'):\r\n return len(data)\r\n else:\r\n return 1\r"
},
{
"identifier": "concat",
"path": "deepepochs/loops.py",
"snippet": "def concat(datas):\r\n if isinstance(datas[0], (list, tuple)):\r\n return TensorTuple([torch.concat(ds, dim=0) if ds[0].dim()> 1 else torch.concat(ds) for ds in zip(*datas)])\r\n else:\r\n return torch.concat(datas, dim=0) if datas[0].dim() > 1 else torch.concat(datas)\r"
},
{
"identifier": "detach_clone",
"path": "deepepochs/loops.py",
"snippet": "def detach_clone(data):\r\n \"\"\"对torch.Tensor或者Tensor列表、Tensor字典进行detach().clone()操作\"\"\"\r\n def to(d):\r\n if isinstance(d, torch.Tensor):\r\n return d.detach().clone()\r\n else:\r\n return d\r\n if isinstance(data, (list, tuple)):\r\n return [to(d) for d in data]\r\n elif isinstance(data, dict):\r\n return {k: to(v) for k, v in data.items()}\r\n else:\r\n return to(data)\r"
},
{
"identifier": "batches",
"path": "deepepochs/tools.py",
"snippet": "def batches(inputs, batch_size):\n \"\"\"\n 把inputs按batch_size进行划分\n \"\"\"\n is_list_input = isinstance(inputs, (list, tuple)) # inputs是否是多个输入组成的列表或元素\n start_idx = 0\n is_over = False\n while True:\n if is_list_input:\n batch = TensorTuple([data[start_idx: start_idx + batch_size] for data in inputs])\n is_over = len(batch[0]) > 0\n start_idx += len(batch[0])\n else:\n batch = inputs[start_idx: start_idx + batch_size]\n is_over = len(batch) > 0\n start_idx += len(batch)\n if is_over > 0:\n yield batch\n else:\n break"
},
{
"identifier": "Optimizer",
"path": "deepepochs/optimizer.py",
"snippet": "class Optimizer:\n def __init__(self, opt, scheduler=None, sched_on='epoch', sched_with_loss=False):\n \"\"\"\n 优化器组合,对优化器和学习率调度器进行统一管理。\n Args:\n opt: torch.optim.*\n scheduler: torch.optim.lr_scheduler.*\n sched_on: 学习率调整是每个epoch还是每个step\n sched_with_loss: scheduler.step方法是否需要损失作为参数(例如ReduceLROnPlateau)\n \"\"\"\n self.opt = opt\n self.scheduler = scheduler\n assert sched_on in ['step', 'epoch'], '`sched_on`取值为\"step\"或\"epoch\"!'\n self.sched_on = sched_on\n self.sched_with_loss = sched_with_loss\n\n def zero_grad(self):\n self.opt.zero_grad()\n\n def get_last_lr(self):\n return self.scheduler.get_last_lr() if self.scheduler is not None else None\n\n def step(self, at='step', loss=None):\n if at == 'step':\n self.opt.step()\n if self.sched_on == 'step':\n self.sched_step(loss)\n elif at == 'epoch':\n if self.sched_on == 'epoch':\n self.sched_step(loss)\n else:\n raise ValueError('Optimizer.step方法的`at`参数取值为\"step\"或\"epoch\"')\n\n def sched_step(self, loss):\n if self.scheduler is not None:\n if self.sched_with_loss:\n assert loss is not None, \"学习率调度要求损失作为参数,但`train_step`和`evaluate_step`都没有返回`loss`!\"\n self.scheduler.step(loss)\n else:\n self.scheduler.step()\n\n def state_dict(self):\n sched_state = None if self.scheduler is None else self.scheduler.state_dict()\n return {'opt_state': self.opt.state_dict(), 'sched_state': sched_state}\n\n def load_state_dict(self, state):\n opt_state, sched_state = state['opt_state'], state['sched_state']\n self.opt.load_state_dict(opt_state)\n if sched_state is not None and self.scheduler is not None:\n self.scheduler.load_state_dict(opt_state)\n\n @property\n def param_groups(self):\n return self.opt.param_groups\n\n def get_current_lr(self):\n for param_group in self.param_groups:\n return param_group['lr']"
},
{
"identifier": "Optimizers",
"path": "deepepochs/optimizer.py",
"snippet": "class Optimizers(list):\n \"\"\"\n 用于管理多个优化器组合(Optimizer),对多个优化器提供支持。\n \"\"\"\n def zero_grad(self):\n for opt in self:\n opt.zero_grad()\n\n def get_last_lr(self):\n return [opt.get_last_lr() for opt in self]\n\n def step(self, at='step', loss=None):\n for opt in self:\n opt.step(at, loss)\n\n def state_dict(self):\n return [opt.state_dict() for opt in self]\n\n def load_state_dict(self, states):\n for opt, state in zip(self, states):\n opt.load_state_dict(state)\n\n def get_current_lr(self):\n return [opt.get_current_lr() for opt in self]"
},
{
"identifier": "PatchBase",
"path": "deepepochs/patches.py",
"snippet": "class PatchBase(abc.ABC):\n \"\"\"\n 所有Patch对象的基类\n \"\"\"\n def __init__(self, name=None):\n \"\"\"\n Args:\n name: 显示在输出日志中的名称,当为空时使用指标函数的__name__属性\n \"\"\"\n super().__init__()\n self.name = name\n\n def __add__(self, obj):\n return self.__add(obj)\n\n def __radd__(self, obj):\n return self.__add(obj)\n\n def __call__(self):\n return self.forward()\n\n @abc.abstractmethod\n def forward(self):\n \"\"\"\n 基于当前Patch中保存的数据,计算一个结果(如指标值)并返回,被__call__方法自动调用。\n \"\"\"\n\n def __add(self, obj):\n if obj == 0:\n return self\n assert isinstance(obj, self.__class__), '相加的两个Patch的类型不一致!'\n return self.add(obj)\n\n @abc.abstractmethod\n def add(self, obj):\n \"\"\"\n 用于重载“+”运算符,将self和obj两个对象相加,得到一个新的对象。\n 注意:在相加之前检查self和obj是否能够相加\n \"\"\""
},
{
"identifier": "MeanPatch",
"path": "deepepochs/patches.py",
"snippet": "class MeanPatch(PatchBase):\n def __init__(self, metric, batch_preds, batch_targets=None, name=None):\n \"\"\"\n 用于累积多个mini-batch的指标值,计算Epoch的指标。\n Args:\n metric: 计算指标的函数(或其他适当的可调用对象),必须返回经过平均指标值。\n batch_pres: 一个mini_batch的模型预测\n batch_targets: 一个mini_batch的标签(当指标计算不需要标签时为空值)\n name: 显示在输出日志中的名称\n \"\"\"\n super().__init__(name)\n assert callable(metric), '指标`metric`应当是一个可调用对象!'\n self.metric = metric\n self.batch_size = len(batch_preds)\n m_value = metric(batch_preds, batch_targets)\n if isinstance(m_value, dict):\n self.batch_value = {k: v * self.batch_size for k, v in m_value.items()}\n else:\n self.batch_value = m_value * self.batch_size\n\n def forward(self):\n if isinstance(self.batch_value, dict):\n return {k: v / self.batch_size for k, v in self.batch_value.items()}\n else:\n return self.batch_value / self.batch_size\n\n def add(self, obj):\n assert self.metric is obj.metric, '相加的两个Patch的`metric`不一致'\n return add_patch_value(self, obj)"
},
{
"identifier": "TensorPatch",
"path": "deepepochs/patches.py",
"snippet": "class TensorPatch(PatchBase):\n def __init__(self, metric, batch_preds, batch_targets=None, name=None, single_batch=True):\n \"\"\"\n 用于累积多个mini-batch的preds和targets,计算Epoch的指标。\n 例如:\n batch 1的模型预测为preds1, 标签为targets1;\n batch 1的模型预测为preds2, 标签为targets2;\n m_fun 为指标计算函数;\n 计算两个batch的指标:\n p1 = Patch(m_fun, preds1, targets1)\n p2 = Patch(m_fun, preds2, targets2)\n p = 0 + p1 + p2 # 两个Patch可直接相加,而且可与0相加\n p = sum([p1, p2]) # 可利用sum进行运算\n p1() # batch 1上的指标值\n p2() # batch 2上的指标值\n p() # 两个batch上的指标值\n Args:\n metric: 计算指标的函数(或其他适当的可调用对象)\n batch_pres: 一个mini_batch的模型预测\n batch_targets: 一个mini_batch的标签(当指标计算不需要标签时为空值)\n name: 显示在输出日志中的名称\n single_batch: batch_preds, batch_targets中包含的是单个还是多个batch的Patch\n \"\"\"\n super().__init__(name)\n assert callable(metric), '指标`metric`应当是一个可调用对象!'\n self.metric = metric\n if single_batch: # 单个mini-batch的模型预测输出\n # 应对模型有多个输出的情况\n self.batch_preds = [batch_preds] if isinstance(batch_preds, (list, tuple)) else [[batch_preds]]\n else: # 多个mini-batch模型预测输出\n self.batch_preds = batch_preds\n if batch_targets is None:\n self.batch_targets = None\n else:\n if single_batch: # 单个mini-batch的标签数据\n # 应对模型有多个标签的情况\n self.batch_targets = [batch_targets] if isinstance(batch_targets, (list, tuple)) else [[batch_targets]]\n else: # 多个mini-batch的标签数据\n self.batch_targets = batch_targets\n\n self.concat = torch.concat if isinstance(self.batch_preds[0][0], torch.Tensor) else np.concatenate\n\n def forward(self):\n preds = [self.concat(bpreds, 0) for bpreds in zip(*self.batch_preds)]\n targets = None if self.batch_targets is None else [self.concat(btargets, 0) for btargets in zip(*self.batch_targets)]\n preds = preds[0] if len(preds) == 1 else preds\n targets = targets[0] if len(targets) == 1 else targets\n return self.metric(preds, targets)\n\n def add(self, obj):\n assert self.metric is obj.metric, '相加的两个Patch的`metric`不一致'\n new_preds = self.batch_preds + obj.batch_preds\n if self.batch_targets != None:\n assert obj.batch_targets is not None, '相加的两个Patch的`batch_targets`其中一个为None!'\n new_targets = self.batch_targets + obj.batch_targets\n else:\n new_targets = None\n return self.__class__(self.metric, new_preds, new_targets, self.name, single_batch=False)"
},
{
"identifier": "run_patch_dict",
"path": "deepepochs/patches.py",
"snippet": "def run_patch_dict(patch_dict):\n \"\"\"\n 计算一个Patch字典的指标值(计算Batch指标)\n \"\"\"\n return {patch_name(k, v): v() for k, v in patch_dict.items()}"
},
{
"identifier": "run_patch_dicts",
"path": "deepepochs/patches.py",
"snippet": "def run_patch_dicts(patch_dicts):\n \"\"\"\n 计算Patch字典的列表的指标值(计算Epoch指标)\n \"\"\"\n if len(patch_dicts) == 0:\n return None\n return {patch_name(k, patch_dicts[0][k]): sum(dic[k] for dic in patch_dicts if dic)() for k in keyset(patch_dicts)}"
},
{
"identifier": "CallbackPool",
"path": "deepepochs/callbacks/callback.py",
"snippet": "class CallbackPool(list):\n \"\"\"\n 用于管理、执行Callback方法的类\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def prepare(self):\n self.sort(key=lambda cbk: cbk.priority)\n\n def append(self, callback: Callback):\n assert isinstance(callback, Callback), '`callback`必须是Callback的子类对象!'\n return super().append(callback)\n\n def extend(self, callbacks: Iterable):\n assert all(isinstance(cbk, Callback) for cbk in callbacks), '`callbacks`中必须都是Callback的子类对象!'\n return super().extend(callbacks)\n\n def trigger(self, event, *args, **kwargs):\n if 'before' in event:\n cbk_ids = range(len(self))\n else:\n cbk_ids = range(len(self)-1, -1, -1)\n for i in cbk_ids:\n getattr(self[i], f'on_{event}')(*args, **kwargs)"
},
{
"identifier": "CallbackException",
"path": "deepepochs/callbacks/callback.py",
"snippet": "class CallbackException(Exception):\n pass"
},
{
"identifier": "DefaultCallback",
"path": "deepepochs/callbacks/default.py",
"snippet": "class DefaultCallback(Callback):\n def __init__(self, log_long, log_batch, log_tqdm):\n \"\"\"\n 默认启用的Callback,实现功能:\n 指标输出\n 学习率调度\n 为mini-batch构建每个指标的Patch\n Args:\n log_long: 指标输出为长格式(7位小说)还是短格式(4位小数)\n log_batch: 是否输出batch的指标值\n tqdm_iter: tqdm迭代对象\n \"\"\"\n super().__init__(priority=0)\n self.round_to = 7 if log_long else 4\n self.log_batch = log_batch\n self.epoch_width = 4\n self.batch_width = 5\n self.log_tqdm = log_tqdm\n self.tqdm_iter = None\n\n def on_before_fit(self, trainer, epochs):\n self.total_epochs = epochs\n\n def on_before_epoch(self, trainer, train_tasks, val_tasks, epoch_idx):\n self.epoch_idx = epoch_idx\n self.total_train_batchs = sum(task.batchs for task in train_tasks) # 所有训练任务总batch数量\n self.total_val_batchs = sum(task.batchs for task in val_tasks) # 所有验证任务总batch数量\n self.global_train_batch_idx = 0 # 当前训练batch\n self.global_val_batch_idx = 0 # 当前验证batch\n\n self.epoch_width = len(str(self.total_epochs))\n self.batch_width = len(str(max(self.total_val_batchs, self.total_train_batchs)))\n\n def on_after_train_batch(self, trainer, metrics, batch_idx):\n self.global_train_batch_idx += 1\n if self.log_batch and trainer.main_process:\n log_batch(metrics, self.epoch_idx+1, self.total_epochs, self.global_train_batch_idx, self.total_train_batchs, 'TRAIN', self.epoch_width, self.batch_width, self.round_to)\n\n def on_after_val_batch(self, trainer, metrics, batch_idx):\n self.global_val_batch_idx += 1\n if self.log_batch and trainer.main_process:\n log_batch(metrics, self.epoch_idx+1, self.total_epochs, self.global_val_batch_idx, self.total_val_batchs, 'VAL', self.epoch_width, self.batch_width, self.round_to)\n\n def on_after_epoch(self, trainer, train_tasks, val_tasks, train_metrics, val_metrics, epoch_idx):\n if trainer.main_process:\n if val_metrics:\n log_epoch({'train': train_metrics, 'val': val_metrics}, epoch_idx+1, self.total_epochs, self.epoch_width, self.round_to, self.tqdm_iter)\n else:\n log_epoch({'train': train_metrics}, epoch_idx+1, self.total_epochs, self.epoch_width, self.round_to, self.tqdm_iter)\n\n # 根据调度器的配置改变优化器学习率\n if val_metrics: # 优先使用验证损失\n sched_loss = val_metrics.get('loss')\n else:\n sched_loss = train_metrics.get('loss')\n trainer.opt.step(at='epoch', loss=sched_loss)\n\n def on_before_test_epochs(self, trainer, tasks):\n self.total_test_epochs = len(tasks)\n self.global_test_epoch_idx = 0\n self.total_test_batchs = sum(task.batchs for task in tasks)\n self.global_test_batch_idx = 0\n\n def on_after_test_epoch(self, trainer, task, metrics):\n if trainer.main_process:\n log_epoch({'test': metrics}, self.global_test_epoch_idx+1, self.total_test_epochs, self.epoch_width, self.round_to)\n self.global_test_epoch_idx += 1\n\n def on_after_test_batch(self, trainer, metrics, batch_idx):\n self.global_test_batch_idx += 1\n if self.log_batch and trainer.main_process:\n log_batch(metrics, self.global_test_epoch_idx+1, self.total_test_epochs, self.global_test_batch_idx, self.total_test_batchs, 'TEST', self.epoch_width, self.batch_width, self.round_to)\n\n def on_train_metrics(self, trainer, loss, model_out, batch_y, task):\n \"\"\"当前task的每个指标构建Patch,并注入task.batch_patch_dict\"\"\"\n task.batch_patch_dict = self.make_patch_dict(trainer, loss, model_out, batch_y, task.metrics, 'train')\n\n def on_val_metrics(self, trainer, loss, model_out, batch_y, task):\n \"\"\"当前task的每个指标构建Patch,并注入task.batch_patch_dict\"\"\"\n task.batch_patch_dict = self.make_patch_dict(trainer, loss, model_out, batch_y, task.metrics, 'val')\n\n def on_test_metrics(self, 
trainer, loss, model_out, batch_y, task):\n \"\"\"当前task的每个指标构建Patch,并注入task.batch_patch_dict\"\"\"\n task.batch_patch_dict = self.make_patch_dict(trainer, loss, model_out, batch_y, task.metrics, 'test')\n\n def make_patch_dict(self, trainer, loss, model_out, batch_y, metrics, stage):\n b_size = torch.tensor(batch_size(model_out)).to(trainer.device)\n # Accelerate 分布式训练时,获取各Process的数据\n if trainer.accelerator is not None and stage!='train': # 训练时仅在主线程上计算指标\n if loss is not None:\n loss = trainer.accelerator.gather_for_metrics(loss)\n b_size = trainer.accelerator.gather_for_metrics(b_size)\n loss = (loss * b_size).sum()\n b_size = b_size.sum()\n loss = loss/b_size\n model_out = trainer.accelerator.gather_for_metrics(model_out)\n batch_y = trainer.accelerator.gather_for_metrics(batch_y)\n\n patch_dict = {} if loss is None else {'loss': ValuePatch(loss, b_size)}\n for m in metrics:\n patch_dict[m.__name__] = trainer.metric_patch(m, model_out, batch_y)\n return patch_dict"
}
] | import math
import time
import torch
from datetime import datetime
from collections import defaultdict
from typing import List, Dict, Callable
from torch.optim import Adam
from torch.utils.data import DataLoader
from accelerate import Accelerator
from .loops import (StopLoopException, LoopException, TensorTuple,
flatten_dict, default_loss, concat_dicts, to_numpy, listify, batch_size, concat, detach_clone)
from .tools import batches
from .optimizer import Optimizer, Optimizers
from .patches import PatchBase, MeanPatch, TensorPatch, run_patch_dict, run_patch_dicts
from .callbacks import CallbackPool, DefaultCallback, CallbackException
from tqdm import tqdm
| 9,134 | batch_y_4cbk = batch_y
if self.do_loss:
loss_4cbk = self.loss_fn(model_out, batch_y)
else:
loss_4cbk = None
self.trainer.callbacks.trigger(f'{self.stage}_metrics', trainer=self.trainer, loss=loss_4cbk, model_out=model_out_4cbk, batch_y=batch_y_4cbk, task=self.task)
return loss_4cbk
class TrainerBase:
def __init__(self, model,
loss=None,
opt=None,
epochs=1000,
device=None,
callbacks=None,
metrics=None,
metric_patch:['mean', 'tensor']='tensor',
resume=False,
running_id=None,
hyper_params=None,
log_long=False,
log_batch=True,
log_tqdm=False,
show_info=True,
compile_model=False,
grad_accumulate_steps=1,
):
"""
Args:
model: Pytorch模型(nn.Module)
loss: 损失函数
opt: 优化器,或优化器列表;优化器是Pytorch优化器或deepepochs.Optimizer对象
epochs [int]: 迭代次数
device [str]: 加速设备,可取值包括
- cpu、cuda、mps等Pytorch支持的设备
- Accelerator对象,利用Hugging Face Accelerate实现多机多卡或混合精度训练
callbacks [List[Callback]]: Callback或Callback列表。
metrics [Callable]: 指标函数列表;通用于训练、验证和测试。
metric_patch [PatchBase]: 封装metrics所用的Patch类型,可选项为 mean 或 tensor
resume [bool, int, str]: 是否从logs文件平中的Checkpoint加载
- False表示不加载
- True表示从最新的Checkpoint加载
- int、str表示加载相应ID的Checkpoint
running_id [int, str, None]: 当前训练的运行编号,用于指定日志和checkpoint的文件夹名
hyper_params [dict, None]: 调参所关注的重要超参数,用于写入日志文件辅助调参
log_long [bool]: 指标输出为长格式(7位小数)还是短格式(4位小数)
log_batch [bool]: 训练过程中是否每个batch输出一次指标值
log_tqdm [bool]: 是否使用tqdm显示进度
compile_model [bool]: 利用PyTorch 2.x对模型compile以提升速度(暂不支持mps、Windows [v2.1])
grad_accumulate_steps [int]: 累积梯度更新时的累积次数,大于1表示启用累积梯度更新
"""
self.show_info = show_info
# 检测与配置加速设备
if device is not None:
self.device = device
elif torch.cuda.is_available():
self.device = 'cuda'
elif torch.backends.mps.is_available() and not compile_model:
self.device = 'mps'
else:
self.device = 'cpu'
# Pytorch支持的设备类型
device_types = ['cpu', 'cuda', 'ipu', 'xpu', 'mkldnn', 'opengl', 'opencl',
'ideep', 'hip', 've', 'fpga', 'ort', 'xla', 'lazy', 'vulkan',
'mps', 'meta', 'hpu', 'mtia', 'privateuseone']
# 使用Accelerate,用于实现分布式或混合精度训练
if isinstance(self.device, Accelerator):
self.accelerator = self.device
self.device = self.accelerator.device
self.main_process = self.accelerator.is_main_process # 是否主进程
else:
assert str(self.device).split(':', maxsplit=1)[0] in device_types, f'Pytorch不支持的{self.device}设备!\nPytorch支持的设备有:{device_types}'
self.accelerator = None
self.main_process = True
# 配置模型
if compile_model:
model = torch.compile(model)
self.model = ModelWrapper(model, self).to(self.device)
# 梯度累积次数
assert isinstance(grad_accumulate_steps, int) and grad_accumulate_steps > 0, '梯度累积次数`grad_accumulate_steps`必须为正整数!'
self.grad_accumulate_steps = grad_accumulate_steps
if self.accelerator is not None and self.accelerator.gradient_accumulation_steps > 1:
# 优先使用accelerator中的gradient_accumulation_steps
self.grad_accumulate_steps = self.accelerator.gradient_accumulation_steps
# 配置损失函数
if loss is None:
self.loss = LossWrapper(default_loss, self)
else:
self.loss = LossWrapper(loss, self)
# 配置优化器
if opt is None:
self.opt = Optimizer(Adam(model.parameters(), lr=0.001))
elif isinstance(opt, torch.optim.Optimizer):
self.opt = Optimizer(opt)
elif isinstance(opt, (Optimizer, Optimizers)): # Optimizers是多个Optimizer的列表
self.opt = opt
elif isinstance(opt, (list, tuple)): # 多个优化器的情况
opt_lst = [Optimizer(o) if isinstance(o, torch.optim.Optimizer) else o for o in opt]
assert all(isinstance(o, Optimizer) for o in opt_lst), "优化器参数存在错误!"
self.opt = Optimizers(opt_lst)
else:
raise ValueError('`opt`参数取值错误!')
# 迭代次数
self.max_epochs = epochs
# 起始迭代
self.init_epoch = 0
# 配置Callbacks
callbacks = listify(callbacks)
self.log_tqdm = log_tqdm
log_batch = False if log_tqdm else log_batch
| """
@author: liuchen
"""
class EpochTask:
"""一个Epoch的训练、验证或测试任务"""
def __init__(self, dataloader, metrics=None, do_loss=True, **step_args):
"""
Args:
dataloader: pytorch Dataloader
metrics: 指标函数列表
do_loss: 验证和测试中是否计算据损失
step_args: 其他需要传递给`step`、`train_step`、`val_step`、`test_step`和`evaluate`方法的参数
"""
self.dataloader = dataloader
self.batchs = len(dataloader)
self.metrics = listify(metrics)
self.do_loss = do_loss
self.trainer = None
self.stage = None
self.val_freq = None
self.step_args = step_args
self.batch_patch_dict = {} # 由DefaultCallback中的on_train/val/test_prediction回调注入
def __len__(self):
return self.batchs
def __getattr__(self, name):
"""如果要找的属性和方法不存在,则到trainer中找"""
return getattr(self.trainer, name, None)
def __call__(self):
phase = 'train' if self.stage=='train' else 'evaluate'
if self.stage == 'train':
self.model.train()
else:
self.model.eval()
self.model.stage = self.stage
self.loss.stage = self.stage
self.loss.do_loss = self.do_loss
self.loss.task = self
# 配置指标,在DefaultCallback中的on_train/val/test_prediction中用于构造Patch
if self.stage == 'train':
self.metrics = [m for m in self.metrics if m not in self.train_metrics] + self.train_metrics
elif self.stage == 'val':
self.metrics = [m for m in self.metrics if m not in self.val_metrics] + self.val_metrics
else:
self.metrics = [m for m in self.metrics if m not in self.test_metrics] + self.test_metrics
with torch.no_grad():
self.callbacks.trigger(f'before_{self.stage}_epoch', trainer=self, task=self)
epoch_patch_dicts = []
for batch_idx, batch_data in enumerate(self.dataloader):
batch_x, batch_y = self.prepare_data(batch_data)
self.callbacks.trigger(f'before_{self.stage}_batch', trainer=self.trainer, batch_x=batch_x, batch_y=batch_y, batch_idx=batch_idx)
# 获取mini-batch的`*step`方法
# 1. 最优先使用`EpochTask.step`、`Trainer.step`
step_method = getattr(self, 'step', None)
# 2. 次优先使用`EpochTask.train_step`、`Epoch.val_step`、`EpochTask.test_step`
# 3. 其次使用`Trainer.train_step`、`Trainer.val_step`、`Trainer.test_step`
step_method = getattr(self, f'{self.stage}_step') if step_method is None else step_method
# 4. 再次使用`EpochTask.evaluate_step`方法
# 5. 最次使用`Trainer.evaluate_step`
step_method = getattr(self, f'{phase}_step') if step_method is None else step_method
# 运行mini-batch的`*step`方法
if self.stage == 'train':
with torch.enable_grad():
step_out = step_method(batch_x, batch_y, **self.step_args)
else:
step_out = step_method(batch_x, batch_y, **self.step_args)
if step_out is not None:
if not isinstance(step_out, dict):
raise LoopException(f'{step_method} 方法的返回值必须为字典!')
if not all(isinstance(v, PatchBase) for k, v in step_out.items()):
raise LoopException(f'{step_method} 方法返回字典的value必须为Patch(deepepochs.PatchBase子类对象)!')
patch_dict = step_out
else:
patch_dict = {}
self.batch_patch_dict.update(patch_dict)
epoch_patch_dicts.append(self.batch_patch_dict)
# 计算当前batch的指标
batch_metric_values = flatten_dict(run_patch_dict(self.batch_patch_dict), sep='')
self.callbacks.trigger(f'after_{self.stage}_batch', trainer=self.trainer, metrics=batch_metric_values, batch_idx=batch_idx)
# 清空 self.batch_patch_dict
self.batch_patch_dict = {}
# 计算当前epoch的指标
epoch_metrics_values = flatten_dict(run_patch_dicts(epoch_patch_dicts), sep='')
self.callbacks.trigger(f'after_{self.stage}_epoch', trainer=self.trainer, task=self, metrics=epoch_metrics_values)
return epoch_metrics_values
class ModelWrapper:
    """
    Implements the following callbacks:
        on_before_train_forward   on_after_train_forward
        on_before_val_forward     on_after_val_forward
        on_before_test_forward    on_after_test_forward
    """
def __init__(self, model, trainer):
# self.model = torch.compile(model)
self.model = model
self.trainer = trainer
self.stage = None
def __getattr__(self, name):
return getattr(self.model, name)
def __call__(self, *args, **kwds):
self.trainer.callbacks.trigger(f'before_{self.stage}_forward', trainer=self)
model_out = self.model(*args, **kwds)
self.trainer.callbacks.trigger(f'after_{self.stage}_forward', trainer=self, model_out=model_out)
return model_out
def train(self):
self.model.train()
def eval(self):
self.model.eval()
def to(self, device):
self.model = self.model.to(device)
return self
def cpu(self):
self.model = self.model.cpu()
return self
def cuda(self):
self.model = self.model.cuda()
return self
def parameters(self):
return self.model.parameters()
def modules(self):
return self.model.modules()
def state_dict(self):
return self.model.state_dict()
def load_state_dict(self, state_dict):
self.model.load_state_dict(state_dict)
class LossWrapper:
    """
    1. Runs zero_grad, backward and opt.step automatically
    2. Cooperates in gradient accumulation
    3. Implements the callbacks:
        on_before_backward   on_after_backward
        on_before_optimize   on_after_optimize
        on_train_metrics     on_val_metrics    on_test_metrics
    """
def __init__(self, loss_fn, trainer):
self.loss_fn = loss_fn
self.trainer = trainer
self.stage = None
self.do_loss = None
self.task = None
self.total_loss = 0 # 用于实现累积梯度
self.model_outs = [] # 用于实现累积梯度
self.batch_ys = [] # 用于实现累积梯度
def optimize(self):
self.trainer.callbacks.trigger('before_optimize', trainer=self)
self.trainer.opt.step()
self.trainer.opt.zero_grad()
self.trainer.callbacks.trigger('after_optimize', trainer=self)
    def __call__(self, model_out, batch_y, grad_accumulate=False):
        """
        Args:
            model_out: model predictions
            batch_y: labels
            grad_accumulate: whether to accumulate gradients
        """
if self.stage == 'train':
# 计算损失
loss = self.loss_fn(model_out, batch_y)
# backward
self.trainer.callbacks.trigger('before_backward', trainer=self, loss=loss)
if self.trainer.accelerator is None:
(loss/self.trainer.grad_accumulate_steps).backward()
else: # accelerate的backward
self.trainer.accelerator.backward(loss/self.trainer.grad_accumulate_steps)
self.trainer.callbacks.trigger('after_backward', trainer=self, loss=loss)
# 记录各sub-batch的总损失、模型输出、标签
_loss = loss.detach().clone()
self.total_loss += _loss * batch_size(model_out)
self.model_outs.append(detach_clone(model_out))
self.batch_ys.append(batch_y)
# 梯度累积
if grad_accumulate:
if self.trainer.accelerator is not None: # DeepEpochs的梯度累积要求仅最后一个sub-batch优化
self.optimize() # Accelerate的梯度累积要求每个sub-batch都优化
return _loss
else:
self.optimize()
            # Compute the average loss and concatenate model_out and batch_y from the sub-batches gathered over the accumulation steps
loss_4cbk = self.total_loss / sum(batch_size(o) for o in self.model_outs)
model_out_4cbk = concat(self.model_outs)
batch_y_4cbk = concat(self.batch_ys)
self.total_loss = 0
self.model_outs = []
self.batch_ys = []
else:
# 验证与测试不需要实现分批,如果需要的话可使用较小的batch_size
model_out_4cbk = model_out
batch_y_4cbk = batch_y
if self.do_loss:
loss_4cbk = self.loss_fn(model_out, batch_y)
else:
loss_4cbk = None
self.trainer.callbacks.trigger(f'{self.stage}_metrics', trainer=self.trainer, loss=loss_4cbk, model_out=model_out_4cbk, batch_y=batch_y_4cbk, task=self.task)
return loss_4cbk
class TrainerBase:
def __init__(self, model,
loss=None,
opt=None,
epochs=1000,
device=None,
callbacks=None,
metrics=None,
metric_patch:['mean', 'tensor']='tensor',
resume=False,
running_id=None,
hyper_params=None,
log_long=False,
log_batch=True,
log_tqdm=False,
show_info=True,
compile_model=False,
grad_accumulate_steps=1,
):
"""
Args:
model: Pytorch模型(nn.Module)
loss: 损失函数
opt: 优化器,或优化器列表;优化器是Pytorch优化器或deepepochs.Optimizer对象
epochs [int]: 迭代次数
device [str]: 加速设备,可取值包括
- cpu、cuda、mps等Pytorch支持的设备
- Accelerator对象,利用Hugging Face Accelerate实现多机多卡或混合精度训练
callbacks [List[Callback]]: Callback或Callback列表。
metrics [Callable]: 指标函数列表;通用于训练、验证和测试。
metric_patch [PatchBase]: 封装metrics所用的Patch类型,可选项为 mean 或 tensor
resume [bool, int, str]: 是否从logs文件平中的Checkpoint加载
- False表示不加载
- True表示从最新的Checkpoint加载
- int、str表示加载相应ID的Checkpoint
running_id [int, str, None]: 当前训练的运行编号,用于指定日志和checkpoint的文件夹名
hyper_params [dict, None]: 调参所关注的重要超参数,用于写入日志文件辅助调参
log_long [bool]: 指标输出为长格式(7位小数)还是短格式(4位小数)
log_batch [bool]: 训练过程中是否每个batch输出一次指标值
log_tqdm [bool]: 是否使用tqdm显示进度
compile_model [bool]: 利用PyTorch 2.x对模型compile以提升速度(暂不支持mps、Windows [v2.1])
grad_accumulate_steps [int]: 累积梯度更新时的累积次数,大于1表示启用累积梯度更新
"""
self.show_info = show_info
# 检测与配置加速设备
if device is not None:
self.device = device
elif torch.cuda.is_available():
self.device = 'cuda'
elif torch.backends.mps.is_available() and not compile_model:
self.device = 'mps'
else:
self.device = 'cpu'
# Pytorch支持的设备类型
device_types = ['cpu', 'cuda', 'ipu', 'xpu', 'mkldnn', 'opengl', 'opencl',
'ideep', 'hip', 've', 'fpga', 'ort', 'xla', 'lazy', 'vulkan',
'mps', 'meta', 'hpu', 'mtia', 'privateuseone']
# 使用Accelerate,用于实现分布式或混合精度训练
if isinstance(self.device, Accelerator):
self.accelerator = self.device
self.device = self.accelerator.device
self.main_process = self.accelerator.is_main_process # 是否主进程
else:
assert str(self.device).split(':', maxsplit=1)[0] in device_types, f'Pytorch不支持的{self.device}设备!\nPytorch支持的设备有:{device_types}'
self.accelerator = None
self.main_process = True
# 配置模型
if compile_model:
model = torch.compile(model)
self.model = ModelWrapper(model, self).to(self.device)
# 梯度累积次数
assert isinstance(grad_accumulate_steps, int) and grad_accumulate_steps > 0, '梯度累积次数`grad_accumulate_steps`必须为正整数!'
self.grad_accumulate_steps = grad_accumulate_steps
if self.accelerator is not None and self.accelerator.gradient_accumulation_steps > 1:
# 优先使用accelerator中的gradient_accumulation_steps
self.grad_accumulate_steps = self.accelerator.gradient_accumulation_steps
# 配置损失函数
if loss is None:
self.loss = LossWrapper(default_loss, self)
else:
self.loss = LossWrapper(loss, self)
# 配置优化器
if opt is None:
self.opt = Optimizer(Adam(model.parameters(), lr=0.001))
elif isinstance(opt, torch.optim.Optimizer):
self.opt = Optimizer(opt)
elif isinstance(opt, (Optimizer, Optimizers)): # Optimizers是多个Optimizer的列表
self.opt = opt
elif isinstance(opt, (list, tuple)): # 多个优化器的情况
opt_lst = [Optimizer(o) if isinstance(o, torch.optim.Optimizer) else o for o in opt]
assert all(isinstance(o, Optimizer) for o in opt_lst), "优化器参数存在错误!"
self.opt = Optimizers(opt_lst)
else:
raise ValueError('`opt`参数取值错误!')
# 迭代次数
self.max_epochs = epochs
# 起始迭代
self.init_epoch = 0
# 配置Callbacks
callbacks = listify(callbacks)
self.log_tqdm = log_tqdm
log_batch = False if log_tqdm else log_batch
| self.default_cbk = DefaultCallback(log_long, log_batch, log_tqdm)
| 21 | 2023-10-19 05:41:48+00:00 | 12k |
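The `Optimizer` wrapper quoted in this record's context (deepepochs/optimizer.py) bundles a PyTorch optimizer with an optional LR scheduler and, via `sched_on`, decides whether the scheduler advances per step or per epoch. A minimal usage sketch under the assumption that the deepepochs package is importable; the tiny linear model and the random data are placeholders, not part of deepepochs:

import torch
from deepepochs.optimizer import Optimizer

model = torch.nn.Linear(4, 1)
inner = torch.optim.Adam(model.parameters(), lr=1e-3)
sched = torch.optim.lr_scheduler.StepLR(inner, step_size=1, gamma=0.5)
opt = Optimizer(inner, scheduler=sched, sched_on='epoch')  # scheduler advances once per epoch

for epoch in range(2):
    for _ in range(5):  # stand-in mini-batches
        loss = model(torch.randn(8, 4)).pow(2).mean()
        opt.zero_grad()
        loss.backward()
        opt.step(at='step')      # optimizer step only; sched_on='epoch' leaves the scheduler untouched
    opt.step(at='epoch')         # scheduler step at the end of the epoch
    print(opt.get_current_lr())  # halves after every epoch with the StepLR above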
vorausrobotik/voraus-ad-dataset | train.py | [
{
"identifier": "Configuration",
"path": "configuration.py",
"snippet": "class Configuration(BaseModel):\n \"\"\"Describes the configuration parameters.\"\"\"\n\n seed: int\n epochs: int\n batchsize: int\n n_hidden_layers: int = Field(alias=\"nHiddenLayers\")\n n_coupling_blocks: int = Field(alias=\"nCouplingBlocks\")\n scale: int\n columns: Literal[\"machine\", \"mechanical\", \"electrical\", \"computed\", \"measured\"]\n clamp: float\n pad: bool\n frequency_divider: int = Field(alias=\"frequencyDivider\")\n train_gain: float = Field(alias=\"trainGain\")\n normalize: bool\n kernel_size_1: int = Field(alias=\"kernelSize1\")\n dilation_1: int = Field(alias=\"dilation1\")\n kernel_size_2: int = Field(alias=\"kernelSize2\")\n dilation_2: int = Field(alias=\"dilation2\")\n kernel_size_3: int = Field(alias=\"kernelSize3\")\n dilation_3: int = Field(alias=\"dilation3\")\n milestones: list[int]\n gamma: float\n learning_rate: float = Field(alias=\"learningRate\")"
},
{
"identifier": "NormalizingFlow",
"path": "normalizing_flow.py",
"snippet": "class NormalizingFlow(GraphINN):\r\n \"\"\"Describes the normalizing flow model.\"\"\"\r\n\r\n def __init__(self, input_dimension: Tuple[int, ...], config: Configuration) -> None:\r\n \"\"\"Initializes the normalizing flow model.\r\n\r\n Args:\r\n input_dimension: The input dimensions.\r\n config: The configuration of the model.\r\n \"\"\"\r\n nodes = [InputNode(*input_dimension, name=\"input\")]\r\n\r\n int_network = InternalNetwork.setup(\r\n input_dimension[1],\r\n input_dimension[0],\r\n n_hidden_layers=config.n_hidden_layers,\r\n scale=config.scale,\r\n kernel_size_1=config.kernel_size_1,\r\n dilation_1=config.dilation_1,\r\n kernel_size_2=config.kernel_size_2,\r\n dilation_2=config.dilation_2,\r\n kernel_size_3=config.kernel_size_3,\r\n dilation_3=config.dilation_3,\r\n )\r\n\r\n for cbi in range(config.n_coupling_blocks):\r\n kwargs: Dict[Any, Any] = {}\r\n\r\n nodes.append(\r\n Node(nodes[-1], PermuteRandom, kwargs, name=f\"permute{cbi}\"),\r\n )\r\n nodes.append(\r\n Node(\r\n nodes[-1],\r\n CouplingBlock,\r\n {\r\n \"subnet_constructor\": int_network.constructor,\r\n \"clamp\": config.clamp,\r\n },\r\n name=f\"cb{cbi}\",\r\n )\r\n )\r\n\r\n output_node = OutputNode(nodes[-1], name=\"output\")\r\n nodes.append(output_node)\r\n\r\n super().__init__(nodes)\r"
},
{
"identifier": "get_loss",
"path": "normalizing_flow.py",
"snippet": "def get_loss(z_space: Tensor, jac: Tensor) -> Tensor:\r\n \"\"\"Calculate the loss of a batch.\r\n\r\n Computes the negative log likelihood loss (per dimension) assuming z should be Gaussian.\r\n\r\n Args:\r\n z_space: The batch result.\r\n jac: The jacobian matrix.\r\n\r\n Returns:\r\n The loss of the batch.\r\n \"\"\"\r\n sum_dimension = tuple(range(1, z_space.dim()))\r\n number = numpy.prod(z_space.shape[1:])\r\n return torch.mean(torch.sum(z_space**2, dim=sum_dimension) - jac) / number\r"
},
{
"identifier": "get_loss_per_sample",
"path": "normalizing_flow.py",
"snippet": "def get_loss_per_sample(z_space: Tensor, jac: Tensor) -> Tensor:\r\n \"\"\"Calculates the loss per sample.\r\n\r\n Args:\r\n z_space: The batch result.\r\n jac: The jacobian matrix.\r\n\r\n Returns:\r\n The loss per sample.\r\n \"\"\"\r\n sum_dimension = tuple(range(1, z_space.dim()))\r\n loss = 0.5 * torch.sum(z_space**2, dim=sum_dimension) - jac\r\n return loss\r"
},
{
"identifier": "ANOMALY_CATEGORIES",
"path": "voraus_ad.py",
"snippet": "ANOMALY_CATEGORIES = [\n Category.AXIS_FRICTION,\n Category.AXIS_WEIGHT,\n Category.COLLISION_FOAM,\n Category.COLLISION_CABLE,\n Category.COLLISION_CARTON,\n Category.MISS_CAN,\n Category.LOSE_CAN,\n Category.CAN_WEIGHT,\n Category.ENTANGLED,\n Category.INVALID_POSITION,\n Category.MOTOR_COMMUTATION,\n Category.WOBBLING_STATION,\n]"
},
{
"identifier": "Signals",
"path": "voraus_ad.py",
"snippet": "class Signals:\n \"\"\"Contains the signals of the robot used in the dataset.\"\"\"\n\n TIME = \"time\"\n SAMPLE = \"sample\"\n ANOMALY = \"anomaly\"\n CATEGORY = \"category\"\n SETTING = \"setting\"\n ACTION = \"action\"\n ACTIVE = \"active\"\n ROBOT_VOLTAGE = \"robot_voltage\"\n ROBOT_CURRENT = \"robot_current\"\n IO_CURRENT = \"io_current\"\n SYSTEM_CURRENT = \"system_current\"\n TARGET_POSITION_1 = \"target_position_1\"\n TARGET_VELOCITY_1 = \"target_velocity_1\"\n TARGET_ACCELERATION_1 = \"target_acceleration_1\"\n TARGET_TORQUE_1 = \"target_torque_1\"\n COMPUTED_INERTIA_1 = \"computed_inertia_1\"\n COMPUTED_TORQUE_1 = \"computed_torque_1\"\n MOTOR_POSITION_1 = \"motor_position_1\"\n MOTOR_VELOCITY_1 = \"motor_velocity_1\"\n JOINT_POSITION_1 = \"joint_position_1\"\n JOINT_VELOCITY_1 = \"joint_velocity_1\"\n MOTOR_TORQUE_1 = \"motor_torque_1\"\n TORQUE_SENSOR_A_1 = \"torque_sensor_a_1\"\n TORQUE_SENSOR_B_1 = \"torque_sensor_b_1\"\n MOTOR_IQ_1 = \"motor_iq_1\"\n MOTOR_ID_1 = \"motor_id_1\"\n POWER_MOTOR_EL_1 = \"power_motor_el_1\"\n POWER_MOTOR_MECH_1 = \"power_motor_mech_1\"\n POWER_LOAD_MECH_1 = \"power_load_mech_1\"\n MOTOR_VOLTAGE_1 = \"motor_voltage_1\"\n SUPPLY_VOLTAGE_1 = \"supply_voltage_1\"\n BRAKE_VOLTAGE_1 = \"brake_voltage_1\"\n TARGET_POSITION_2 = \"target_position_2\"\n TARGET_VELOCITY_2 = \"target_velocity_2\"\n TARGET_ACCELERATION_2 = \"target_acceleration_2\"\n TARGET_TORQUE_2 = \"target_torque_2\"\n COMPUTED_INERTIA_2 = \"computed_inertia_2\"\n COMPUTED_TORQUE_2 = \"computed_torque_2\"\n MOTOR_POSITION_2 = \"motor_position_2\"\n MOTOR_VELOCITY_2 = \"motor_velocity_2\"\n JOINT_POSITION_2 = \"joint_position_2\"\n JOINT_VELOCITY_2 = \"joint_velocity_2\"\n MOTOR_TORQUE_2 = \"motor_torque_2\"\n TORQUE_SENSOR_A_2 = \"torque_sensor_a_2\"\n TORQUE_SENSOR_B_2 = \"torque_sensor_b_2\"\n MOTOR_IQ_2 = \"motor_iq_2\"\n MOTOR_ID_2 = \"motor_id_2\"\n POWER_MOTOR_EL_2 = \"power_motor_el_2\"\n POWER_MOTOR_MECH_2 = \"power_motor_mech_2\"\n POWER_LOAD_MECH_2 = \"power_load_mech_2\"\n MOTOR_VOLTAGE_2 = \"motor_voltage_2\"\n SUPPLY_VOLTAGE_2 = \"supply_voltage_2\"\n BRAKE_VOLTAGE_2 = \"brake_voltage_2\"\n TARGET_POSITION_3 = \"target_position_3\"\n TARGET_VELOCITY_3 = \"target_velocity_3\"\n TARGET_ACCELERATION_3 = \"target_acceleration_3\"\n TARGET_TORQUE_3 = \"target_torque_3\"\n COMPUTED_INERTIA_3 = \"computed_inertia_3\"\n COMPUTED_TORQUE_3 = \"computed_torque_3\"\n MOTOR_POSITION_3 = \"motor_position_3\"\n MOTOR_VELOCITY_3 = \"motor_velocity_3\"\n JOINT_POSITION_3 = \"joint_position_3\"\n JOINT_VELOCITY_3 = \"joint_velocity_3\"\n MOTOR_TORQUE_3 = \"motor_torque_3\"\n TORQUE_SENSOR_A_3 = \"torque_sensor_a_3\"\n TORQUE_SENSOR_B_3 = \"torque_sensor_b_3\"\n MOTOR_IQ_3 = \"motor_iq_3\"\n MOTOR_ID_3 = \"motor_id_3\"\n POWER_MOTOR_EL_3 = \"power_motor_el_3\"\n POWER_MOTOR_MECH_3 = \"power_motor_mech_3\"\n POWER_LOAD_MECH_3 = \"power_load_mech_3\"\n MOTOR_VOLTAGE_3 = \"motor_voltage_3\"\n SUPPLY_VOLTAGE_3 = \"supply_voltage_3\"\n BRAKE_VOLTAGE_3 = \"brake_voltage_3\"\n TARGET_POSITION_4 = \"target_position_4\"\n TARGET_VELOCITY_4 = \"target_velocity_4\"\n TARGET_ACCELERATION_4 = \"target_acceleration_4\"\n TARGET_TORQUE_4 = \"target_torque_4\"\n COMPUTED_INERTIA_4 = \"computed_inertia_4\"\n COMPUTED_TORQUE_4 = \"computed_torque_4\"\n MOTOR_POSITION_4 = \"motor_position_4\"\n MOTOR_VELOCITY_4 = \"motor_velocity_4\"\n JOINT_POSITION_4 = \"joint_position_4\"\n JOINT_VELOCITY_4 = \"joint_velocity_4\"\n MOTOR_TORQUE_4 = \"motor_torque_4\"\n TORQUE_SENSOR_A_4 = \"torque_sensor_a_4\"\n 
TORQUE_SENSOR_B_4 = \"torque_sensor_b_4\"\n MOTOR_IQ_4 = \"motor_iq_4\"\n MOTOR_ID_4 = \"motor_id_4\"\n POWER_MOTOR_EL_4 = \"power_motor_el_4\"\n POWER_MOTOR_MECH_4 = \"power_motor_mech_4\"\n POWER_LOAD_MECH_4 = \"power_load_mech_4\"\n MOTOR_VOLTAGE_4 = \"motor_voltage_4\"\n SUPPLY_VOLTAGE_4 = \"supply_voltage_4\"\n BRAKE_VOLTAGE_4 = \"brake_voltage_4\"\n TARGET_POSITION_5 = \"target_position_5\"\n TARGET_VELOCITY_5 = \"target_velocity_5\"\n TARGET_ACCELERATION_5 = \"target_acceleration_5\"\n TARGET_TORQUE_5 = \"target_torque_5\"\n COMPUTED_INERTIA_5 = \"computed_inertia_5\"\n COMPUTED_TORQUE_5 = \"computed_torque_5\"\n MOTOR_POSITION_5 = \"motor_position_5\"\n MOTOR_VELOCITY_5 = \"motor_velocity_5\"\n JOINT_POSITION_5 = \"joint_position_5\"\n JOINT_VELOCITY_5 = \"joint_velocity_5\"\n MOTOR_TORQUE_5 = \"motor_torque_5\"\n TORQUE_SENSOR_A_5 = \"torque_sensor_a_5\"\n TORQUE_SENSOR_B_5 = \"torque_sensor_b_5\"\n MOTOR_IQ_5 = \"motor_iq_5\"\n MOTOR_ID_5 = \"motor_id_5\"\n POWER_MOTOR_EL_5 = \"power_motor_el_5\"\n POWER_MOTOR_MECH_5 = \"power_motor_mech_5\"\n POWER_LOAD_MECH_5 = \"power_load_mech_5\"\n MOTOR_VOLTAGE_5 = \"motor_voltage_5\"\n SUPPLY_VOLTAGE_5 = \"supply_voltage_5\"\n BRAKE_VOLTAGE_5 = \"brake_voltage_5\"\n TARGET_POSITION_6 = \"target_position_6\"\n TARGET_VELOCITY_6 = \"target_velocity_6\"\n TARGET_ACCELERATION_6 = \"target_acceleration_6\"\n TARGET_TORQUE_6 = \"target_torque_6\"\n COMPUTED_INERTIA_6 = \"computed_inertia_6\"\n COMPUTED_TORQUE_6 = \"computed_torque_6\"\n MOTOR_POSITION_6 = \"motor_position_6\"\n MOTOR_VELOCITY_6 = \"motor_velocity_6\"\n JOINT_POSITION_6 = \"joint_position_6\"\n JOINT_VELOCITY_6 = \"joint_velocity_6\"\n MOTOR_TORQUE_6 = \"motor_torque_6\"\n TORQUE_SENSOR_A_6 = \"torque_sensor_a_6\"\n TORQUE_SENSOR_B_6 = \"torque_sensor_b_6\"\n MOTOR_IQ_6 = \"motor_iq_6\"\n MOTOR_ID_6 = \"motor_id_6\"\n POWER_MOTOR_EL_6 = \"power_motor_el_6\"\n POWER_MOTOR_MECH_6 = \"power_motor_mech_6\"\n POWER_LOAD_MECH_6 = \"power_load_mech_6\"\n MOTOR_VOLTAGE_6 = \"motor_voltage_6\"\n SUPPLY_VOLTAGE_6 = \"supply_voltage_6\"\n BRAKE_VOLTAGE_6 = \"brake_voltage_6\"\n\n @classmethod\n def all(cls) -> tuple[str, ...]:\n \"\"\"Returns all signals (machine data and meta) included in the voraus-AD dataset.\n\n Returns:\n All signals of the voraus-AD dataset.\n \"\"\"\n return (\n cls.TIME,\n cls.SAMPLE,\n cls.ANOMALY,\n cls.CATEGORY,\n cls.SETTING,\n cls.ACTION,\n cls.ACTIVE,\n cls.ROBOT_VOLTAGE,\n cls.ROBOT_CURRENT,\n cls.IO_CURRENT,\n cls.SYSTEM_CURRENT,\n cls.TARGET_POSITION_1,\n cls.TARGET_VELOCITY_1,\n cls.TARGET_ACCELERATION_1,\n cls.TARGET_TORQUE_1,\n cls.COMPUTED_INERTIA_1,\n cls.COMPUTED_TORQUE_1,\n cls.MOTOR_POSITION_1,\n cls.MOTOR_VELOCITY_1,\n cls.JOINT_POSITION_1,\n cls.JOINT_VELOCITY_1,\n cls.MOTOR_TORQUE_1,\n cls.TORQUE_SENSOR_A_1,\n cls.TORQUE_SENSOR_B_1,\n cls.MOTOR_IQ_1,\n cls.MOTOR_ID_1,\n cls.POWER_MOTOR_EL_1,\n cls.POWER_MOTOR_MECH_1,\n cls.POWER_LOAD_MECH_1,\n cls.MOTOR_VOLTAGE_1,\n cls.SUPPLY_VOLTAGE_1,\n cls.BRAKE_VOLTAGE_1,\n cls.TARGET_POSITION_2,\n cls.TARGET_VELOCITY_2,\n cls.TARGET_ACCELERATION_2,\n cls.TARGET_TORQUE_2,\n cls.COMPUTED_INERTIA_2,\n cls.COMPUTED_TORQUE_2,\n cls.MOTOR_POSITION_2,\n cls.MOTOR_VELOCITY_2,\n cls.JOINT_POSITION_2,\n cls.JOINT_VELOCITY_2,\n cls.MOTOR_TORQUE_2,\n cls.TORQUE_SENSOR_A_2,\n cls.TORQUE_SENSOR_B_2,\n cls.MOTOR_IQ_2,\n cls.MOTOR_ID_2,\n cls.POWER_MOTOR_EL_2,\n cls.POWER_MOTOR_MECH_2,\n cls.POWER_LOAD_MECH_2,\n cls.MOTOR_VOLTAGE_2,\n cls.SUPPLY_VOLTAGE_2,\n cls.BRAKE_VOLTAGE_2,\n cls.TARGET_POSITION_3,\n 
cls.TARGET_VELOCITY_3,\n cls.TARGET_ACCELERATION_3,\n cls.TARGET_TORQUE_3,\n cls.COMPUTED_INERTIA_3,\n cls.COMPUTED_TORQUE_3,\n cls.MOTOR_POSITION_3,\n cls.MOTOR_VELOCITY_3,\n cls.JOINT_POSITION_3,\n cls.JOINT_VELOCITY_3,\n cls.MOTOR_TORQUE_3,\n cls.TORQUE_SENSOR_A_3,\n cls.TORQUE_SENSOR_B_3,\n cls.MOTOR_IQ_3,\n cls.MOTOR_ID_3,\n cls.POWER_MOTOR_EL_3,\n cls.POWER_MOTOR_MECH_3,\n cls.POWER_LOAD_MECH_3,\n cls.MOTOR_VOLTAGE_3,\n cls.SUPPLY_VOLTAGE_3,\n cls.BRAKE_VOLTAGE_3,\n cls.TARGET_POSITION_4,\n cls.TARGET_VELOCITY_4,\n cls.TARGET_ACCELERATION_4,\n cls.TARGET_TORQUE_4,\n cls.COMPUTED_INERTIA_4,\n cls.COMPUTED_TORQUE_4,\n cls.MOTOR_POSITION_4,\n cls.MOTOR_VELOCITY_4,\n cls.JOINT_POSITION_4,\n cls.JOINT_VELOCITY_4,\n cls.MOTOR_TORQUE_4,\n cls.TORQUE_SENSOR_A_4,\n cls.TORQUE_SENSOR_B_4,\n cls.MOTOR_IQ_4,\n cls.MOTOR_ID_4,\n cls.POWER_MOTOR_EL_4,\n cls.POWER_MOTOR_MECH_4,\n cls.POWER_LOAD_MECH_4,\n cls.MOTOR_VOLTAGE_4,\n cls.SUPPLY_VOLTAGE_4,\n cls.BRAKE_VOLTAGE_4,\n cls.TARGET_POSITION_5,\n cls.TARGET_VELOCITY_5,\n cls.TARGET_ACCELERATION_5,\n cls.TARGET_TORQUE_5,\n cls.COMPUTED_INERTIA_5,\n cls.COMPUTED_TORQUE_5,\n cls.MOTOR_POSITION_5,\n cls.MOTOR_VELOCITY_5,\n cls.JOINT_POSITION_5,\n cls.JOINT_VELOCITY_5,\n cls.MOTOR_TORQUE_5,\n cls.TORQUE_SENSOR_A_5,\n cls.TORQUE_SENSOR_B_5,\n cls.MOTOR_IQ_5,\n cls.MOTOR_ID_5,\n cls.POWER_MOTOR_EL_5,\n cls.POWER_MOTOR_MECH_5,\n cls.POWER_LOAD_MECH_5,\n cls.MOTOR_VOLTAGE_5,\n cls.SUPPLY_VOLTAGE_5,\n cls.BRAKE_VOLTAGE_5,\n cls.TARGET_POSITION_6,\n cls.TARGET_VELOCITY_6,\n cls.TARGET_ACCELERATION_6,\n cls.TARGET_TORQUE_6,\n cls.COMPUTED_INERTIA_6,\n cls.COMPUTED_TORQUE_6,\n cls.MOTOR_POSITION_6,\n cls.MOTOR_VELOCITY_6,\n cls.JOINT_POSITION_6,\n cls.JOINT_VELOCITY_6,\n cls.MOTOR_TORQUE_6,\n cls.TORQUE_SENSOR_A_6,\n cls.TORQUE_SENSOR_B_6,\n cls.MOTOR_IQ_6,\n cls.MOTOR_ID_6,\n cls.POWER_MOTOR_EL_6,\n cls.POWER_MOTOR_MECH_6,\n cls.POWER_LOAD_MECH_6,\n cls.MOTOR_VOLTAGE_6,\n cls.SUPPLY_VOLTAGE_6,\n cls.BRAKE_VOLTAGE_6,\n )\n\n @classmethod\n def meta(cls) -> tuple[str, ...]:\n \"\"\"Returns the meta colums of the voraus-AD dataset.\n\n Returns:\n The meta columns of the dataset.\n \"\"\"\n return (\n cls.TIME,\n cls.SAMPLE,\n cls.ANOMALY,\n cls.CATEGORY,\n cls.SETTING,\n cls.ACTION,\n cls.ACTIVE,\n )\n\n @classmethod\n def meta_constant(cls) -> tuple[str, ...]:\n \"\"\"Returns time invariant meta colums of the voraus-AD dataset.\n\n Returns:\n The time invariant meta columns.\n \"\"\"\n return (\n cls.SAMPLE,\n cls.ANOMALY,\n cls.CATEGORY,\n cls.SETTING,\n )\n\n @classmethod\n def electrical(cls) -> tuple[str, ...]:\n \"\"\"Returns the part of the machine data columns, which describes electrical values.\n\n Returns:\n The electrical signals.\n \"\"\"\n return (\n cls.ROBOT_VOLTAGE,\n cls.ROBOT_CURRENT,\n cls.IO_CURRENT,\n cls.SYSTEM_CURRENT,\n cls.MOTOR_IQ_1,\n cls.MOTOR_ID_1,\n cls.POWER_MOTOR_EL_1,\n cls.MOTOR_VOLTAGE_1,\n cls.SUPPLY_VOLTAGE_1,\n cls.BRAKE_VOLTAGE_1,\n cls.MOTOR_IQ_2,\n cls.MOTOR_ID_2,\n cls.POWER_MOTOR_EL_2,\n cls.MOTOR_VOLTAGE_2,\n cls.SUPPLY_VOLTAGE_2,\n cls.BRAKE_VOLTAGE_2,\n cls.MOTOR_IQ_3,\n cls.MOTOR_ID_3,\n cls.POWER_MOTOR_EL_3,\n cls.MOTOR_VOLTAGE_3,\n cls.SUPPLY_VOLTAGE_3,\n cls.BRAKE_VOLTAGE_3,\n cls.MOTOR_IQ_4,\n cls.MOTOR_ID_4,\n cls.POWER_MOTOR_EL_4,\n cls.MOTOR_VOLTAGE_4,\n cls.SUPPLY_VOLTAGE_4,\n cls.BRAKE_VOLTAGE_4,\n cls.MOTOR_IQ_5,\n cls.MOTOR_ID_5,\n cls.POWER_MOTOR_EL_5,\n cls.MOTOR_VOLTAGE_5,\n cls.SUPPLY_VOLTAGE_5,\n cls.BRAKE_VOLTAGE_5,\n cls.MOTOR_IQ_6,\n cls.MOTOR_ID_6,\n cls.POWER_MOTOR_EL_6,\n cls.MOTOR_VOLTAGE_6,\n 
cls.SUPPLY_VOLTAGE_6,\n cls.BRAKE_VOLTAGE_6,\n )\n\n @classmethod\n def measured(cls) -> tuple[str, ...]:\n \"\"\"Returns the part of the machine data, which describes measured values.\n\n Returns:\n The measured signals.\n \"\"\"\n return (\n cls.ROBOT_VOLTAGE,\n cls.ROBOT_CURRENT,\n cls.IO_CURRENT,\n cls.SYSTEM_CURRENT,\n cls.MOTOR_POSITION_1,\n cls.MOTOR_VELOCITY_1,\n cls.JOINT_POSITION_1,\n cls.JOINT_VELOCITY_1,\n cls.TORQUE_SENSOR_A_1,\n cls.TORQUE_SENSOR_B_1,\n cls.MOTOR_VOLTAGE_1,\n cls.SUPPLY_VOLTAGE_1,\n cls.BRAKE_VOLTAGE_1,\n cls.MOTOR_POSITION_2,\n cls.MOTOR_VELOCITY_2,\n cls.JOINT_POSITION_2,\n cls.JOINT_VELOCITY_2,\n cls.TORQUE_SENSOR_A_2,\n cls.TORQUE_SENSOR_B_2,\n cls.MOTOR_VOLTAGE_2,\n cls.SUPPLY_VOLTAGE_2,\n cls.BRAKE_VOLTAGE_2,\n cls.MOTOR_POSITION_3,\n cls.MOTOR_VELOCITY_3,\n cls.JOINT_POSITION_3,\n cls.JOINT_VELOCITY_3,\n cls.TORQUE_SENSOR_A_3,\n cls.TORQUE_SENSOR_B_3,\n cls.MOTOR_VOLTAGE_3,\n cls.SUPPLY_VOLTAGE_3,\n cls.BRAKE_VOLTAGE_3,\n cls.MOTOR_POSITION_4,\n cls.MOTOR_VELOCITY_4,\n cls.JOINT_POSITION_4,\n cls.JOINT_VELOCITY_4,\n cls.TORQUE_SENSOR_A_4,\n cls.TORQUE_SENSOR_B_4,\n cls.MOTOR_VOLTAGE_4,\n cls.SUPPLY_VOLTAGE_4,\n cls.BRAKE_VOLTAGE_4,\n cls.MOTOR_POSITION_5,\n cls.MOTOR_VELOCITY_5,\n cls.JOINT_POSITION_5,\n cls.JOINT_VELOCITY_5,\n cls.TORQUE_SENSOR_A_5,\n cls.TORQUE_SENSOR_B_5,\n cls.MOTOR_VOLTAGE_5,\n cls.SUPPLY_VOLTAGE_5,\n cls.BRAKE_VOLTAGE_5,\n cls.MOTOR_POSITION_6,\n cls.MOTOR_VELOCITY_6,\n cls.JOINT_POSITION_6,\n cls.JOINT_VELOCITY_6,\n cls.TORQUE_SENSOR_A_6,\n cls.TORQUE_SENSOR_B_6,\n cls.MOTOR_VOLTAGE_6,\n cls.SUPPLY_VOLTAGE_6,\n cls.BRAKE_VOLTAGE_6,\n )\n\n @classmethod\n def robot(cls) -> tuple[str, ...]:\n \"\"\"Returns all columns, which are not related to the robot axes, but to the robot itself.\n\n Returns:\n The robot system signals.\n \"\"\"\n return (\n cls.ROBOT_VOLTAGE,\n cls.ROBOT_CURRENT,\n cls.IO_CURRENT,\n cls.SYSTEM_CURRENT,\n )\n\n @classmethod\n def machine(cls) -> tuple[str, ...]:\n \"\"\"Returns all columns, which are machine data.\n\n This excludes the meta columns of the dataset.\n The machine data should be used for training, it contains all available measurements and target values.\n\n Returns:\n The machine data signals.\n \"\"\"\n return tuple(s for s in cls.all() if s not in cls.meta())\n\n @classmethod\n def mechanical(cls) -> tuple[str, ...]:\n \"\"\"Returns the columns, which describe mechanical values.\n\n Returns:\n The machanical signals.\n \"\"\"\n return tuple(s for s in cls.machine() if s not in cls.electrical())\n\n @classmethod\n def computed(cls) -> tuple[str, ...]:\n \"\"\"Returns the columns, which describe computed values like targets.\n\n Returns:\n The computed signals.\n \"\"\"\n return tuple(s for s in cls.machine() if s not in cls.measured())\n\n @classmethod\n def axis(cls) -> tuple[str, ...]:\n \"\"\"Returns the columns, which describe robot axis specific values.\n\n Returns:\n The robot axis specific signals.\n \"\"\"\n signals_axis = tuple(s for s in cls.machine() if s not in cls.robot())\n number_of_axis = 6\n assert len(signals_axis) % number_of_axis == 0\n signals_per_axis = round(len(signals_axis) / number_of_axis)\n print(signals_per_axis)\n return signals_axis\n\n @classmethod\n def groups(cls) -> dict[str, tuple[str, ...]]:\n \"\"\"Access the signal groups by name.\n\n Returns:\n The signal group dictionary.\n \"\"\"\n return {\n \"mechanical\": cls.mechanical(),\n \"electrical\": cls.electrical(),\n \"computed\": cls.computed(),\n \"measured\": cls.measured(),\n \"machine\": cls.machine(), 
# all machine data\n }"
},
{
"identifier": "load_torch_dataloaders",
"path": "voraus_ad.py",
"snippet": "def load_torch_dataloaders( # pylint: disable=too-many-locals\n dataset: Union[Path, str],\n batch_size: int,\n seed: int,\n columns: Union[List[str], Tuple],\n normalize: bool,\n frequency_divider: int,\n train_gain: float,\n pad: bool = True,\n) -> tuple[VorausADDataset, VorausADDataset, DataLoader, DataLoader]:\n \"\"\"Loads the voraus-AD dataset (train and test) as torch data loaders and datasets.\n\n Args:\n dataset: The path to the dataset.\n batch_size: The batch size to use.\n seed: The seed o use for the dataloader random generator.\n columns: The colums to load.\n normalize: Whether to normalize the data with standard scaler or not.\n frequency_divider: Scale the dataset down by dropping every nth sample.\n train_gain: The factor of train samples to use.\n pad: Whether to use zero padding or not.\n\n Returns:\n The data loaders and datasets.\n \"\"\"\n x_train, y_train, x_test, y_test = load_torch_tensors(\n path=dataset,\n columns=columns,\n normalize=normalize,\n frequency_divider=frequency_divider,\n train_gain=train_gain,\n pad=pad,\n )\n\n train_dataset = VorausADDataset(x_train, y_train, list(columns))\n test_dataset = VorausADDataset(x_test, y_test, list(columns))\n\n generator = torch.Generator()\n generator.manual_seed(seed)\n\n train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, generator=generator)\n test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)\n\n return train_dataset, test_dataset, train_dataloader, test_dataloader"
}
] | import random
import numpy
import pandas
import torch
import torch.backends.cudnn
from pathlib import Path
from typing import Dict, List, Optional
from sklearn import metrics
from torch import optim
from configuration import Configuration
from normalizing_flow import NormalizingFlow, get_loss, get_loss_per_sample
from voraus_ad import ANOMALY_CATEGORIES, Signals, load_torch_dataloaders | 7,694 | """Contains the training of the normalizing flow model."""
# If deterministic CUDA is activated, some calculations cannot be calculated in parallel on the GPU.
# The training will take much longer but is reproducible.
DETERMINISTIC_CUDA = False
DATASET_PATH = Path.home() / "Downloads" / "voraus-ad-dataset-100hz.parquet"
MODEL_PATH: Optional[Path] = Path.cwd() / "model.pth"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Define the training configuration and hyperparameters of the model.
configuration = Configuration(
columns="machine",
epochs=70,
frequencyDivider=1,
trainGain=1.0,
seed=177,
batchsize=32,
nCouplingBlocks=4,
clamp=1.2,
learningRate=8e-4,
normalize=True,
pad=True,
nHiddenLayers=0,
scale=2,
kernelSize1=13,
dilation1=2,
kernelSize2=1,
dilation2=1,
kernelSize3=1,
dilation3=1,
milestones=[11, 61],
gamma=0.1,
)
# Make the training reproducible.
torch.manual_seed(configuration.seed)
torch.cuda.manual_seed_all(configuration.seed)
numpy.random.seed(configuration.seed)
random.seed(configuration.seed)
if DETERMINISTIC_CUDA:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Disable pylint too-many-variables here for readability.
# The whole training should run in a single function call.
def train() -> List[Dict]: # pylint: disable=too-many-locals
"""Trains the model with the paper-given parameters.
Returns:
The auroc (mean over categories) and loss per epoch.
"""
# Load the dataset as torch data loaders.
train_dataset, _, train_dl, test_dl = load_torch_dataloaders(
dataset=DATASET_PATH,
batch_size=configuration.batchsize,
columns=Signals.groups()[configuration.columns],
seed=configuration.seed,
frequency_divider=configuration.frequency_divider,
train_gain=configuration.train_gain,
normalize=configuration.normalize,
pad=configuration.pad,
)
# Retrieve the shape of the data for the model initialization.
n_signals = train_dataset.tensors[0].shape[1]
n_times = train_dataset.tensors[0].shape[0]
# Initialize the model, optimizer and scheduler.
model = NormalizingFlow((n_signals, n_times), configuration).float().to(DEVICE)
optimizer = torch.optim.Adam(model.parameters(), lr=configuration.learning_rate)
scheduler = optim.lr_scheduler.MultiStepLR(
optimizer, milestones=configuration.milestones, gamma=configuration.gamma
)
training_results: List[Dict] = []
# Iterate over all epochs.
for epoch in range(configuration.epochs):
# TRAIN THE MODEL.
model.train()
loss: float = 0
for tensors, _ in train_dl:
tensors = tensors.float().to(DEVICE)
# Execute the forward and jacobian calculation.
optimizer.zero_grad()
latent_z, jacobian = model.forward(tensors.transpose(2, 1))
jacobian = torch.sum(jacobian, dim=tuple(range(1, jacobian.dim())))
# Back propagation and loss calculation.
| """Contains the training of the normalizing flow model."""
# If deterministic CUDA is activated, some calculations cannot be calculated in parallel on the GPU.
# The training will take much longer but is reproducible.
DETERMINISTIC_CUDA = False
DATASET_PATH = Path.home() / "Downloads" / "voraus-ad-dataset-100hz.parquet"
MODEL_PATH: Optional[Path] = Path.cwd() / "model.pth"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Define the training configuration and hyperparameters of the model.
configuration = Configuration(
columns="machine",
epochs=70,
frequencyDivider=1,
trainGain=1.0,
seed=177,
batchsize=32,
nCouplingBlocks=4,
clamp=1.2,
learningRate=8e-4,
normalize=True,
pad=True,
nHiddenLayers=0,
scale=2,
kernelSize1=13,
dilation1=2,
kernelSize2=1,
dilation2=1,
kernelSize3=1,
dilation3=1,
milestones=[11, 61],
gamma=0.1,
)
# Make the training reproducible.
torch.manual_seed(configuration.seed)
torch.cuda.manual_seed_all(configuration.seed)
numpy.random.seed(configuration.seed)
random.seed(configuration.seed)
if DETERMINISTIC_CUDA:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Disable pylint too-many-variables here for readability.
# The whole training should run in a single function call.
def train() -> List[Dict]: # pylint: disable=too-many-locals
"""Trains the model with the paper-given parameters.
Returns:
The auroc (mean over categories) and loss per epoch.
"""
# Load the dataset as torch data loaders.
train_dataset, _, train_dl, test_dl = load_torch_dataloaders(
dataset=DATASET_PATH,
batch_size=configuration.batchsize,
columns=Signals.groups()[configuration.columns],
seed=configuration.seed,
frequency_divider=configuration.frequency_divider,
train_gain=configuration.train_gain,
normalize=configuration.normalize,
pad=configuration.pad,
)
# Retrieve the shape of the data for the model initialization.
n_signals = train_dataset.tensors[0].shape[1]
n_times = train_dataset.tensors[0].shape[0]
# Initialize the model, optimizer and scheduler.
model = NormalizingFlow((n_signals, n_times), configuration).float().to(DEVICE)
optimizer = torch.optim.Adam(model.parameters(), lr=configuration.learning_rate)
scheduler = optim.lr_scheduler.MultiStepLR(
optimizer, milestones=configuration.milestones, gamma=configuration.gamma
)
training_results: List[Dict] = []
# Iterate over all epochs.
for epoch in range(configuration.epochs):
# TRAIN THE MODEL.
model.train()
loss: float = 0
for tensors, _ in train_dl:
tensors = tensors.float().to(DEVICE)
# Execute the forward and jacobian calculation.
optimizer.zero_grad()
latent_z, jacobian = model.forward(tensors.transpose(2, 1))
jacobian = torch.sum(jacobian, dim=tuple(range(1, jacobian.dim())))
# Back propagation and loss calculation. | batch_loss = get_loss(latent_z, jacobian) | 2 | 2023-10-18 15:09:24+00:00 | 12k |
invictus717/UniDG | domainbed/scripts/visualize_adaption.py | [
{
"identifier": "datasets",
"path": "domainbed/datasets.py",
"snippet": "DATASETS = [\n # Debug\n \"Debug28\",\n \"Debug224\",\n # Small images\n \"ColoredMNIST\",\n \"RotatedMNIST\",\n # Big images\n \"VLCS\",\n \"PACS\",\n \"OfficeHome\",\n \"TerraIncognita\",\n \"DomainNet\",\n \"SVIRO\",\n # WILDS datasets\n \"WILDSCamelyon\",\n \"WILDSFMoW\"\n]\n N_STEPS = 5001 # Default, subclasses may override\n CHECKPOINT_FREQ = 100 # Default, subclasses may override\n N_WORKERS = 4 # Default, subclasses may override\n ENVIRONMENTS = None # Subclasses should override\n INPUT_SHAPE = None # Subclasses should override\n INPUT_SHAPE = (3, 28, 28)\n ENVIRONMENTS = ['0', '1', '2']\n INPUT_SHAPE = (3, 224, 224)\n ENVIRONMENTS = ['0', '1', '2']\n ENVIRONMENTS = ['+90%', '+80%', '-90%']\n ENVIRONMENTS = ['0', '15', '30', '45', '60', '75']\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"C\", \"L\", \"S\", \"V\"]\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"A\", \"C\", \"P\", \"S\"]\n CHECKPOINT_FREQ = 1000\n ENVIRONMENTS = [\"clip\", \"info\", \"paint\", \"quick\", \"real\", \"sketch\"]\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"A\", \"C\", \"P\", \"R\"]\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"L100\", \"L38\", \"L43\", \"L46\"]\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"aclass\", \"escape\", \"hilux\", \"i3\", \"lexus\", \"tesla\", \"tiguan\", \"tucson\", \"x5\", \"zoe\"]\n INPUT_SHAPE = (3, 224, 224)\n ENVIRONMENTS = [ \"hospital_0\", \"hospital_1\", \"hospital_2\", \"hospital_3\",\n \"hospital_4\"]\n ENVIRONMENTS = [ \"region_0\", \"region_1\", \"region_2\", \"region_3\",\n \"region_4\", \"region_5\"]\nclass MyDataParallel(torch.nn.DataParallel):\nclass MultipleDomainDataset:\nclass Debug(MultipleDomainDataset):\nclass Debug28(Debug):\nclass Debug224(Debug):\nclass MultipleEnvironmentMNIST(MultipleDomainDataset):\nclass ColoredMNIST(MultipleEnvironmentMNIST):\nclass RotatedMNIST(MultipleEnvironmentMNIST):\nclass MultipleEnvironmentImageFolder(MultipleDomainDataset):\nclass VLCS(MultipleEnvironmentImageFolder):\nclass PACS(MultipleEnvironmentImageFolder):\nclass DomainNet(MultipleEnvironmentImageFolder):\nclass OfficeHome(MultipleEnvironmentImageFolder):\nclass TerraIncognita(MultipleEnvironmentImageFolder):\nclass SVIRO(MultipleEnvironmentImageFolder):\nclass WILDSEnvironment:\nclass WILDSDataset(MultipleDomainDataset):\nclass WILDSCamelyon(WILDSDataset):\nclass WILDSFMoW(WILDSDataset):\n def __getattr__(self, name):\ndef get_dataset_class(dataset_name):\ndef num_environments(dataset_name):\n def __getitem__(self, index):\n def __len__(self):\n def __init__(self, root, test_envs, hparams):\n def __init__(self, root, environments, dataset_transform, input_shape,\n num_classes):\n def __init__(self, root, test_envs, hparams):\n def color_dataset(self, images, labels, environment):\n def torch_bernoulli_(self, p, size):\n def torch_xor_(self, a, b):\n def __init__(self, root, test_envs, hparams):\n def rotate_dataset(self, images, labels, angle):\n def __init__(self, root, test_envs, augment, hparams):\n def __init__(self, root, test_envs, hparams):\n def __init__(self, root, test_envs, hparams):\n def __init__(self, root, test_envs, hparams):\n def __init__(self, root, test_envs, hparams):\n def __init__(self, root, test_envs, hparams):\n def __init__(self, root, test_envs, hparams):\n def __init__(\n self,\n wilds_dataset,\n metadata_name,\n metadata_value,\n transform=None):\n def __getitem__(self, i):\n def __len__(self):\n def __init__(self, dataset, metadata_name, test_envs, augment, hparams):\n def metadata_values(self, wilds_dataset, metadata_name):\n def 
__init__(self, root, test_envs, hparams):\n def __init__(self, root, test_envs, hparams):"
},
{
"identifier": "hparams_registry",
"path": "domainbed/hparams_registry.py",
"snippet": "def _define_hparam(hparams, hparam_name, default_val, random_val_fn):\ndef _hparams(algorithm, dataset, random_seed):\n def _hparam(name, default_val, random_val_fn):\ndef default_hparams(algorithm, dataset):\ndef random_hparams(algorithm, dataset, seed):\n SMALL_IMAGES = ['Debug28', 'RotatedMNIST', 'ColoredMNIST']"
},
{
"identifier": "algorithms",
"path": "domainbed/algorithms.py",
"snippet": "ALGORITHMS = [\n 'ERM',\n 'IRM',\n 'GroupDRO',\n 'Mixup',\n 'MLDG',\n 'CORAL',\n 'MMD',\n 'DANN',\n 'CDANN',\n 'MTL',\n 'SagNet',\n 'ARM',\n 'VREx',\n 'RSC',\n 'SD',\n 'MIRO'\n]\n D = self.my_cdist(x, y)\n K = torch.zeros_like(D)\ndef get_algorithm_class(algorithm_name):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def predict(self, x):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def predict(self, x):\n def forward(self, x):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def predict(self, x):\n def __init__(self, input_shape, num_classes, num_domains,\n hparams, conditional, class_balance):\n def update(self, minibatches, unlabeled=None):\n def predict(self, x):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def _irm_penalty(logits, y):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams, gaussian):\n def my_cdist(self, x1, x2):\n def gaussian_kernel(self, x, y, gamma=[0.001, 0.01, 0.1, 1, 10, 100,\n 1000]):\n def mmd(self, x, y):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def update_embeddings_(self, features, env=None):\n def predict(self, x, env=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def opt(p):\n def forward_c(self, x):\n def forward_s(self, x):\n def randomize(self, x, what=\"style\", eps=1e-5):\n def update(self, minibatches, unlabeled=None):\n def predict(self, x):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, network):\n def forward(self, x):\n def predict(self, x):\n def __init__(self, shape):\n def forward(self, x):\n def __init__(self, shape, init=0.1, channelwise=True, eps=1e-5):\n def forward(self, x):\ndef get_shapes(model, input_shape):\n def __init__(self, input_shape, num_classes, num_domains, hparams, **kwargs):\n def update(self, x, y, **kwargs):\n def predict(self, x):\n def get_forward_model(self):\nclass Algorithm(torch.nn.Module):\nclass ERM(Algorithm):\nclass ARM(ERM):\nclass AbstractDANN(Algorithm):\nclass DANN(AbstractDANN):\nclass CDANN(AbstractDANN):\nclass IRM(ERM):\nclass VREx(ERM):\nclass Mixup(ERM):\nclass GroupDRO(ERM):\nclass MLDG(ERM):\nclass AbstractMMD(ERM):\nclass MMD(AbstractMMD):\nclass CORAL(AbstractMMD):\nclass MTL(Algorithm):\nclass SagNet(Algorithm):\nclass 
RSC(ERM):\nclass SD(ERM):\nclass ForwardModel(nn.Module):\nclass MeanEncoder(nn.Module):\nclass VarianceEncoder(nn.Module):\nclass MIRO(Algorithm):"
},
{
"identifier": "misc",
"path": "domainbed/lib/misc.py",
"snippet": "def make_weights_for_balanced_classes(dataset):\ndef pdb():\ndef seed_hash(*args):\ndef print_separator():\ndef print_row(row, colwidth=10, latex=False):\n def format_val(x):\n def __init__(self, underlying_dataset, keys):\n def __getitem__(self, key):\n def __len__(self):\ndef split_dataset(dataset, n, seed=0):\ndef random_pairs_of_minibatches(minibatches):\ndef accuracy(network, loader, weights, device):\ndef softmax_entropy(x: torch.Tensor) -> torch.Tensor:\ndef accuracy_ent(network, loader, weights, device, adapt=False):\n def __init__(self, fname, mode=\"a\"):\n def write(self, message):\n def flush(self):\nclass _SplitDataset(torch.utils.data.Dataset):\nclass Tee:"
},
{
"identifier": "accuracy_ent",
"path": "domainbed/lib/misc.py",
"snippet": "def accuracy_ent(network, loader, weights, device, adapt=False):\n correct = 0\n total = 0\n weights_offset = 0\n ent = 0\n \n if adapt == False:\n network.eval()\n #with torch.no_grad():\n for x, y in loader:\n x = x.to(device)\n y = y.to(device)\n if adapt is None:\n p = network(x)\n else:\n p = network(x, adapt)\n if weights is None:\n batch_weights = torch.ones(len(p)) # x\n else:\n batch_weights = weights[weights_offset: weights_offset + len(x)]\n weights_offset += len(x)\n batch_weights = batch_weights.to(device)\n if len(p) != len(x):\n y = torch.cat((y,y))\n if p.size(1) == 1:\n correct += (p.gt(0).eq(y).float() * batch_weights.view(-1, 1)).sum().item()\n else:\n correct += (p.argmax(1).eq(y).float() * batch_weights).sum().item()\n total += batch_weights.sum().item()\n ent += softmax_entropy(p).sum().item()\n if adapt == False:\n network.train()\n\n return correct / total, ent / total"
},
{
"identifier": "InfiniteDataLoader",
"path": "domainbed/lib/fast_data_loader.py",
"snippet": "class InfiniteDataLoader:\n def __init__(self, dataset, weights, batch_size, num_workers):\n super().__init__()\n\n if weights is None:\n sampler = torch.utils.data.RandomSampler(dataset,\n replacement=True)\n else:\n sampler = torch.utils.data.WeightedRandomSampler(weights,\n replacement=True,\n num_samples=batch_size)\n\n # if weights is None:\n # weights = torch.ones(len(dataset))\n\n batch_sampler = torch.utils.data.BatchSampler(\n sampler,\n batch_size=batch_size,\n drop_last=True)\n\n self._infinite_iterator = iter(torch.utils.data.DataLoader(\n dataset,\n num_workers=num_workers,\n batch_sampler=_InfiniteSampler(batch_sampler)\n ))\n\n def __iter__(self):\n while True:\n yield next(self._infinite_iterator)\n\n def __len__(self):\n raise ValueError"
},
{
"identifier": "FastDataLoader",
"path": "domainbed/lib/fast_data_loader.py",
"snippet": "class FastDataLoader:\n \"\"\"DataLoader wrapper with slightly improved speed by not respawning worker\n processes at every epoch.\"\"\"\n def __init__(self, dataset, batch_size, num_workers):\n super().__init__()\n\n batch_sampler = torch.utils.data.BatchSampler(\n torch.utils.data.RandomSampler(dataset, replacement=False),\n batch_size=batch_size,\n drop_last=False\n )\n\n self._infinite_iterator = iter(torch.utils.data.DataLoader(\n dataset,\n num_workers=num_workers,\n batch_sampler=_InfiniteSampler(batch_sampler)\n ))\n\n self._length = len(batch_sampler)\n\n def __iter__(self):\n for _ in range(len(self)):\n yield next(self._infinite_iterator)\n\n def __len__(self):\n return self._length"
},
{
"identifier": "DataParallelPassthrough",
"path": "domainbed/lib/fast_data_loader.py",
"snippet": "class DataParallelPassthrough(torch.nn.DataParallel):\n def __getattr__(self, name):\n try:\n return super().__getattr__(name)\n except AttributeError:\n return getattr(self.module, name)"
},
{
"identifier": "model_selection",
"path": "domainbed/model_selection.py",
"snippet": "def get_test_records(records):\n def __init__(self):\n def run_acc(self, run_records):\n def hparams_accs(self, records):\n def sweep_acc(self, records):\n def run_acc(self, run_records):\n def _step_acc(self, record):\n def run_acc(self, run_records):\n def _step_acc(self, record):\n def run_acc(self, run_records):\n def _step_acc(self, records):\n def run_acc(self, records):\nclass SelectionMethod:\nclass OracleSelectionMethod(SelectionMethod):\nclass IIDAccuracySelectionMethod(SelectionMethod):\nclass IIDAccuracySelectionMethod_Adaption(SelectionMethod):\nclass LeaveOneOutSelectionMethod(SelectionMethod):"
},
{
"identifier": "Q",
"path": "domainbed/lib/query.py",
"snippet": "class Q(object):\n def __init__(self, list_):\n super(Q, self).__init__()\n self._list = list_\n\n def __len__(self):\n return len(self._list)\n\n def __getitem__(self, key):\n return self._list[key]\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self._list == other._list\n else:\n return self._list == other\n\n def __str__(self):\n return str(self._list)\n\n def __repr__(self):\n return repr(self._list)\n\n def _append(self, item):\n \"\"\"Unsafe, be careful you know what you're doing.\"\"\"\n self._list.append(item)\n\n def group(self, selector):\n \"\"\"\n Group elements by selector and return a list of (group, group_records)\n tuples.\n \"\"\"\n selector = make_selector_fn(selector)\n groups = {}\n for x in self._list:\n group = selector(x)\n group_key = hashable(group)\n if group_key not in groups:\n groups[group_key] = (group, Q([]))\n groups[group_key][1]._append(x)\n results = [groups[key] for key in sorted(groups.keys())]\n return Q(results)\n\n def group_map(self, selector, fn):\n \"\"\"\n Group elements by selector, apply fn to each group, and return a list\n of the results.\n \"\"\"\n return self.group(selector).map(fn)\n\n def map(self, fn):\n \"\"\"\n map self onto fn. If fn takes multiple args, tuple-unpacking\n is applied.\n \"\"\"\n if len(inspect.signature(fn).parameters) > 1:\n return Q([fn(*x) for x in self._list])\n else:\n return Q([fn(x) for x in self._list])\n\n def select(self, selector):\n selector = make_selector_fn(selector)\n return Q([selector(x) for x in self._list])\n\n def min(self):\n return min(self._list)\n\n def max(self):\n return max(self._list)\n\n def sum(self):\n return sum(self._list)\n\n def len(self):\n return len(self._list)\n\n def mean(self):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return float(np.mean(self._list))\n\n def std(self):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return float(np.std(self._list))\n\n def mean_std(self):\n return (self.mean(), self.std())\n\n def argmax(self, selector):\n selector = make_selector_fn(selector)\n return max(self._list, key=selector)\n\n def filter(self, fn):\n return Q([x for x in self._list if fn(x)])\n\n def filter_equals(self, selector, value):\n \"\"\"like [x for x in y if x.selector == value]\"\"\"\n selector = make_selector_fn(selector)\n return self.filter(lambda r: selector(r) == value)\n\n def filter_in(self, selector, values):\n assert isinstance(values, list)\n selector = make_selector_fn(selector)\n return self.filter(lambda r: selector(r) in values)\n\n def filter_not_none(self):\n return self.filter(lambda r: r is not None)\n\n def filter_not_nan(self):\n return self.filter(lambda r: not np.isnan(r))\n\n def flatten(self):\n return Q([y for x in self._list for y in x])\n\n def unique(self):\n result = []\n result_set = set()\n for x in self._list:\n hashable_x = hashable(x)\n if hashable_x not in result_set:\n result_set.add(hashable_x)\n result.append(x)\n return Q(result)\n\n def sorted(self, key=None):\n if key is None:\n key = lambda x: x\n def key2(x):\n x = key(x)\n if isinstance(x, (np.floating, float)) and np.isnan(x):\n return float('-inf')\n else:\n return x\n return Q(sorted(self._list, key=key2))"
},
{
"identifier": "adapt_algorithms",
"path": "domainbed/adapt_algorithms.py",
"snippet": "ALGORITHMS = [\n 'T3A', \n 'TentFull', \n 'TentNorm', \n 'TentPreBN', # Tent-BN in the paper\n 'TentClf', # Tent-C in the paper\n 'PseudoLabel', \n 'PLClf', \n 'SHOT', \n 'SHOTIM',\n 'T3A_Aug',\n 'UniDG',\n]\ndef get_tta_transforms(gaussian_std: float=0.005, soft=False, clip_inputs=False):\ndef get_algorithm_class(algorithm_name):\n def __init__(self, input_shape, num_classes, num_domains, hparams, algorithm):\n def forward(self, x, adapt=False):\n def select_supports(self):\n def predict(self, x, adapt=False):\n def reset(self):\n def __init__(self, input_shape, num_classes, num_domains, hparams, algorithm):\n def forward(self, x, adapt=False):\n def select_supports(self):\n def predict(self, x, adapt=False):\n def reset(self):\n def __init__(self, input_shape, num_classes, num_domains, hparams, algorithm):\n def forward(self, x, adapt=False):\n def forward_and_adapt(self, x, model, optimizer):\n def configure_model_optimizer(self, algorithm, alpha):\n def reset(self):\n def forward(self, x, adapt=False):\n def configure_model_optimizer(self, algorithm, alpha):\n def configure_model_optimizer(self, algorithm, alpha):\ndef configure_model(model):\ndef copy_model_and_optimizer(model, optimizer):\ndef load_model_and_optimizer(model, optimizer, model_state, optimizer_state):\ndef softmax_entropy(x: torch.Tensor) -> torch.Tensor:\n def __init__(self, m, num_features, **kwargs):\n def forward(self, x):\n def predict(self, x):\ndef collect_params(model):\n def __init__(self, input_shape, num_classes, num_domains, hparams, algorithm):\n def forward(self, x, adapt=False):\n def forward_and_adapt(self, x, model, optimizer):\n def configure_model_optimizer(self, algorithm, alpha):\n def predict(self, x, adapt=False):\n def reset(self):\n def configure_model_optimizer(self, algorithm, alpha):\n def predict(self, x, adapt=False):\n def reset(self):\n def __init__(self, input_shape, num_classes, num_domains, hparams, algorithm):\n def forward(self, x, adapt=False):\n def forward_and_adapt(self, x, model, optimizer):\n def loss(self, outputs):\n def configure_model_optimizer(self, algorithm, alpha):\n def reset(self):\n def loss(self, outputs):\n def __init__(self, input_shape, num_classes, num_domains, hparams, algorithm):\n def forward(self, x, adapt=False):\n def select_supports(self):\n def predict(self, x, adapt=False):\n def reset(self):\nclass T3A_Aug(Algorithm):\nclass T3A(Algorithm):\nclass TentFull(Algorithm):\nclass TentNorm(TentFull):\nclass TentPreBN(TentFull):\nclass TentClf(TentFull):\nclass PreBN(torch.nn.Module):\nclass PseudoLabel(Algorithm):\nclass PLClf(PseudoLabel):\nclass SHOT(Algorithm):\nclass SHOTIM(SHOT): \nclass UniDG(Algorithm):"
}
] | import argparse
import collections
import json
import os
import random
import sys
import time
import uuid
import itertools
import copy
import numpy as np
import PIL
import torch
import torchvision
import torch.utils.data
import matplotlib.pyplot as plt
from argparse import Namespace
from itertools import chain
from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed.lib import misc
from domainbed.lib.misc import accuracy_ent
from domainbed.lib.fast_data_loader import InfiniteDataLoader, FastDataLoader, DataParallelPassthrough
from domainbed import model_selection
from domainbed.lib.query import Q
from domainbed import adapt_algorithms
from MulticoreTSNE import MulticoreTSNE as TSNE | 7,900 | print("\tPython: {}".format(sys.version.split(" ")[0]))
print("\tPyTorch: {}".format(torch.__version__))
print("\tTorchvision: {}".format(torchvision.__version__))
print("\tCUDA: {}".format(torch.version.cuda))
print("\tCUDNN: {}".format(torch.backends.cudnn.version()))
print("\tNumPy: {}".format(np.__version__))
print("\tPIL: {}".format(PIL.__version__))
print('Args:')
for k, v in sorted(vars(args).items()):
print('\t{}: {}'.format(k, v))
if args.hparams_seed == 0:
hparams = hparams_registry.default_hparams(args.algorithm, args.dataset)
else:
hparams = hparams_registry.random_hparams(args.algorithm, args.dataset,
misc.seed_hash(args.hparams_seed, args.trial_seed))
if args.hparams:
hparams.update(json.loads(args.hparams))
print('HParams:')
for k, v in sorted(hparams.items()):
print('\t{}: {}'.format(k, v))
assert os.path.exists(os.path.join(args.output_dir, 'done'))
assert os.path.exists(os.path.join(args.output_dir, 'IID_best.pkl')) # IID_best is produced by train.py
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
if args.dataset in vars(datasets):
dataset = vars(datasets)[args.dataset](args.data_dir,
args.test_envs, hparams)
else:
raise NotImplementedError
# Split each env into an 'in-split' and an 'out-split'. We'll train on
# each in-split except the test envs, and evaluate on all splits.
# To allow unsupervised domain adaptation experiments, we split each test
# env into 'in-split', 'uda-split' and 'out-split'. The 'in-split' is used
# by collect_results.py to compute classification accuracies. The
    # 'out-split' is used by the Oracle model selection method. The unlabeled
# samples in 'uda-split' are passed to the algorithm at training time if
# args.task == "domain_adaptation". If we are interested in comparing
# domain generalization and domain adaptation results, then domain
# generalization algorithms should create the same 'uda-splits', which will
    # be discarded at training.
in_splits = []
out_splits = []
uda_splits = []
for env_i, env in enumerate(dataset):
uda = []
out, in_ = misc.split_dataset(env,
int(len(env)*args.holdout_fraction),
misc.seed_hash(args.trial_seed, env_i))
if env_i in args.test_envs:
uda, in_ = misc.split_dataset(in_,
int(len(in_)*args.uda_holdout_fraction),
misc.seed_hash(args.trial_seed, env_i))
if hparams['class_balanced']:
in_weights = misc.make_weights_for_balanced_classes(in_)
out_weights = misc.make_weights_for_balanced_classes(out)
if uda is not None:
uda_weights = misc.make_weights_for_balanced_classes(uda)
else:
in_weights, out_weights, uda_weights = None, None, None
in_splits.append((in_, in_weights))
out_splits.append((out, out_weights))
if len(uda):
uda_splits.append((uda, uda_weights))
    # Use out splits as training data (for a fair comparison with train.py)
train_loaders = [FastDataLoader(
dataset=env,
batch_size=hparams['batch_size'],
num_workers=dataset.N_WORKERS)
for i, (env, env_weights) in enumerate(out_splits)
if i in args.test_envs]
uda_loaders = [InfiniteDataLoader(
dataset=env,
weights=env_weights,
batch_size=hparams['batch_size'],
num_workers=dataset.N_WORKERS)
for i, (env, env_weights) in enumerate(uda_splits)
if i in args.test_envs]
eval_loaders = [FastDataLoader(
dataset=env,
batch_size=args.test_batch_size,
num_workers=dataset.N_WORKERS)
for env, _ in (in_splits + out_splits + uda_splits)]
eval_weights = [None for _, weights in (in_splits + out_splits + uda_splits)]
eval_loader_names = ['env{}_in'.format(i)
for i in range(len(in_splits))]
eval_loader_names += ['env{}_out'.format(i)
for i in range(len(out_splits))]
eval_loader_names += ['env{}_uda'.format(i)
for i in range(len(uda_splits))]
algorithm_class = algorithms.get_algorithm_class(args.algorithm)
algorithm = algorithm_class(dataset.input_shape, dataset.num_classes,
len(dataset) - len(args.test_envs), hparams)
if algorithm_dict is not None:
algorithm.load_state_dict(algorithm_dict)
algorithm.to(device)
if hasattr(algorithm, 'network'):
| # The code is modified from domainbed.scripts.train
def softmax_entropy(x: torch.Tensor) -> torch.Tensor:
"""Entropy of softmax distribution from logits."""
return -(x.softmax(1) * x.log_softmax(1)).sum(1)
class Dataset:
def __init__(self, x, y):
self.x = x
self.y = y
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
def generate_featurelized_loader(loader, network, classifier, batch_size=128):
"""
    The classifier adaptation does not need to repeat the heavy forward path,
    so we sped up the experiments by converting the observations into representations.
"""
z_list = []
y_list = []
p_list = []
network.eval()
classifier.eval()
for x, y in loader:
x = x.to(device)
z = network(x)
p = classifier(z)
z_list.append(z.detach().cpu())
y_list.append(y.detach().cpu())
p_list.append(p.detach().cpu())
# p_list.append(p.argmax(1).float().cpu().detach())
network.train()
classifier.train()
z = torch.cat(z_list)
y = torch.cat(y_list)
p = torch.cat(p_list)
ent = softmax_entropy(p)
py = p.argmax(1).float().cpu().detach()
dataset1, dataset2 = Dataset(z, y), Dataset(z, py)
loader1 = torch.utils.data.DataLoader(dataset1, batch_size=batch_size, shuffle=False, drop_last=True)
loader2 = torch.utils.data.DataLoader(dataset2, batch_size=batch_size, shuffle=False, drop_last=True)
return loader1, loader2, ent
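# Illustrative usage sketch (added for clarity; not part of the original script and never
# called): `featurizer`, `classifier_head` and `raw_loader` are hypothetical names for a
# feature extractor, its linear head, and a raw image loader. Caching the representations
# once lets classifier-only adaptation run without repeating the heavy featurizer forward pass.
# Note: this relies on the global `device` defined under __main__ below, so it is only a sketch.
def _featurized_adaptation_example(featurizer, classifier_head, raw_loader):
    feat_loader, pseudo_loader, ent = generate_featurelized_loader(
        raw_loader, network=featurizer, classifier=classifier_head, batch_size=128)
    for z, y in feat_loader:  # z: cached features, y: ground-truth labels
        _ = classifier_head(z.to(device))  # cheap forward pass on features only
    return ent.mean()  # average prediction entropy over the evaluated samples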
def visualize_tsne(network, loader, weights, device, adapt, env, name):
print("Start visualizing {}...".format(name))
if adapt:
flag = 'Adapted'
else:
flag = 'Base'
network.eval()
for x, y in loader:
x = x.to(device)
y = y.to(device)
if adapt is False:
p = network(x)
else:
p = network(x, adapt)
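    # Note: only the activations of the last evaluated batch are kept below, so the
    # t-SNE embedding visualizes a single batch of test samples.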
x = p.detach().cpu().numpy()
tsne = TSNE(n_components=2).fit_transform(x)
label = np.squeeze(y.cpu().numpy())
plt.figure(figsize=(6, 6))
size=100
line=0.7
t=.8
# plt.scatter(tsne[:, 0], tsne[:, 1], c=label,cmap=plt.get_cmap('hsv'),marker = 'o',linewidths=line,alpha=t,edgecolors='black')
plt.scatter(tsne[:, 0], tsne[:, 1], c=label,cmap=plt.get_cmap('terrain'),marker = 'o',linewidths=line,alpha=t,edgecolors='black')
plt.axis('off')
plt.colorbar()
plt.savefig('./visualization/vis_test_{}_{}_{}.jpg'.format(env,flag,name))
print("Visualization Results Saved...")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Domain generalization')
parser.add_argument('--input_dir', type=str)
parser.add_argument('--adapt_algorithm', type=str, default="UniDG")
args_in = parser.parse_args()
epochs_path = os.path.join(args_in.input_dir, 'results.jsonl')
records = []
with open(epochs_path, 'r') as f:
for line in f:
records.append(json.loads(line[:-1]))
records = Q(records)
r = records[0]
args = Namespace(**r['args'])
print(args)
args.input_dir = args_in.input_dir
if '-' in args_in.adapt_algorithm:
args.adapt_algorithm, test_batch_size = args_in.adapt_algorithm.split('-')
args.test_batch_size = int(test_batch_size)
else:
args.adapt_algorithm = args_in.adapt_algorithm
args.test_batch_size = 128 # default
args.output_dir = args.input_dir
alg_name = args_in.adapt_algorithm
if args.adapt_algorithm in['T3A', 'TentPreBN', 'TentClf', 'PLClf']:
use_featurer_cache = True
else:
use_featurer_cache = False
if os.path.exists(os.path.join(args.output_dir, 'done_{}'.format(alg_name))):
print("{} has already excecuted".format(alg_name))
# If we ever want to implement checkpointing, just persist these values
# every once in a while, and then load them from disk here.
algorithm_dict = None
# os.makedirs(args.output_dir, exist_ok=True)
sys.stdout = misc.Tee(os.path.join(args.output_dir, 'out_{}.txt'.format(alg_name)))
sys.stderr = misc.Tee(os.path.join(args.output_dir, 'err_{}.txt'.format(alg_name)))
print("Environment:")
print("\tPython: {}".format(sys.version.split(" ")[0]))
print("\tPyTorch: {}".format(torch.__version__))
print("\tTorchvision: {}".format(torchvision.__version__))
print("\tCUDA: {}".format(torch.version.cuda))
print("\tCUDNN: {}".format(torch.backends.cudnn.version()))
print("\tNumPy: {}".format(np.__version__))
print("\tPIL: {}".format(PIL.__version__))
print('Args:')
for k, v in sorted(vars(args).items()):
print('\t{}: {}'.format(k, v))
if args.hparams_seed == 0:
hparams = hparams_registry.default_hparams(args.algorithm, args.dataset)
else:
hparams = hparams_registry.random_hparams(args.algorithm, args.dataset,
misc.seed_hash(args.hparams_seed, args.trial_seed))
if args.hparams:
hparams.update(json.loads(args.hparams))
print('HParams:')
for k, v in sorted(hparams.items()):
print('\t{}: {}'.format(k, v))
assert os.path.exists(os.path.join(args.output_dir, 'done'))
assert os.path.exists(os.path.join(args.output_dir, 'IID_best.pkl')) # IID_best is produced by train.py
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
if args.dataset in vars(datasets):
dataset = vars(datasets)[args.dataset](args.data_dir,
args.test_envs, hparams)
else:
raise NotImplementedError
# Split each env into an 'in-split' and an 'out-split'. We'll train on
# each in-split except the test envs, and evaluate on all splits.
# To allow unsupervised domain adaptation experiments, we split each test
# env into 'in-split', 'uda-split' and 'out-split'. The 'in-split' is used
# by collect_results.py to compute classification accuracies. The
    # 'out-split' is used by the Oracle model selection method. The unlabeled
# samples in 'uda-split' are passed to the algorithm at training time if
# args.task == "domain_adaptation". If we are interested in comparing
# domain generalization and domain adaptation results, then domain
# generalization algorithms should create the same 'uda-splits', which will
    # be discarded at training.
in_splits = []
out_splits = []
uda_splits = []
for env_i, env in enumerate(dataset):
uda = []
out, in_ = misc.split_dataset(env,
int(len(env)*args.holdout_fraction),
misc.seed_hash(args.trial_seed, env_i))
if env_i in args.test_envs:
uda, in_ = misc.split_dataset(in_,
int(len(in_)*args.uda_holdout_fraction),
misc.seed_hash(args.trial_seed, env_i))
if hparams['class_balanced']:
in_weights = misc.make_weights_for_balanced_classes(in_)
out_weights = misc.make_weights_for_balanced_classes(out)
if uda is not None:
uda_weights = misc.make_weights_for_balanced_classes(uda)
else:
in_weights, out_weights, uda_weights = None, None, None
in_splits.append((in_, in_weights))
out_splits.append((out, out_weights))
if len(uda):
uda_splits.append((uda, uda_weights))
    # Use out splits as training data (for a fair comparison with train.py)
train_loaders = [FastDataLoader(
dataset=env,
batch_size=hparams['batch_size'],
num_workers=dataset.N_WORKERS)
for i, (env, env_weights) in enumerate(out_splits)
if i in args.test_envs]
uda_loaders = [InfiniteDataLoader(
dataset=env,
weights=env_weights,
batch_size=hparams['batch_size'],
num_workers=dataset.N_WORKERS)
for i, (env, env_weights) in enumerate(uda_splits)
if i in args.test_envs]
eval_loaders = [FastDataLoader(
dataset=env,
batch_size=args.test_batch_size,
num_workers=dataset.N_WORKERS)
for env, _ in (in_splits + out_splits + uda_splits)]
eval_weights = [None for _, weights in (in_splits + out_splits + uda_splits)]
eval_loader_names = ['env{}_in'.format(i)
for i in range(len(in_splits))]
eval_loader_names += ['env{}_out'.format(i)
for i in range(len(out_splits))]
eval_loader_names += ['env{}_uda'.format(i)
for i in range(len(uda_splits))]
algorithm_class = algorithms.get_algorithm_class(args.algorithm)
algorithm = algorithm_class(dataset.input_shape, dataset.num_classes,
len(dataset) - len(args.test_envs), hparams)
if algorithm_dict is not None:
algorithm.load_state_dict(algorithm_dict)
algorithm.to(device)
if hasattr(algorithm, 'network'): | algorithm.network = DataParallelPassthrough(algorithm.network) | 7 | 2023-10-15 14:26:12+00:00 | 12k |
jianlanluo/SAQ | vqn/brac_main.py | [
{
"identifier": "SAC",
"path": "vqn/brac.py",
"snippet": "class SAC(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.discount = 0.99\n config.alpha_multiplier = 1.0\n config.use_automatic_entropy_tuning = False\n config.backup_entropy = False\n config.target_entropy = 0.0\n config.policy_lr = 3e-4\n config.behavior_policy_lr = 3e-4\n config.behavior_policy_weight_decay = 0.0\n config.qf_lr = 3e-4\n config.qf_weight_decay = 0.0\n config.optimizer_type = 'adam'\n config.soft_target_update_rate = 5e-3\n config.brac_policy_kl_weight = 1.0\n config.brac_q_kl_weight = 1.0\n\n if updates is not None:\n config.update(ConfigDict(updates).copy_and_resolve_references())\n return config\n\n def __init__(self, config, behavior_policy, policy, qf):\n self.config = self.get_default_config(config)\n self.policy = policy\n self.behavior_policy = behavior_policy\n self.qf = qf\n self.observation_dim = policy.observation_dim\n self.action_dim = policy.action_dim\n\n self._train_states = {}\n\n optimizer_class = {\n 'adam': optax.adam,\n 'sgd': optax.sgd,\n }[self.config.optimizer_type]\n\n policy_params = self.policy.init(\n next_rng(self.policy.rng_keys()),\n jnp.zeros((10, self.observation_dim))\n )\n self._train_states['policy'] = TrainState.create(\n params=policy_params,\n tx=optimizer_class(self.config.policy_lr),\n apply_fn=None\n )\n\n behavior_policy_params = self.behavior_policy.init(\n next_rng(self.behavior_policy.rng_keys()),\n jnp.zeros((10, self.observation_dim))\n )\n self._train_states['behavior_policy'] = TrainState.create(\n params=behavior_policy_params,\n tx=optax.adamw(\n self.config.behavior_policy_lr, \n weight_decay=self.config.behavior_policy_weight_decay),\n apply_fn=None\n )\n self._behavior_policy_total_steps = 0\n\n qf1_params = self.qf.init(\n next_rng(self.qf.rng_keys()),\n jnp.zeros((10, self.observation_dim)),\n jnp.zeros((10, self.action_dim))\n )\n self._train_states['qf1'] = TrainState.create(\n params=qf1_params,\n tx=optax.adamw(self.config.qf_lr, weight_decay=self.config.qf_weight_decay),\n apply_fn=None,\n )\n qf2_params = self.qf.init(\n next_rng(self.qf.rng_keys()),\n jnp.zeros((10, self.observation_dim)),\n jnp.zeros((10, self.action_dim))\n )\n self._train_states['qf2'] = TrainState.create(\n params=qf2_params,\n tx=optax.adamw(self.config.qf_lr, weight_decay=self.config.qf_weight_decay),\n apply_fn=None,\n )\n self._target_qf_params = deepcopy({'qf1': qf1_params, 'qf2': qf2_params})\n\n model_keys = ['policy', 'behavior_policy', 'qf1', 'qf2']\n\n if self.config.use_automatic_entropy_tuning:\n self.log_alpha = Scalar(0.0)\n self._train_states['log_alpha'] = TrainState.create(\n params=self.log_alpha.init(next_rng()),\n tx=optimizer_class(self.config.policy_lr),\n apply_fn=None\n )\n model_keys.append('log_alpha')\n\n self._model_keys = tuple(model_keys)\n self._total_steps = 0\n\n def train_behavior_policy(self, batch):\n self._train_states['behavior_policy'], metrics = self._behavior_policy_train_step(\n next_rng(), self._train_states['behavior_policy'], batch\n )\n self._behavior_policy_total_steps += 1\n return metrics\n\n def copy_behavior_policy_to_policy(self):\n self._train_states['policy'] = self._train_states['policy'].replace(\n params=self._train_states['behavior_policy'].params\n )\n\n @partial(jax.jit, static_argnames=('self', ))\n def _behavior_policy_train_step(self, rng, train_state, batch):\n observations = batch['observations']\n actions = batch['actions']\n rng_generator = JaxRNG(rng)\n\n @partial(jax.grad, has_aux=True)\n def 
grad_fn(train_param, rng):\n observations = batch['observations']\n actions = batch['actions']\n\n @wrap_function_with_rng(rng_generator())\n def forward_behavior_policy(rng, *args, **kwargs):\n return self.behavior_policy.apply(\n *args, **kwargs,\n rngs=JaxRNG(rng)(self.policy.rng_keys())\n )\n \n log_probs = forward_behavior_policy(train_param, observations, actions, method=self.behavior_policy.log_prob)\n log_probs = jnp.mean(log_probs, axis=-1)\n policy_loss = - log_probs\n\n return policy_loss, locals()\n grads, aux_values = grad_fn(train_state.params, rng)\n new_train_state = train_state.apply_gradients(grads=grads)\n metrics = collect_jax_metrics(\n aux_values,\n ['policy_loss', 'log_probs'],\n )\n return new_train_state, metrics\n\n def train(self, batch):\n self._total_steps += 1\n self._train_states, self._target_qf_params, metrics = self._train_step(\n self._train_states, self._target_qf_params, next_rng(), batch\n )\n return metrics\n\n @partial(jax.jit, static_argnames='self')\n def _train_step(self, train_states, target_qf_params, rng, batch):\n rng_generator = JaxRNG(rng)\n\n def loss_fn(train_params, rng):\n observations = batch['observations']\n actions = batch['actions']\n rewards = batch['rewards']\n next_observations = batch['next_observations']\n dones = batch['dones']\n\n loss_collection = {}\n\n @wrap_function_with_rng(rng_generator())\n def forward_policy(rng, *args, **kwargs):\n return self.policy.apply(\n *args, **kwargs,\n rngs=JaxRNG(rng)(self.policy.rng_keys())\n )\n\n @wrap_function_with_rng(rng_generator())\n def forward_behavior_policy(rng, *args, **kwargs):\n return self.behavior_policy.apply(\n *args, **kwargs,\n rngs=JaxRNG(rng)(self.policy.rng_keys())\n )\n\n @wrap_function_with_rng(rng_generator())\n def forward_qf(rng, *args, **kwargs):\n return self.qf.apply(\n *args, **kwargs,\n rngs=JaxRNG(rng)(self.qf.rng_keys())\n )\n\n new_actions, log_pi = forward_policy(train_params['policy'], observations)\n log_pi_beta = forward_behavior_policy(\n train_params['behavior_policy'], observations, new_actions,\n method=self.behavior_policy.log_prob)\n if self.config.use_automatic_entropy_tuning:\n alpha_loss = -self.log_alpha.apply(train_params['log_alpha']) * (log_pi + self.config.target_entropy).mean()\n loss_collection['log_alpha'] = alpha_loss\n alpha = jnp.exp(self.log_alpha.apply(train_params['log_alpha'])) * self.config.alpha_multiplier\n else:\n alpha_loss = 0.0\n alpha = self.config.alpha_multiplier\n\n \"\"\" Policy loss \"\"\"\n q_new_actions = jnp.minimum(\n forward_qf(train_params['qf1'], observations, new_actions),\n forward_qf(train_params['qf2'], observations, new_actions),\n )\n\n brac_kl = log_pi - log_pi_beta\n policy_loss = (self.config.brac_policy_kl_weight * brac_kl - q_new_actions).mean()\n loss_collection['policy'] = policy_loss\n\n \"\"\" Q function loss \"\"\"\n q1_pred = forward_qf(train_params['qf1'], observations, actions)\n q2_pred = forward_qf(train_params['qf2'], observations, actions)\n\n new_next_actions, next_log_pi = forward_policy(train_params['policy'], next_observations)\n target_q_values = jnp.minimum(\n forward_qf(target_qf_params['qf1'], next_observations, new_next_actions),\n forward_qf(target_qf_params['qf2'], next_observations, new_next_actions),\n )\n next_log_pi_beta = forward_behavior_policy(\n train_params['behavior_policy'], next_observations, new_next_actions,\n method=self.behavior_policy.log_prob)\n next_brac_kl = next_log_pi - next_log_pi_beta\n target_q_values = target_q_values - self.config.brac_q_kl_weight 
* next_brac_kl\n if self.config.backup_entropy:\n target_q_values = target_q_values - alpha * next_log_pi\n\n q_target = jax.lax.stop_gradient(\n rewards + (1. - dones) * self.config.discount * target_q_values\n )\n qf1_loss = mse_loss(q1_pred, q_target)\n qf2_loss = mse_loss(q2_pred, q_target)\n\n loss_collection['qf1'] = qf1_loss\n loss_collection['qf2'] = qf2_loss\n loss_collection['behavior_policy'] = 0.0\n\n return tuple(loss_collection[key] for key in self.model_keys), locals()\n\n train_params = {key: train_states[key].params for key in self.model_keys}\n (_, aux_values), grads = value_and_multi_grad(loss_fn, len(self.model_keys), has_aux=True)(train_params, rng)\n\n new_train_states = {\n key: train_states[key].apply_gradients(grads=grads[i][key])\n for i, key in enumerate(self.model_keys)\n }\n new_target_qf_params = {}\n new_target_qf_params['qf1'] = update_target_network(\n new_train_states['qf1'].params, target_qf_params['qf1'],\n self.config.soft_target_update_rate\n )\n new_target_qf_params['qf2'] = update_target_network(\n new_train_states['qf2'].params, target_qf_params['qf2'],\n self.config.soft_target_update_rate\n )\n\n metrics = collect_jax_metrics(\n aux_values,\n ['log_pi', 'log_pi_beta','next_log_pi_beta', 'next_log_pi',\n 'policy_loss', 'brac_kl', 'next_brac_kl', 'qf1_loss', 'qf2_loss', 'alpha_loss',\n 'alpha', 'q1_pred', 'q2_pred', 'target_q_values']\n )\n return new_train_states, new_target_qf_params, metrics\n\n @property\n def model_keys(self):\n return self._model_keys\n\n @property\n def train_states(self):\n return self._train_states\n\n @property\n def train_params(self):\n return {key: self.train_states[key].params for key in self.model_keys}\n\n @property\n def total_steps(self):\n return self._total_steps"
},
{
"identifier": "ReplayBuffer",
"path": "vqn/replay_buffer.py",
"snippet": "class ReplayBuffer(object):\n def __init__(self, max_size, data=None):\n self._max_size = max_size\n self._next_idx = 0\n self._size = 0\n self._initialized = False\n self._total_steps = 0\n\n if data is not None:\n if self._max_size < data['observations'].shape[0]:\n self._max_size = data['observations'].shape[0]\n self.add_batch(data)\n\n def __len__(self):\n return self._size\n\n def _init_storage(self, observation_dim, action_dim):\n self._observation_dim = observation_dim\n self._action_dim = action_dim\n self._observations = np.zeros((self._max_size, observation_dim), dtype=np.float32)\n self._next_observations = np.zeros((self._max_size, observation_dim), dtype=np.float32)\n self._actions = np.zeros((self._max_size, action_dim), dtype=np.float32)\n self._rewards = np.zeros(self._max_size, dtype=np.float32)\n self._dones = np.zeros(self._max_size, dtype=np.float32)\n self._next_idx = 0\n self._size = 0\n self._initialized = True\n\n def add_sample(self, observation, action, reward, next_observation, done):\n if not self._initialized:\n self._init_storage(observation.size, action.size)\n\n self._observations[self._next_idx, :] = np.array(observation, dtype=np.float32)\n self._next_observations[self._next_idx, :] = np.array(next_observation, dtype=np.float32)\n self._actions[self._next_idx, :] = np.array(action, dtype=np.float32)\n self._rewards[self._next_idx] = reward\n self._dones[self._next_idx] = float(done)\n\n if self._size < self._max_size:\n self._size += 1\n self._next_idx = (self._next_idx + 1) % self._max_size\n self._total_steps += 1\n\n def add_traj(self, observations, actions, rewards, next_observations, dones):\n for o, a, r, no, d in zip(observations, actions, rewards, next_observations, dones):\n self.add_sample(o, a, r, no, d)\n\n def add_batch(self, batch):\n self.add_traj(\n batch['observations'], batch['actions'], batch['rewards'],\n batch['next_observations'], batch['dones']\n )\n\n def sample(self, batch_size):\n indices = np.random.randint(len(self), size=batch_size)\n return self.select(indices)\n\n def select(self, indices):\n return dict(\n observations=self._observations[indices, ...],\n actions=self._actions[indices, ...],\n rewards=self._rewards[indices, ...],\n next_observations=self._next_observations[indices, ...],\n dones=self._dones[indices, ...],\n )\n\n def generator(self, batch_size, n_batchs=None):\n i = 0\n while n_batchs is None or i < n_batchs:\n yield self.sample(batch_size)\n i += 1\n\n @property\n def total_steps(self):\n return self._total_steps\n\n @property\n def data(self):\n return dict(\n observations=self._observations[:self._size, ...],\n actions=self._actions[:self._size, ...],\n rewards=self._rewards[:self._size, ...],\n next_observations=self._next_observations[:self._size, ...],\n dones=self._dones[:self._size, ...]\n )"
},
{
"identifier": "get_d4rl_dataset",
"path": "vqn/replay_buffer.py",
"snippet": "def get_d4rl_dataset(env):\n dataset = d4rl.qlearning_dataset(env)\n return dict(\n observations=dataset['observations'],\n actions=dataset['actions'],\n next_observations=dataset['next_observations'],\n rewards=dataset['rewards'],\n dones=dataset['terminals'].astype(np.float32),\n )"
},
{
"identifier": "subsample_batch",
"path": "vqn/replay_buffer.py",
"snippet": "def subsample_batch(batch, size):\n indices = np.random.randint(batch['observations'].shape[0], size=size)\n return index_batch(batch, indices)"
},
{
"identifier": "batch_to_jax",
"path": "vqn/jax_utils.py",
"snippet": "@jax.jit\ndef batch_to_jax(batch):\n return jax.tree_util.tree_map(jax.device_put, batch)"
},
{
"identifier": "TanhGaussianPolicy",
"path": "vqn/model.py",
"snippet": "class TanhGaussianPolicy(nn.Module):\n observation_dim: int\n action_dim: int\n arch: str = '256-256'\n orthogonal_init: bool = False\n log_std_multiplier: float = 1.0\n log_std_offset: float = -1.0\n use_tanh: bool = True\n\n def setup(self):\n self.base_network = FullyConnectedNetwork(\n output_dim=2 * self.action_dim, arch=self.arch, orthogonal_init=self.orthogonal_init\n )\n self.log_std_multiplier_module = Scalar(self.log_std_multiplier)\n self.log_std_offset_module = Scalar(self.log_std_offset)\n\n def log_prob(self, observations, actions):\n if actions.ndim == 3:\n observations = extend_and_repeat(observations, 1, actions.shape[1])\n base_network_output = self.base_network(observations)\n mean, log_std = jnp.split(base_network_output, 2, axis=-1)\n log_std = self.log_std_multiplier_module() * log_std + self.log_std_offset_module()\n log_std = jnp.clip(log_std, -20.0, 2.0)\n action_distribution = distrax.MultivariateNormalDiag(mean, jnp.exp(log_std))\n if self.use_tanh:\n action_distribution = distrax.Transformed(\n action_distribution, distrax.Block(distrax.Tanh(), ndims=1)\n )\n return action_distribution.log_prob(actions)\n\n def __call__(self, observations, deterministic=False, repeat=None):\n if repeat is not None:\n observations = extend_and_repeat(observations, 1, repeat)\n base_network_output = self.base_network(observations)\n mean, log_std = jnp.split(base_network_output, 2, axis=-1)\n log_std = self.log_std_multiplier_module() * log_std + self.log_std_offset_module()\n log_std = jnp.clip(log_std, -20.0, 2.0)\n action_distribution = distrax.MultivariateNormalDiag(mean, jnp.exp(log_std))\n if self.use_tanh:\n action_distribution = distrax.Transformed(\n action_distribution, distrax.Block(distrax.Tanh(), ndims=1)\n )\n if deterministic:\n samples = mean\n if self.use_tanh:\n samples = jnp.tanh(samples)\n log_prob = action_distribution.log_prob(samples)\n else:\n samples, log_prob = action_distribution.sample_and_log_prob(seed=self.make_rng('noise'))\n\n return samples, log_prob\n\n @nn.nowrap\n def rng_keys(self):\n return ('params', 'noise')"
},
{
"identifier": "FullyConnectedQFunction",
"path": "vqn/model.py",
"snippet": "class FullyConnectedQFunction(nn.Module):\n observation_dim: int\n action_dim: int\n arch: str = '256-256'\n orthogonal_init: bool = False\n\n @nn.compact\n @multiple_action_q_function\n def __call__(self, observations, actions):\n x = jnp.concatenate([observations, actions], axis=-1)\n x = FullyConnectedNetwork(output_dim=1, arch=self.arch, orthogonal_init=self.orthogonal_init)(x)\n return jnp.squeeze(x, -1)\n\n @nn.nowrap\n def rng_keys(self):\n return ('params', )"
},
{
"identifier": "SamplerPolicy",
"path": "vqn/model.py",
"snippet": "class SamplerPolicy(object):\n\n def __init__(self, policy, params):\n self.policy = policy\n self.params = params\n\n def update_params(self, params):\n self.params = params\n return self\n\n @partial(jax.jit, static_argnames=('self', 'deterministic'))\n def act(self, params, rng, observations, deterministic):\n return self.policy.apply(\n params, observations, deterministic, repeat=None,\n rngs=JaxRNG(rng)(self.policy.rng_keys())\n )\n\n def __call__(self, observations, deterministic=False):\n actions, _ = self.act(self.params, next_rng(), observations, deterministic=deterministic)\n assert jnp.all(jnp.isfinite(actions))\n return jax.device_get(actions)"
},
{
"identifier": "StepSampler",
"path": "vqn/sampler.py",
"snippet": "class StepSampler(object):\n\n def __init__(self, env, max_traj_length=1000):\n self.max_traj_length = max_traj_length\n self._env = env\n self._traj_steps = 0\n self._current_observation = self.env.reset()\n\n def sample(self, policy, n_steps, deterministic=False, replay_buffer=None):\n observations = []\n actions = []\n rewards = []\n next_observations = []\n dones = []\n\n for _ in range(n_steps):\n self._traj_steps += 1\n observation = self._current_observation\n action = policy(observation.reshape(1, -1), deterministic=deterministic).reshape(-1)\n next_observation, reward, done, _ = self.env.step(action)\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n dones.append(done)\n next_observations.append(next_observation)\n\n if replay_buffer is not None:\n replay_buffer.add_sample(\n observation, action, reward, next_observation, done\n )\n\n self._current_observation = next_observation\n\n if done or self._traj_steps >= self.max_traj_length:\n self._traj_steps = 0\n self._current_observation = self.env.reset()\n\n return dict(\n observations=np.array(observations, dtype=np.float32),\n actions=np.array(actions, dtype=np.float32),\n rewards=np.array(rewards, dtype=np.float32),\n next_observations=np.array(next_observations, dtype=np.float32),\n dones=np.array(dones, dtype=np.float32),\n )\n\n @property\n def env(self):\n return self._env"
},
{
"identifier": "TrajSampler",
"path": "vqn/sampler.py",
"snippet": "class TrajSampler(object):\n\n def __init__(self, env, max_traj_length=1000):\n self.max_traj_length = max_traj_length\n self._env = env\n\n def sample(self, policy, n_trajs, replay_buffer=None, deterministic=False):\n trajs = []\n for _ in range(n_trajs):\n observations = []\n actions = []\n rewards = []\n next_observations = []\n dones = []\n\n observation = self.env.reset()\n\n for _ in range(self.max_traj_length):\n action = policy(observation.reshape(1, -1), deterministic=deterministic).reshape(-1)\n next_observation, reward, done, _ = self.env.step(action)\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n dones.append(done)\n next_observations.append(next_observation)\n\n if replay_buffer is not None:\n replay_buffer.add_sample(\n observation, action, reward, next_observation, done\n )\n\n observation = next_observation\n\n if done:\n break\n\n trajs.append(dict(\n observations=np.array(observations, dtype=np.float32),\n actions=np.array(actions, dtype=np.float32),\n rewards=np.array(rewards, dtype=np.float32),\n next_observations=np.array(next_observations, dtype=np.float32),\n dones=np.array(dones, dtype=np.float32),\n ))\n\n return trajs\n\n @property\n def env(self):\n return self._env"
},
{
"identifier": "Timer",
"path": "vqn/utils.py",
"snippet": "class Timer(object):\n\n def __init__(self):\n self._time = None\n\n def __enter__(self):\n self._start_time = time.time()\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self._time = time.time() - self._start_time\n\n def __call__(self):\n return self._time"
},
{
"identifier": "define_flags_with_default",
"path": "vqn/utils.py",
"snippet": "def define_flags_with_default(**kwargs):\n for key, val in kwargs.items():\n if isinstance(val, ConfigDict):\n config_flags.DEFINE_config_dict(key, val)\n elif isinstance(val, bool):\n # Note that True and False are instances of int.\n absl.flags.DEFINE_bool(key, val, 'automatically defined flag')\n elif isinstance(val, int):\n absl.flags.DEFINE_integer(key, val, 'automatically defined flag')\n elif isinstance(val, float):\n absl.flags.DEFINE_float(key, val, 'automatically defined flag')\n elif isinstance(val, str):\n absl.flags.DEFINE_string(key, val, 'automatically defined flag')\n else:\n raise ValueError('Incorrect value type')\n return kwargs"
},
{
"identifier": "set_random_seed",
"path": "vqn/utils.py",
"snippet": "def set_random_seed(seed):\n np.random.seed(seed)\n random.seed(seed)\n init_rng(seed)"
},
{
"identifier": "print_flags",
"path": "vqn/utils.py",
"snippet": "def print_flags(flags, flags_def):\n logging.info(\n 'Running training with hyperparameters: \\n{}'.format(\n pprint.pformat(\n ['{}: {}'.format(key, val) for key, val in get_user_flags(flags, flags_def).items()]\n )\n )\n )"
},
{
"identifier": "get_user_flags",
"path": "vqn/utils.py",
"snippet": "def get_user_flags(flags, flags_def):\n output = {}\n for key in flags_def:\n val = getattr(flags, key)\n if isinstance(val, ConfigDict):\n output.update(flatten_config_dict(val, prefix=key))\n else:\n output[key] = val\n\n return output"
},
{
"identifier": "prefix_metrics",
"path": "vqn/utils.py",
"snippet": "def prefix_metrics(metrics, prefix):\n return {\n '{}/{}'.format(prefix, key): value for key, value in metrics.items()\n }"
},
{
"identifier": "WandBLogger",
"path": "vqn/utils.py",
"snippet": "class WandBLogger(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.online = False\n config.prefix = 'JaxCQL'\n config.project = ''\n config.output_dir = '/tmp/JaxCQL'\n config.random_delay = 0.0\n config.experiment_id = config_dict.placeholder(str)\n config.anonymous = config_dict.placeholder(str)\n config.notes = config_dict.placeholder(str)\n config.entity = config_dict.placeholder(str)\n\n if updates is not None:\n config.update(ConfigDict(updates).copy_and_resolve_references())\n return config\n\n def __init__(self, config, variant):\n self.config = self.get_default_config(config)\n\n if self.config.experiment_id is None:\n self.config.experiment_id = uuid.uuid4().hex\n\n if self.config.prefix != '':\n self.config.project = '{}--{}'.format(self.config.prefix, self.config.project)\n\n if self.config.output_dir == '':\n self.config.output_dir = tempfile.mkdtemp()\n else:\n self.config.output_dir = os.path.join(self.config.output_dir, self.config.experiment_id)\n os.makedirs(self.config.output_dir, exist_ok=True)\n\n self._variant = copy(variant)\n\n if 'hostname' not in self._variant:\n self._variant['hostname'] = gethostname()\n\n if self.config.random_delay > 0:\n time.sleep(np.random.uniform(0, self.config.random_delay))\n\n self.run = wandb.init(\n reinit=True,\n config=self._variant,\n project=self.config.project,\n dir=self.config.output_dir,\n entity=config.entity,\n id=self.config.experiment_id,\n anonymous=self.config.anonymous,\n notes=self.config.notes,\n settings=wandb.Settings(\n start_method=\"thread\",\n _disable_stats=True,\n ),\n mode='online' if self.config.online else 'offline',\n )\n\n def log(self, *args, **kwargs):\n self.run.log(*args, **kwargs)\n\n def save_pickle(self, obj, filename):\n with open(os.path.join(self.config.output_dir, filename), 'wb') as fout:\n pickle.dump(obj, fout)\n\n @property\n def experiment_id(self):\n return self.config.experiment_id\n\n @property\n def variant(self):\n return self.config.variant\n\n @property\n def output_dir(self):\n return self.config.output_dir"
}
] | import os
import time
import uuid
import numpy as np
import gym
import jax
import jax.numpy as jnp
import flax
import absl.app
import absl.flags
from copy import deepcopy
from pprint import pprint
from .brac import SAC
from .replay_buffer import ReplayBuffer, get_d4rl_dataset, subsample_batch
from .jax_utils import batch_to_jax
from .model import TanhGaussianPolicy, FullyConnectedQFunction, SamplerPolicy
from .sampler import StepSampler, TrajSampler
from .utils import (
Timer, define_flags_with_default, set_random_seed, print_flags,
get_user_flags, prefix_metrics, WandBLogger
)
from viskit.logging import logger, setup_logger | 8,624 |
FLAGS_DEF = define_flags_with_default(
env='HalfCheetah-v2',
max_traj_length=1000,
replay_buffer_size=1000000,
seed=42,
save_model=False,
reward_scale=1.0,
reward_bias=0.0,
clip_action=0.999,
policy_arch='256-256',
qf_arch='256-256',
orthogonal_init=False,
policy_log_std_multiplier=1.0,
policy_log_std_offset=-1.0,
n_epochs=2000,
n_pi_beta_epochs=5000,
n_train_step_per_epoch=1000,
eval_period=10,
eval_n_trajs=5,
batch_size=256,
sac=SAC.get_default_config(),
logging=WandBLogger.get_default_config(),
)
def main(argv):
FLAGS = absl.flags.FLAGS
variant = get_user_flags(FLAGS, FLAGS_DEF)
wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant)
setup_logger(
variant=variant,
exp_id=wandb_logger.experiment_id,
seed=FLAGS.seed,
base_log_dir=FLAGS.logging.output_dir,
include_exp_prefix_sub_dir=False
)
set_random_seed(FLAGS.seed)
eval_sampler = TrajSampler(gym.make(FLAGS.env).unwrapped, FLAGS.max_traj_length)
dataset = get_d4rl_dataset(eval_sampler.env)
dataset['rewards'] = dataset['rewards'] * FLAGS.reward_scale + FLAGS.reward_bias
dataset['actions'] = np.clip(dataset['actions'], -FLAGS.clip_action, FLAGS.clip_action)
observation_dim = eval_sampler.env.observation_space.shape[0]
action_dim = eval_sampler.env.action_space.shape[0]
policy = TanhGaussianPolicy(
observation_dim, action_dim, FLAGS.policy_arch, FLAGS.orthogonal_init,
FLAGS.policy_log_std_multiplier, FLAGS.policy_log_std_offset, use_tanh=True
)
behavior_policy = TanhGaussianPolicy(
observation_dim, action_dim, FLAGS.policy_arch, FLAGS.orthogonal_init,
FLAGS.policy_log_std_multiplier, FLAGS.policy_log_std_offset, use_tanh=False
)
qf = FullyConnectedQFunction(observation_dim, action_dim, FLAGS.qf_arch, FLAGS.orthogonal_init)
if FLAGS.sac.target_entropy >= 0.0:
FLAGS.sac.target_entropy = -np.prod(eval_sampler.env.action_space.shape).item()
sac = SAC(FLAGS.sac, behavior_policy, policy, qf)
sampler_policy = SamplerPolicy(sac.policy, sac.train_params['policy'])
viskit_metrics = {}
for pi_beta_epoch in range(FLAGS.n_pi_beta_epochs):
metrics = {'behavior_policy_epoch': pi_beta_epoch}
for batch_idx in range(FLAGS.n_train_step_per_epoch):
batch = batch_to_jax(subsample_batch(dataset, FLAGS.batch_size))
|
FLAGS_DEF = define_flags_with_default(
env='HalfCheetah-v2',
max_traj_length=1000,
replay_buffer_size=1000000,
seed=42,
save_model=False,
reward_scale=1.0,
reward_bias=0.0,
clip_action=0.999,
policy_arch='256-256',
qf_arch='256-256',
orthogonal_init=False,
policy_log_std_multiplier=1.0,
policy_log_std_offset=-1.0,
n_epochs=2000,
n_pi_beta_epochs=5000,
n_train_step_per_epoch=1000,
eval_period=10,
eval_n_trajs=5,
batch_size=256,
sac=SAC.get_default_config(),
logging=WandBLogger.get_default_config(),
)
def main(argv):
FLAGS = absl.flags.FLAGS
variant = get_user_flags(FLAGS, FLAGS_DEF)
wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant)
setup_logger(
variant=variant,
exp_id=wandb_logger.experiment_id,
seed=FLAGS.seed,
base_log_dir=FLAGS.logging.output_dir,
include_exp_prefix_sub_dir=False
)
set_random_seed(FLAGS.seed)
eval_sampler = TrajSampler(gym.make(FLAGS.env).unwrapped, FLAGS.max_traj_length)
dataset = get_d4rl_dataset(eval_sampler.env)
dataset['rewards'] = dataset['rewards'] * FLAGS.reward_scale + FLAGS.reward_bias
dataset['actions'] = np.clip(dataset['actions'], -FLAGS.clip_action, FLAGS.clip_action)
observation_dim = eval_sampler.env.observation_space.shape[0]
action_dim = eval_sampler.env.action_space.shape[0]
policy = TanhGaussianPolicy(
observation_dim, action_dim, FLAGS.policy_arch, FLAGS.orthogonal_init,
FLAGS.policy_log_std_multiplier, FLAGS.policy_log_std_offset, use_tanh=True
)
behavior_policy = TanhGaussianPolicy(
observation_dim, action_dim, FLAGS.policy_arch, FLAGS.orthogonal_init,
FLAGS.policy_log_std_multiplier, FLAGS.policy_log_std_offset, use_tanh=False
)
qf = FullyConnectedQFunction(observation_dim, action_dim, FLAGS.qf_arch, FLAGS.orthogonal_init)
if FLAGS.sac.target_entropy >= 0.0:
FLAGS.sac.target_entropy = -np.prod(eval_sampler.env.action_space.shape).item()
sac = SAC(FLAGS.sac, behavior_policy, policy, qf)
sampler_policy = SamplerPolicy(sac.policy, sac.train_params['policy'])
viskit_metrics = {}
for pi_beta_epoch in range(FLAGS.n_pi_beta_epochs):
metrics = {'behavior_policy_epoch': pi_beta_epoch}
for batch_idx in range(FLAGS.n_train_step_per_epoch):
batch = batch_to_jax(subsample_batch(dataset, FLAGS.batch_size)) | metrics.update(prefix_metrics(sac.train_behavior_policy(batch), 'behavior_policy')) | 18 | 2023-10-18 06:31:20+00:00 | 12k |
naver-ai/dual-teacher | tools/train.py | [
{
"identifier": "__version__",
"path": "mmseg/version.py",
"snippet": "def parse_version_info(version_str):"
},
{
"identifier": "set_random_seed",
"path": "mmseg/apis/train.py",
"snippet": "def set_random_seed(seed, deterministic=False):\n \"\"\"Set random seed.\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.benchmark` to False.\n Default: False.\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False"
},
{
"identifier": "train_segmentor",
"path": "mmseg/apis/train.py",
"snippet": "def train_segmentor(model,\n dataset,\n cfg,\n distributed=False,\n validate=False,\n timestamp=None,\n meta=None):\n \"\"\"Launch segmentor training.\"\"\"\n logger = get_root_logger(cfg.log_level)\n\n # prepare data loaders\n dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]\n data_loaders = [\n build_dataloader(\n ds,\n cfg.data.samples_per_gpu,\n cfg.data.workers_per_gpu,\n # cfg.gpus will be ignored if distributed\n len(cfg.gpu_ids),\n dist=distributed,\n seed=cfg.seed,\n drop_last=True) for ds in dataset\n ]\n\n # put model on gpus\n if distributed:\n find_unused_parameters = cfg.get('find_unused_parameters', False)\n # Sets the `find_unused_parameters` parameter in\n # torch.nn.parallel.DistributedDataParallel\n model = MMDistributedDataParallel(\n model.cuda(),\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False,\n find_unused_parameters=find_unused_parameters)\n else:\n model = MMDataParallel(\n model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)\n\n # build runner\n optimizer = build_optimizer(model, cfg.optimizer)\n\n if cfg.get('runner') is None:\n cfg.runner = {'type': 'IterBasedRunner', 'max_iters': cfg.total_iters}\n warnings.warn(\n 'config is now expected to have a `runner` section, '\n 'please set `runner` in your config.', UserWarning)\n\n runner = build_runner(\n cfg.runner,\n default_args=dict(\n model=model,\n batch_processor=None,\n optimizer=optimizer,\n work_dir=cfg.work_dir,\n logger=logger,\n meta=meta))\n\n # register hooks\n runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,\n cfg.checkpoint_config, cfg.log_config,\n cfg.get('momentum_config', None))\n\n # an ugly walkaround to make the .log and .log.json filenames the same\n runner.timestamp = timestamp\n\n # register eval hooks\n if validate:\n val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))\n val_dataloader = build_dataloader(\n val_dataset,\n samples_per_gpu=1,\n workers_per_gpu=cfg.data.workers_per_gpu,\n dist=distributed,\n shuffle=False)\n eval_cfg = cfg.get('evaluation', {})\n eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'\n eval_hook = DistEvalHook if distributed else EvalHook\n runner.register_hook(eval_hook(val_dataloader, **eval_cfg))\n\n if cfg.resume_from:\n runner.resume(cfg.resume_from)\n elif cfg.load_from:\n runner.load_checkpoint(cfg.load_from)\n runner.run(data_loaders, cfg.workflow)"
},
{
"identifier": "build_dataloader",
"path": "mmseg/datasets/builder.py",
"snippet": "def build_dataloader(dataset,\n samples_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n dist=True,\n shuffle=True,\n seed=None,\n drop_last=False,\n pin_memory=True,\n dataloader_type='PoolDataLoader',\n **kwargs):\n \"\"\"Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n samples_per_gpu (int): Number of training samples on each GPU, i.e.,\n batch size of each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n dist (bool): Distributed training/test or not. Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n seed (int | None): Seed to be used. Default: None.\n drop_last (bool): Whether to drop the last incomplete batch in epoch.\n Default: False\n pin_memory (bool): Whether to use pin_memory in DataLoader.\n Default: True\n dataloader_type (str): Type of dataloader. Default: 'PoolDataLoader'\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n \"\"\"\n rank, world_size = get_dist_info()\n if dist:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=shuffle)\n shuffle = False\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\n else:\n sampler = None\n batch_size = num_gpus * samples_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n init_fn = partial(\n worker_init_fn, num_workers=num_workers, rank=rank,\n seed=seed) if seed is not None else None\n\n assert dataloader_type in (\n 'DataLoader',\n 'PoolDataLoader'), f'unsupported dataloader {dataloader_type}'\n\n if dataloader_type == 'PoolDataLoader':\n dataloader = PoolDataLoader\n elif dataloader_type == 'DataLoader':\n dataloader = DataLoader\n\n data_loader = dataloader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),\n pin_memory=pin_memory,\n shuffle=shuffle,\n worker_init_fn=init_fn,\n drop_last=drop_last,\n **kwargs)\n\n return data_loader"
},
{
"identifier": "build_dataset",
"path": "mmseg/datasets/builder.py",
"snippet": "def build_dataset(cfg, default_args=None):\n \"\"\"Build datasets.\"\"\"\n from .dataset_wrappers import ConcatDataset, RepeatDataset\n if isinstance(cfg, (list, tuple)):\n dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])\n elif cfg['type'] == 'RepeatDataset':\n dataset = RepeatDataset(\n build_dataset(cfg['dataset'], default_args), cfg['times'])\n elif isinstance(cfg.get('img_dir'), (list, tuple)) or isinstance(\n cfg.get('split', None), (list, tuple)):\n dataset = _concat_dataset(cfg, default_args)\n else:\n dataset = build_from_cfg(cfg, DATASETS, default_args)\n\n return dataset"
},
{
"identifier": "build_segmentor",
"path": "mmseg/models/builder.py",
"snippet": "def build_segmentor(cfg, train_cfg=None, test_cfg=None):\n \"\"\"Build segmentor.\"\"\"\n if train_cfg is not None or test_cfg is not None:\n warnings.warn(\n 'train_cfg and test_cfg is deprecated, '\n 'please specify them in model', UserWarning)\n assert cfg.get('train_cfg') is None or train_cfg is None, \\\n 'train_cfg specified in both outer field and model field '\n assert cfg.get('test_cfg') is None or test_cfg is None, \\\n 'test_cfg specified in both outer field and model field '\n return build(cfg, SEGMENTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))"
},
{
"identifier": "collect_env",
"path": "mmseg/utils/collect_env.py",
"snippet": "def collect_env():\n \"\"\"Collect the information of the running environments.\"\"\"\n env_info = collect_base_env()\n env_info['MMSegmentation'] = f'{mmseg.__version__}+{get_git_hash()[:7]}'\n\n return env_info"
},
{
"identifier": "get_root_logger",
"path": "mmseg/utils/logger.py",
"snippet": "def get_root_logger(log_file=None, log_level=logging.INFO):\n \"\"\"Get the root logger.\n\n The logger will be initialized if it has not been initialized. By default a\n StreamHandler will be added. If `log_file` is specified, a FileHandler will\n also be added. The name of the root logger is the top-level package name,\n e.g., \"mmseg\".\n\n Args:\n log_file (str | None): The log filename. If specified, a FileHandler\n will be added to the root logger.\n log_level (int): The root logger level. Note that only the process of\n rank 0 is affected, while other processes will set the level to\n \"Error\" and be silent most of the time.\n\n Returns:\n logging.Logger: The root logger.\n \"\"\"\n\n logger = get_logger(name='mmseg', log_file=log_file, log_level=log_level)\n\n return logger"
},
{
"identifier": "MiT_SegFormer",
"path": "seg_core/model.py",
"snippet": "class MiT_SegFormer(nn.Module):\n def __init__(self, backbone, num_classes=20, embedding_dim=256, pretrained=None):\n super().__init__()\n self.num_classes = num_classes\n self.embedding_dim = embedding_dim\n self.feature_strides = [4, 8, 16, 32]\n # self.in_channels = [32, 64, 160, 256]\n # self.in_channels = [64, 128, 320, 512]\n\n self.encoder = getattr(mix_transformer, backbone)()\n self.in_channels = self.encoder.embed_dims\n mit_num = backbone.split('_')[1][1]\n ## initilize encoder\n if pretrained:\n state_dict = torch.load('/home/najm/DualTeacher/pretrained/mit_b' + mit_num + '.pth')\n state_dict.pop('head.weight')\n state_dict.pop('head.bias')\n self.encoder.load_state_dict(state_dict, )\n\n self.decoder = SegFormerHead(feature_strides=self.feature_strides, in_channels=self.in_channels, embedding_dim=self.embedding_dim, num_classes=self.num_classes)\n\n self.classifier = nn.Conv2d(in_channels=self.in_channels[-1], out_channels=self.num_classes, kernel_size=1, bias=False)\n\n def _forward_cam(self, x):\n\n cam = F.conv2d(x, self.classifier.weight)\n cam = F.relu(cam)\n\n return cam\n\n def get_param_groups(self):\n\n param_groups = [[], [], []] #\n\n for name, param in list(self.encoder.named_parameters()):\n if \"norm\" in name:\n param_groups[1].append(param)\n else:\n param_groups[0].append(param)\n\n for param in list(self.decoder.parameters()):\n param_groups[2].append(param)\n\n param_groups[2].append(self.classifier.weight)\n\n return param_groups\n\n def forward(self, x):\n\n _x = self.encoder(x)\n _x1, _x2, _x3, _x4 = _x\n cls = self.classifier(_x4)\n return self.decoder(_x)"
},
{
"identifier": "PolyWarmupAdamW",
"path": "seg_core/optimizer.py",
"snippet": "class PolyWarmupAdamW(torch.optim.AdamW):\n\n def __init__(self, params, lr, weight_decay, betas, warmup_iter=None, max_iter=None, warmup_ratio=None, power=None):\n super().__init__(params, lr=lr, betas=betas, weight_decay=weight_decay, eps=1e-8)\n\n self.global_step = 0\n self.warmup_iter = warmup_iter\n self.warmup_ratio = warmup_ratio\n self.max_iter = max_iter\n self.power = power\n\n self.__init_lr = [group['lr'] for group in self.param_groups]\n\n def step(self, closure=None):\n ## adjust lr\n if self.global_step < self.warmup_iter:\n\n lr_mult = 1 - (1 - self.global_step / self.warmup_iter) * (1 - self.warmup_ratio)\n for i in range(len(self.param_groups)):\n self.param_groups[i]['lr'] = self.__init_lr[i] * lr_mult\n\n elif self.global_step < self.max_iter:\n\n lr_mult = (1 - self.global_step / self.max_iter) ** self.power\n for i in range(len(self.param_groups)):\n self.param_groups[i]['lr'] = self.__init_lr[i] * lr_mult\n\n # step\n super().step(closure)\n\n self.global_step += 1"
},
{
"identifier": "ClassMixLoss",
"path": "seg_core/augmentations.py",
"snippet": "class ClassMixLoss(nn.Module):\n def __init__(self, weight=None, reduction=None, ignore_index=None):\n super(ClassMixLoss, self).__init__()\n self.CE = nn.CrossEntropyLoss(weight=weight, reduction=reduction, ignore_index=ignore_index)\n\n def forward(self, output, target, pixel_weight):\n loss = self.CE(output, target)\n loss = torch.mean(loss * pixel_weight)\n return loss"
},
{
"identifier": "compute_classmix",
"path": "seg_core/augmentations.py",
"snippet": "def compute_classmix(b, h, w, criterion, cm_loss_fn, model, ema_model, imgs, labels, unsup_imgs, image_u_strong, threshold):\n # Unlabeled Process\n with torch.no_grad():\n logits_occluder = ema_model(unsup_imgs) # 129\n logits_occluder = F.interpolate(logits_occluder, (h, w), mode=\"bilinear\", align_corners=False) # 513\n softmax_occluder = torch.softmax(logits_occluder, dim=1)\n max_prob_occluder, argmax_occluder = torch.max(softmax_occluder, dim=1)\n\n binary_mask = get_bin_mask(b, argmax_occluder)\n binary_mask = binary_mask.squeeze(dim=1)\n if b == 2:\n shuffle_index = torch.tensor([1, 0])\n else:\n shuffle_index = torch.randperm(b).cuda()\n class_mixed_img = class_mix(occluder_mask=binary_mask, occluder=image_u_strong, occludee=image_u_strong[shuffle_index])\n\n num_labeled = len(imgs)\n outputs = model(torch.cat([imgs, class_mixed_img]))\n outputs, outputs_u = outputs[:num_labeled], outputs[num_labeled:]\n\n pred_large = F.interpolate(outputs, size=labels.shape[1:], mode='bilinear', align_corners=False)\n sup_loss = criterion(pred_large, labels.type(torch.long).clone())\n del outputs, pred_large\n torch.cuda.empty_cache()\n logits_class_mixed = F.interpolate(outputs_u, (h, w), mode=\"bilinear\", align_corners=False)\n\n class_mixed_softmax = class_mix(occluder_mask=binary_mask, occluder=softmax_occluder, occludee=softmax_occluder[shuffle_index])\n max_prob_occluder, pseudo_label = torch.max(class_mixed_softmax, dim=1)\n\n unlabeled_weight = torch.sum(max_prob_occluder.ge(threshold).long() == 1).item() / np.size(np.array(pseudo_label.cpu()))\n pixel_weight = unlabeled_weight * torch.ones(max_prob_occluder.shape).cuda()\n\n class_mix_loss = cm_loss_fn(logits_class_mixed, pseudo_label, pixel_weight)\n loss = sup_loss + class_mix_loss\n return loss"
},
{
"identifier": "compute_cutmix",
"path": "seg_core/augmentations.py",
"snippet": "def compute_cutmix(h, w, imgs, labels, criterion, model, ema_model, image_u, threshold):\n with torch.no_grad():\n pred = ema_model(image_u)\n pred = F.interpolate(pred, (h, w), mode=\"bilinear\", align_corners=False)\n pred = F.softmax(pred, dim=1)\n pred_logit, pred_label = torch.max(pred, dim=1)\n\n image_aug, label_aug = cut_mixer(image_u, pred_label.clone())\n\n image_aug, label_aug, pred_logit = \\\n batch_transform(image_aug, label_aug, pred_logit,\n crop_size=(pred_logit.shape[1], pred_logit.shape[2]), scale_size=(1.0, 1.0), apply_augmentation=True)\n\n num_labeled = len(imgs)\n outputs = model(torch.cat([imgs, image_aug]))\n outputs, outputs_u = outputs[:num_labeled], outputs[num_labeled:]\n pred_large = F.interpolate(outputs, size=labels.shape[1:], mode='bilinear', align_corners=False)\n sup_loss = criterion(pred_large, labels.type(torch.long).clone())\n\n pred_u = F.interpolate(outputs_u, (h, w), mode=\"bilinear\", align_corners=False)\n\n cutmix_loss = compute_unsupervised_loss(pred_u, label_aug.clone(), pred_logit, threshold)\n return sup_loss + cutmix_loss"
},
{
"identifier": "compute_ic",
"path": "seg_core/augmentations.py",
"snippet": "def compute_ic(model, ema_model, image_u, image_u_strong, criterion_u, label_u, h, w, threshold):\n with torch.no_grad():\n logits = ema_model(image_u) # 129\n logits = F.interpolate(logits, (h, w), mode=\"bilinear\", align_corners=False) # 513\n softmax_out = torch.softmax(logits, dim=1)\n max_probs, argmax_label = torch.max(softmax_out, dim=1)\n pred_dc = model(image_u_strong)\n pred_dc = F.interpolate(pred_dc, (h, w), mode=\"bilinear\", align_corners=False) # 513\n loss_dc = criterion_u(pred_dc, argmax_label)\n loss_dc = loss_dc * ((max_probs >= threshold) & (label_u != 255))\n loss_dc = loss_dc.sum() / (label_u != 255).sum().item()\n return loss_dc.clone()"
},
{
"identifier": "single_gpu_test",
"path": "mmseg/apis/test.py",
"snippet": "def single_gpu_test(model,\n data_loader,\n show=False,\n out_dir=None,\n efficient_test=False):\n \"\"\"Test with single GPU.\n\n Args:\n model (nn.Module): Model to be tested.\n data_loader (utils.data.Dataloader): Pytorch data loader.\n show (bool): Whether show results during infernece. Default: False.\n out_dir (str, optional): If specified, the results will be dumped into\n the directory to save output results.\n efficient_test (bool): Whether save the results as local numpy files to\n save CPU memory during evaluation. Default: False.\n\n Returns:\n list: The prediction results.\n \"\"\"\n\n model.eval()\n results = []\n dataset = data_loader.dataset\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, **data)\n\n if show or out_dir:\n img_tensor = data['img'][0]\n img_metas = data['img_metas'][0].data[0]\n imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])\n assert len(imgs) == len(img_metas)\n\n for img, img_meta in zip(imgs, img_metas):\n h, w, _ = img_meta['img_shape']\n img_show = img[:h, :w, :]\n\n ori_h, ori_w = img_meta['ori_shape'][:-1]\n img_show = mmcv.imresize(img_show, (ori_w, ori_h))\n\n if out_dir:\n out_file = osp.join(out_dir, img_meta['ori_filename'])\n else:\n out_file = None\n\n model.module.show_result(\n img_show,\n result,\n palette=dataset.PALETTE,\n show=show,\n out_file=out_file)\n\n if isinstance(result, list):\n if efficient_test:\n result = [np2tmp(_) for _ in result]\n results.extend(result)\n else:\n if efficient_test:\n result = np2tmp(result)\n results.append(result)\n\n batch_size = data['img'][0].size(0)\n for _ in range(batch_size):\n prog_bar.update()\n return results"
}
] | import argparse
import copy
import os
import os.path as osp
import time
import logging
import mmcv
import torch
import numpy as np
import seg_core.eval_seg as eval_seg
import torch.nn.functional as F
import warnings
import torch.distributed as dist
import random
import tempfile
from mmcv.runner import init_dist
from mmcv.utils import Config, DictAction, get_git_hash
from torchvision.transforms import ToTensor
from mmseg import __version__
from mmseg.apis import set_random_seed, train_segmentor
from mmseg.datasets import build_dataset, build_dataloader
from mmseg.models import build_segmentor
from mmseg.utils import collect_env, get_root_logger
from seg_core.model import MiT_SegFormer
from seg_core.optimizer import PolyWarmupAdamW
from seg_core.augmentations import ClassMixLoss, compute_classmix, compute_cutmix, compute_ic
from torchvision.utils import save_image
from dist_helper import setup_distributed
from mmseg.apis import single_gpu_test
from mmcv.image import tensor2imgs
from PIL import Image, ImageOps, ImageFilter
from torchvision import transforms
from copy import deepcopy | 8,161 |
def setup_logger(filename='test.log'):
## setup logger
# logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(filename)s - %(levelname)s: %(message)s')
logFormatter = logging.Formatter('%(asctime)s - %(filename)s - %(levelname)s: %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fHandler = logging.FileHandler(filename, mode='w')
fHandler.setFormatter(logFormatter)
logger.addHandler(fHandler)
cHandler = logging.StreamHandler()
cHandler.setFormatter(logFormatter)
logger.addHandler(cHandler)
def parse_args():
parser = argparse.ArgumentParser(description='Train a segmentor')
parser.add_argument('--ddp', default=False, action='store_true')
parser.add_argument('--dual_teacher', default=False, action='store_true')
parser.add_argument('--unimatch_aug', default=False, action='store_true')
parser.add_argument('--save_path', type=str, help='log moemo')
parser.add_argument('--out', default='work_dirs/res.pkl', help='output result file in pickle format')
parser.add_argument('--config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument('--load-from', help='the checkpoint file to load weights from')
parser.add_argument('--resume-from', help='the checkpoint file to resume from')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument('--gpus', type=int, help='number of gpus to use (only applicable to non-distributed training)')
group_gpus.add_argument('--gpu-ids', type=int, nargs='+', help='ids of gpus to use only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument('--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.')
parser.add_argument('--options', nargs='+', action=DictAction, help='custom options')
parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
parser.add_argument("--backbone", type=str)
parser.add_argument("--port", default=None, type=int)
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--dc', default=False, action='store_true')
args = parser.parse_args()
# if 'LOCAL_RANK' not in os.environ:
# os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def np2tmp(array, temp_file_name=None):
"""Save ndarray to local numpy file.
Args:
array (ndarray): Ndarray to save.
temp_file_name (str): Numpy file name. If 'temp_file_name=None', this
function will generate a file name with tempfile.NamedTemporaryFile
to save ndarray. Default: None.
Returns:
str: The numpy file name.
"""
if temp_file_name is None:
temp_file_name = tempfile.NamedTemporaryFile(
suffix='.npy', delete=False).name
np.save(temp_file_name, array)
return temp_file_name
def image_saver(input, name):
"""
:param name: "path/name"
"""
if input.dim() == 3:
input = input.unsqueeze(dim=0)
save_image(input.float(), str(name) + '.jpg')
def main():
setup_logger()
args = parse_args()
mit_type = args.backbone[-1]
if mit_type == '5':
args.config = 'local_configs/segformer/B' + mit_type + '/segformer.b' + mit_type + '.640x640.ade.160k.py'
else:
args.config = 'local_configs/segformer/B' + mit_type + '/segformer.b' + mit_type + '.512x512.ade.160k.py'
cfg = Config.fromfile(args.config)
if args.options is not None:
cfg.merge_from_dict(args.options)
torch.backends.cudnn.benchmark = False
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
if args.load_from is not None:
cfg.load_from = args.load_from
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
distributed = False
if args.ddp:
rank, word_size = setup_distributed(port=args.port)
distributed = True
else:
rank = 0
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
| """
Dual-Teacher
Copyright (c) 2023-present NAVER Cloud Corp.
distributed under NVIDIA Source Code License for SegFormer
--------------------------------------------------------
References:
SegFormer: https://github.com/NVlabs/SegFormer
--------------------------------------------------------
"""
warnings.filterwarnings("ignore")
criterion_u = torch.nn.CrossEntropyLoss(reduction='none').cuda()
def train_sup(args, model, optimizer, train_loader, val_loader, criterion, max_iters, print_iters, eval_iters):
train_iterator = iter(train_loader)
if args.ddp:
rank, world_size = dist.get_rank(), dist.get_world_size()
else:
rank = 0
for epoch in range(200):
for i in range(len(train_loader)):
model.train()
try:
batch_data = next(train_iterator)
except:
train_iterator = iter(train_loader)
batch_data = next(train_iterator)
image = batch_data['img'].data[0].cuda(non_blocking=True)
label = batch_data['gt_semantic_seg'].data[0].squeeze(dim=1).cuda(non_blocking=True)
outputs = model(image)
outputs = F.interpolate(outputs, size=label.shape[1:], mode='bilinear', align_corners=False)
seg_loss = criterion(outputs, label.type(torch.long))
optimizer.zero_grad()
seg_loss.backward()
optimizer.step()
if rank == 0:
lr = optimizer.param_groups[0]['lr']
logging.info("save_path:{}".format(args.save_path))
logging.info("Iter: %d; LR: %.3e; seg_loss: %f" % (i + 1, lr, seg_loss.item()))
print("Iter: %d; LR: %.3e; seg_loss: %f" % (i + 1, lr, seg_loss.item()))
logging.info('[iter:{}] Validation:'.format(i + 1))
print('[iter:{}] Validation:'.format(i + 1))
val_score = val(model.module, val_loader)
logging.info('mIoU:{:.5f}'.format(val_score['Mean IoU'] * 100))
print('mIoU:{:.5f}'.format(val_score['Mean IoU'] * 100))
model.train()
def train_dual(args, model, model_teacher, model_teacher2, optimizer, train_loader, train_loader_u, val_loader, criterion, cm_loss_fn, max_iters, print_iters, eval_iters):
if args.ddp:
rank, world_size = dist.get_rank(), dist.get_world_size()
else:
rank = 0
best_miou, best_epoch = 0, 0
for epoch in range(200):
model.train()
train_loader.sampler.set_epoch(epoch)
train_loader_u.sampler.set_epoch(epoch)
train_iterator = iter(train_loader)
train_iterator_u = iter(train_loader_u)
if epoch % 2 == 0:
ema_model = model_teacher
do_cut_mix = True
do_class_mix = False
else:
ema_model = model_teacher2
do_cut_mix = False
do_class_mix = True
ema_model.train()
for i in range(len(train_loader)):
try:
batch_data_u = next(train_iterator_u)
except:
train_iterator_u = iter(train_loader_u)
batch_data_u = next(train_iterator_u)
try:
batch_data = next(train_iterator)
except:
train_iterator = iter(train_loader)
batch_data = next(train_iterator)
image = batch_data['img'].data[0].cuda(non_blocking=True)
label = batch_data['gt_semantic_seg'].data[0].squeeze(dim=1).cuda(non_blocking=True)
image_u = batch_data_u['img'].data[0].cuda(non_blocking=True)
label_u = batch_data['gt_semantic_seg'].data[0].squeeze(dim=1).cuda(non_blocking=True)
b, _, h, w = image.shape
image_u_strong = deepcopy(image_u)
image_u_strong = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(image_u_strong)
image_u_strong = transforms.RandomGrayscale(p=0.2)(image_u_strong)
if do_class_mix:
loss = compute_classmix(b, h, w, criterion, cm_loss_fn, model, ema_model, image, label, image_u, image_u_strong, threshold=0.95)
if do_cut_mix:
loss = compute_cutmix(h, w, image, label, criterion, model, ema_model, image_u, threshold=0.95)
loss_dc = compute_ic(model, ema_model, image_u, image_u_strong, criterion_u, label_u, h, w, threshold=0.95)
total_loss = loss + loss_dc * 0.2
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
if args.ddp:
reduced_loss = loss.clone().detach()
dist.all_reduce(reduced_loss)
update_ema(model_teacher=ema_model, model=model, alpha_teacher=0.99, iteration=i)
if rank == 0:
if (i + 1) % print_iters == 0:
lr = optimizer.param_groups[0]['lr']
logging.info("Epoch: %d; Iter: %d; LR: %.3e; loss: %f" % (epoch, i + 1, lr, loss.item()))
print("Epoch: %d; Iter: %d; LR: %.3e; loss: %f" % (epoch, i + 1, lr, loss.item()))
if rank == 0:
logging.info('[Epoch {}] [iter:{}] Validation:'.format(epoch, i + 1))
print('[Epoch {}] [iter:{}] Validation:'.format(epoch, i + 1))
val_score = val(model.module, val_loader)
miou = val_score['Mean IoU'] * 100
if miou > best_miou:
best_miou = miou
best_epoch = epoch
logging.info('mIoU:{:.5f} Best mIOU:{:.5f} on epoch {}'.format(miou, best_miou, best_epoch))
print('mIoU:{:.5f} Best mIOU:{:.5f} on epoch {}'.format(miou, best_miou, best_epoch))
model.train()
def synchronize():
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def val(model, data_loader):
model.eval()
preds, gts = [], []
for i, data in enumerate(data_loader):
with torch.no_grad():
image = data['img'][0].cuda(non_blocking=True)
label = data['gt_semantic_seg'][0].cuda(non_blocking=True)
outputs = model(image)
resized_outputs = F.interpolate(outputs, size=label.shape[1:], mode='bilinear', align_corners=False)
preds += list(torch.argmax(resized_outputs, dim=1).cpu().numpy().astype(np.int16))
gts += list(label.cpu().numpy().astype(np.int16))
score = eval_seg.scores(gts, preds, num_classes=150)
model.train()
return score
def val_ddp(args, epoch, model, data_loader):
model.eval()
preds, gts = [], []
if args.ddp:
data_loader.sampler.set_epoch(epoch)
rank, world_size = dist.get_rank(), dist.get_world_size()
else:
rank = 0
for i, data in enumerate(data_loader):
with torch.no_grad():
# print(data)
image = data['img'][0].cuda(non_blocking=True)
label = data['gt_semantic_seg'][0].cuda(non_blocking=True)
outputs = model(image)
resized_outputs = F.interpolate(outputs, size=label.shape[1:], mode='bilinear', align_corners=False)
preds += list(torch.argmax(resized_outputs, dim=1).cpu().numpy().astype(np.int16))
gts += list(label.cpu().numpy().astype(np.int16))
if args.ddp:
preds = torch.from_numpy(np.array(preds)).cuda()
gts = torch.from_numpy(np.array(gts)).cuda()
dist.all_reduce(preds)
dist.all_reduce(gts)
gts = list(gts)
preds = list(preds)
score = eval_seg.scores(gts, preds, num_classes=150)
return score
def intersectionAndUnion(output, target, K, ignore_index):
# 'K' classes, output and target sizes are N or N * L or N * H * W, each value in range 0 to K - 1.
assert output.ndim in [1, 2, 3]
assert output.shape == target.shape
output = output.reshape(output.size).copy()
target = target.reshape(target.size)
output[np.where(target == ignore_index)[0]] = ignore_index
intersection = output[np.where(output == target)[0]]
area_intersection, _ = np.histogram(intersection, bins=np.arange(K + 1))
area_output, _ = np.histogram(output, bins=np.arange(K + 1))
area_target, _ = np.histogram(target, bins=np.arange(K + 1))
area_union = area_output + area_target - area_intersection
return area_intersection, area_union, area_target
def update_ema(model_teacher, model, alpha_teacher, iteration):
with torch.no_grad():
alpha_teacher = min(1 - 1 / (iteration + 1), alpha_teacher)
for ema_param, param in zip(model_teacher.parameters(), model.parameters()):
ema_param.data[:] = alpha_teacher * ema_param[:].data[:] + (1 - alpha_teacher) * param[:].data[:]
def setup_logger(filename='test.log'):
## setup logger
# logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(filename)s - %(levelname)s: %(message)s')
logFormatter = logging.Formatter('%(asctime)s - %(filename)s - %(levelname)s: %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fHandler = logging.FileHandler(filename, mode='w')
fHandler.setFormatter(logFormatter)
logger.addHandler(fHandler)
cHandler = logging.StreamHandler()
cHandler.setFormatter(logFormatter)
logger.addHandler(cHandler)
def parse_args():
parser = argparse.ArgumentParser(description='Train a segmentor')
parser.add_argument('--ddp', default=False, action='store_true')
parser.add_argument('--dual_teacher', default=False, action='store_true')
parser.add_argument('--unimatch_aug', default=False, action='store_true')
parser.add_argument('--save_path', type=str, help='log moemo')
parser.add_argument('--out', default='work_dirs/res.pkl', help='output result file in pickle format')
parser.add_argument('--config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument('--load-from', help='the checkpoint file to load weights from')
parser.add_argument('--resume-from', help='the checkpoint file to resume from')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument('--gpus', type=int, help='number of gpus to use (only applicable to non-distributed training)')
group_gpus.add_argument('--gpu-ids', type=int, nargs='+', help='ids of gpus to use only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument('--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.')
parser.add_argument('--options', nargs='+', action=DictAction, help='custom options')
parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
parser.add_argument("--backbone", type=str)
parser.add_argument("--port", default=None, type=int)
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--dc', default=False, action='store_true')
args = parser.parse_args()
# if 'LOCAL_RANK' not in os.environ:
# os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def np2tmp(array, temp_file_name=None):
"""Save ndarray to local numpy file.
Args:
array (ndarray): Ndarray to save.
temp_file_name (str): Numpy file name. If 'temp_file_name=None', this
function will generate a file name with tempfile.NamedTemporaryFile
to save ndarray. Default: None.
Returns:
str: The numpy file name.
"""
if temp_file_name is None:
temp_file_name = tempfile.NamedTemporaryFile(
suffix='.npy', delete=False).name
np.save(temp_file_name, array)
return temp_file_name
def image_saver(input, name):
"""
:param name: "path/name"
"""
if input.dim() == 3:
input = input.unsqueeze(dim=0)
save_image(input.float(), str(name) + '.jpg')
def main():
setup_logger()
args = parse_args()
mit_type = args.backbone[-1]
if mit_type == '5':
args.config = 'local_configs/segformer/B' + mit_type + '/segformer.b' + mit_type + '.640x640.ade.160k.py'
else:
args.config = 'local_configs/segformer/B' + mit_type + '/segformer.b' + mit_type + '.512x512.ade.160k.py'
cfg = Config.fromfile(args.config)
if args.options is not None:
cfg.merge_from_dict(args.options)
torch.backends.cudnn.benchmark = False
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
if args.load_from is not None:
cfg.load_from = args.load_from
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
distributed = False
if args.ddp:
rank, word_size = setup_distributed(port=args.port)
distributed = True
else:
rank = 0
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log') | logger = get_root_logger(log_file=log_file, log_level=cfg.log_level) | 7 | 2023-10-19 04:04:31+00:00 | 12k |
SLDGroup/G-CASCADE | lib/maxxvit_4out.py | [
{
"identifier": "build_model_with_cfg",
"path": "lib/models_timm/helpers.py",
"snippet": "def build_model_with_cfg(\n model_cls: Callable,\n variant: str,\n pretrained: bool,\n pretrained_cfg: Optional[Dict] = None,\n model_cfg: Optional[Any] = None,\n feature_cfg: Optional[Dict] = None,\n pretrained_strict: bool = True,\n pretrained_filter_fn: Optional[Callable] = None,\n pretrained_custom_load: bool = False,\n kwargs_filter: Optional[Tuple[str]] = None,\n **kwargs):\n \"\"\" Build model with specified default_cfg and optional model_cfg\n\n This helper fn aids in the construction of a model including:\n * handling default_cfg and associated pretrained weight loading\n * passing through optional model_cfg for models with config based arch spec\n * features_only model adaptation\n * pruning config / model adaptation\n\n Args:\n model_cls (nn.Module): model class\n variant (str): model variant name\n pretrained (bool): load pretrained weights\n pretrained_cfg (dict): model's pretrained weight/task config\n model_cfg (Optional[Dict]): model's architecture config\n feature_cfg (Optional[Dict]: feature extraction adapter config\n pretrained_strict (bool): load pretrained weights strictly\n pretrained_filter_fn (Optional[Callable]): filter callable for pretrained weights\n pretrained_custom_load (bool): use custom load fn, to load numpy or other non PyTorch weights\n kwargs_filter (Optional[Tuple]): kwargs to filter before passing to model\n **kwargs: model args passed through to model __init__\n \"\"\"\n pruned = kwargs.pop('pruned', False)\n features = False\n feature_cfg = feature_cfg or {}\n\n # resolve and update model pretrained config and model kwargs\n pretrained_cfg = resolve_pretrained_cfg(variant, pretrained_cfg=pretrained_cfg)\n update_pretrained_cfg_and_kwargs(pretrained_cfg, kwargs, kwargs_filter)\n pretrained_cfg.setdefault('architecture', variant)\n\n # Setup for feature extraction wrapper done at end of this fn\n if kwargs.pop('features_only', False):\n features = True\n feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4))\n if 'out_indices' in kwargs:\n feature_cfg['out_indices'] = kwargs.pop('out_indices')\n\n # Build the model\n model = model_cls(**kwargs) if model_cfg is None else model_cls(cfg=model_cfg, **kwargs)\n model.pretrained_cfg = pretrained_cfg\n model.default_cfg = model.pretrained_cfg # alias for backwards compat\n \n if pruned:\n model = adapt_model_from_file(model, variant)\n\n # For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats\n num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000))\n if pretrained:\n if pretrained_custom_load:\n # FIXME improve custom load trigger\n load_custom_pretrained(model, pretrained_cfg=pretrained_cfg)\n else:\n load_pretrained(\n model,\n pretrained_cfg=pretrained_cfg,\n num_classes=num_classes_pretrained,\n in_chans=kwargs.get('in_chans', 3),\n filter_fn=pretrained_filter_fn,\n strict=pretrained_strict)\n\n # Wrap the model in a feature extraction module if enabled\n if features:\n feature_cls = FeatureListNet\n if 'feature_cls' in feature_cfg:\n feature_cls = feature_cfg.pop('feature_cls')\n if isinstance(feature_cls, str):\n feature_cls = feature_cls.lower()\n if 'hook' in feature_cls:\n feature_cls = FeatureHookNet\n elif feature_cls == 'fx':\n feature_cls = FeatureGraphNet\n else:\n assert False, f'Unknown feature class {feature_cls}'\n model = feature_cls(model, **feature_cfg)\n model.pretrained_cfg = pretrained_cfg_for_features(pretrained_cfg) # add back default_cfg\n model.default_cfg = 
model.pretrained_cfg # alias for backwards compat\n \n return model"
},
{
"identifier": "checkpoint_seq",
"path": "lib/models_timm/helpers.py",
"snippet": "def checkpoint_seq(\n functions,\n x,\n every=1,\n flatten=False,\n skip_last=False,\n preserve_rng_state=True\n):\n r\"\"\"A helper function for checkpointing sequential models.\n\n Sequential models execute a list of modules/functions in order\n (sequentially). Therefore, we can divide such a sequence into segments\n and checkpoint each segment. All segments except run in :func:`torch.no_grad`\n manner, i.e., not storing the intermediate activations. The inputs of each\n checkpointed segment will be saved for re-running the segment in the backward pass.\n\n See :func:`~torch.utils.checkpoint.checkpoint` on how checkpointing works.\n\n .. warning::\n Checkpointing currently only supports :func:`torch.autograd.backward`\n and only if its `inputs` argument is not passed. :func:`torch.autograd.grad`\n is not supported.\n\n .. warning:\n At least one of the inputs needs to have :code:`requires_grad=True` if\n grads are needed for model inputs, otherwise the checkpointed part of the\n model won't have gradients.\n\n Args:\n functions: A :class:`torch.nn.Sequential` or the list of modules or functions to run sequentially.\n x: A Tensor that is input to :attr:`functions`\n every: checkpoint every-n functions (default: 1)\n flatten (bool): flatten nn.Sequential of nn.Sequentials\n skip_last (bool): skip checkpointing the last function in the sequence if True\n preserve_rng_state (bool, optional, default=True): Omit stashing and restoring\n the RNG state during each checkpoint.\n\n Returns:\n Output of running :attr:`functions` sequentially on :attr:`*inputs`\n\n Example:\n >>> model = nn.Sequential(...)\n >>> input_var = checkpoint_seq(model, input_var, every=2)\n \"\"\"\n def run_function(start, end, functions):\n def forward(_x):\n for j in range(start, end + 1):\n _x = functions[j](_x)\n return _x\n return forward\n\n if isinstance(functions, torch.nn.Sequential):\n functions = functions.children()\n if flatten:\n functions = chain.from_iterable(functions)\n if not isinstance(functions, (tuple, list)):\n functions = tuple(functions)\n\n num_checkpointed = len(functions)\n if skip_last:\n num_checkpointed -= 1\n end = -1\n for start in range(0, num_checkpointed, every):\n end = min(start + every - 1, num_checkpointed - 1)\n x = checkpoint(run_function(start, end, functions), x, preserve_rng_state=preserve_rng_state)\n if skip_last:\n return run_function(end + 1, len(functions) - 1, functions)(x)\n return x"
},
{
"identifier": "named_apply",
"path": "lib/models_timm/helpers.py",
"snippet": "def named_apply(fn: Callable, module: nn.Module, name='', depth_first=True, include_root=False) -> nn.Module:\n if not depth_first and include_root:\n fn(module=module, name=name)\n for child_name, child_module in module.named_children():\n child_name = '.'.join((name, child_name)) if name else child_name\n named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True)\n if depth_first and include_root:\n fn(module=module, name=name)\n return module"
},
{
"identifier": "register_notrace_function",
"path": "lib/models_timm/fx_features.py",
"snippet": "def register_notrace_function(func: Callable):\n \"\"\"\n Decorator for functions which ought not to be traced through\n \"\"\"\n _autowrap_functions.add(func)\n return func"
},
{
"identifier": "ClassifierHead",
"path": "lib/models_timm/layers/classifier.py",
"snippet": "class ClassifierHead(nn.Module):\n \"\"\"Classifier head w/ configurable global pooling and dropout.\"\"\"\n\n def __init__(self, in_chs, num_classes, pool_type='avg', drop_rate=0., use_conv=False):\n super(ClassifierHead, self).__init__()\n self.drop_rate = drop_rate\n self.global_pool, num_pooled_features = _create_pool(in_chs, num_classes, pool_type, use_conv=use_conv)\n self.fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv)\n self.flatten = nn.Flatten(1) if use_conv and pool_type else nn.Identity()\n\n def forward(self, x, pre_logits: bool = False):\n x = self.global_pool(x)\n if self.drop_rate:\n x = F.dropout(x, p=float(self.drop_rate), training=self.training)\n if pre_logits:\n return x.flatten(1)\n else:\n x = self.fc(x)\n return self.flatten(x)"
},
{
"identifier": "DropPath",
"path": "lib/models_timm/layers/drop.py",
"snippet": "class DropPath(nn.Module):\n \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n \"\"\"\n def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True):\n super(DropPath, self).__init__()\n self.drop_prob = drop_prob\n self.scale_by_keep = scale_by_keep\n\n def forward(self, x):\n return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)\n\n def extra_repr(self):\n return f'drop_prob={round(self.drop_prob,3):0.3f}'"
},
{
"identifier": "Mlp",
"path": "lib/models_timm/layers/mlp.py",
"snippet": "class Mlp(nn.Module):\n \"\"\" MLP as used in Vision Transformer, MLP-Mixer and related networks\n \"\"\"\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, bias=True, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n bias = to_2tuple(bias)\n drop_probs = to_2tuple(drop)\n\n self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0])\n self.act = act_layer()\n self.drop1 = nn.Dropout(drop_probs[0])\n self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1])\n self.drop2 = nn.Dropout(drop_probs[1])\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop1(x)\n x = self.fc2(x)\n x = self.drop2(x)\n return x"
},
{
"identifier": "ConvMlp",
"path": "lib/models_timm/layers/mlp.py",
"snippet": "class ConvMlp(nn.Module):\n \"\"\" MLP using 1x1 convs that keeps spatial dims\n \"\"\"\n def __init__(\n self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU,\n norm_layer=None, bias=True, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n bias = to_2tuple(bias)\n\n self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=bias[0])\n self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()\n self.act = act_layer()\n self.drop = nn.Dropout(drop)\n self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=bias[1])\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.norm(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n return x"
},
{
"identifier": "LayerNorm",
"path": "lib/models_timm/layers/norm.py",
"snippet": "class LayerNorm(nn.LayerNorm):\n \"\"\" LayerNorm w/ fast norm option\n \"\"\"\n def __init__(self, num_channels, eps=1e-6, affine=True):\n super().__init__(num_channels, eps=eps, elementwise_affine=affine)\n self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n if self._fast_norm:\n x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)\n else:\n x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)\n return x"
},
{
"identifier": "LayerNorm2d",
"path": "lib/models_timm/layers/norm.py",
"snippet": "class LayerNorm2d(nn.LayerNorm):\n \"\"\" LayerNorm for channels of '2D' spatial NCHW tensors \"\"\"\n def __init__(self, num_channels, eps=1e-6, affine=True):\n super().__init__(num_channels, eps=eps, elementwise_affine=affine)\n self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = x.permute(0, 2, 3, 1)\n if self._fast_norm:\n x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)\n else:\n x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)\n x = x.permute(0, 3, 1, 2)\n return x"
},
{
"identifier": "trunc_normal_tf_",
"path": "lib/models_timm/layers/weight_init.py",
"snippet": "def trunc_normal_tf_(tensor, mean=0., std=1., a=-2., b=2.):\n # type: (Tensor, float, float, float, float) -> Tensor\n r\"\"\"Fills the input Tensor with values drawn from a truncated\n normal distribution. The values are effectively drawn from the\n normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`\n with values outside :math:`[a, b]` redrawn until they are within\n the bounds. The method used for generating the random values works\n best when :math:`a \\leq \\text{mean} \\leq b`.\n\n NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the\n bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0\n and the result is subsquently scaled and shifted by the mean and std args.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n mean: the mean of the normal distribution\n std: the standard deviation of the normal distribution\n a: the minimum cutoff value\n b: the maximum cutoff value\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.trunc_normal_(w)\n \"\"\"\n with torch.no_grad():\n _trunc_normal_(tensor, 0, 1.0, a, b)\n tensor.mul_(std).add_(mean)\n return tensor"
},
{
"identifier": "create_attn",
"path": "lib/models_timm/layers/create_attn.py",
"snippet": "def create_attn(attn_type, channels, **kwargs):\n module_cls = get_attn(attn_type)\n if module_cls is not None:\n # NOTE: it's expected the first (positional) argument of all attention layers is the # input channels\n return module_cls(channels, **kwargs)\n return None"
},
{
"identifier": "create_conv2d",
"path": "lib/models_timm/layers/create_conv2d.py",
"snippet": "def create_conv2d(in_channels, out_channels, kernel_size, **kwargs):\n \"\"\" Select a 2d convolution implementation based on arguments\n Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d.\n\n Used extensively by EfficientNet, MobileNetv3 and related networks.\n \"\"\"\n if isinstance(kernel_size, list):\n assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently\n if 'groups' in kwargs:\n groups = kwargs.pop('groups')\n if groups == in_channels:\n kwargs['depthwise'] = True\n else:\n assert groups == 1\n # We're going to use only lists for defining the MixedConv2d kernel groups,\n # ints, tuples, other iterables will continue to pass to normal conv and specify h, w.\n m = MixedConv2d(in_channels, out_channels, kernel_size, **kwargs)\n else:\n depthwise = kwargs.pop('depthwise', False)\n # for DW out_channels must be multiple of in_channels as must have out_channels % groups == 0\n groups = in_channels if depthwise else kwargs.pop('groups', 1)\n if 'num_experts' in kwargs and kwargs['num_experts'] > 0:\n m = CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs)\n else:\n m = create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs)\n return m"
},
{
"identifier": "get_act_layer",
"path": "lib/models_timm/layers/create_act.py",
"snippet": "def get_act_layer(name: Union[Type[nn.Module], str] = 'relu'):\n \"\"\" Activation Layer Factory\n Fetching activation layers by name with this function allows export or torch script friendly\n functions to be returned dynamically based on current config.\n \"\"\"\n if not name:\n return None\n if not isinstance(name, str):\n # callable, module, etc\n return name\n if not (is_no_jit() or is_exportable() or is_scriptable()):\n if name in _ACT_LAYER_ME:\n return _ACT_LAYER_ME[name]\n if not (is_no_jit() or is_exportable()):\n if name in _ACT_LAYER_JIT:\n return _ACT_LAYER_JIT[name]\n return _ACT_LAYER_DEFAULT[name]"
},
{
"identifier": "get_norm_layer",
"path": "lib/models_timm/layers/create_norm.py",
"snippet": "def get_norm_layer(norm_layer):\n assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial))\n norm_kwargs = {}\n\n # unbind partial fn, so args can be rebound later\n if isinstance(norm_layer, functools.partial):\n norm_kwargs.update(norm_layer.keywords)\n norm_layer = norm_layer.func\n\n if isinstance(norm_layer, str):\n layer_name = norm_layer.replace('_', '')\n norm_layer = _NORM_MAP.get(layer_name, None)\n elif norm_layer in _NORM_TYPES:\n norm_layer = norm_layer\n elif isinstance(norm_layer, types.FunctionType):\n # if function type, assume it is a lambda/fn that creates a norm layer\n norm_layer = norm_layer\n else:\n type_name = norm_layer.__name__.lower().replace('_', '')\n norm_layer = _NORM_MAP.get(type_name, None)\n assert norm_layer is not None, f\"No equivalent norm layer for {type_name}\"\n\n if norm_kwargs:\n norm_layer = functools.partial(norm_layer, **norm_kwargs) # bind/rebind args\n return norm_layer"
},
{
"identifier": "get_norm_act_layer",
"path": "lib/models_timm/layers/create_norm_act.py",
"snippet": "def get_norm_act_layer(norm_layer, act_layer=None):\n assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial))\n assert act_layer is None or isinstance(act_layer, (type, str, types.FunctionType, functools.partial))\n norm_act_kwargs = {}\n\n # unbind partial fn, so args can be rebound later\n if isinstance(norm_layer, functools.partial):\n norm_act_kwargs.update(norm_layer.keywords)\n norm_layer = norm_layer.func\n\n if isinstance(norm_layer, str):\n layer_name = norm_layer.replace('_', '').lower().split('-')[0]\n norm_act_layer = _NORM_ACT_MAP.get(layer_name, None)\n elif norm_layer in _NORM_ACT_TYPES:\n norm_act_layer = norm_layer\n elif isinstance(norm_layer, types.FunctionType):\n # if function type, must be a lambda/fn that creates a norm_act layer\n norm_act_layer = norm_layer\n else:\n type_name = norm_layer.__name__.lower()\n if type_name.startswith('batchnorm'):\n norm_act_layer = BatchNormAct2d\n elif type_name.startswith('groupnorm'):\n norm_act_layer = GroupNormAct\n elif type_name.startswith('groupnorm1'):\n norm_act_layer = functools.partial(GroupNormAct, num_groups=1)\n elif type_name.startswith('layernorm2d'):\n norm_act_layer = LayerNormAct2d\n elif type_name.startswith('layernorm'):\n norm_act_layer = LayerNormAct\n else:\n assert False, f\"No equivalent norm_act layer for {type_name}\"\n\n if norm_act_layer in _NORM_ACT_REQUIRES_ARG:\n # pass `act_layer` through for backwards compat where `act_layer=None` implies no activation.\n # In the future, may force use of `apply_act` with `act_layer` arg bound to relevant NormAct types\n norm_act_kwargs.setdefault('act_layer', act_layer)\n if norm_act_kwargs:\n norm_act_layer = functools.partial(norm_act_layer, **norm_act_kwargs) # bind/rebind args\n return norm_act_layer"
},
{
"identifier": "to_2tuple",
"path": "lib/models_timm/layers/helpers.py",
"snippet": "def _ntuple(n):\n def parse(x):\ndef make_divisible(v, divisor=8, min_value=None, round_limit=.9):\ndef extend_tuple(x, n):"
},
{
"identifier": "_assert",
"path": "lib/models_timm/layers/trace_utils.py",
"snippet": "def _assert(condition: bool, message: str):\n assert condition, message"
},
{
"identifier": "register_model",
"path": "lib/models_timm/registry.py",
"snippet": "def register_model(fn):\n # lookup containing module\n mod = sys.modules[fn.__module__]\n module_name_split = fn.__module__.split('.')\n module_name = module_name_split[-1] if len(module_name_split) else ''\n\n # add model to __all__ in module\n model_name = fn.__name__\n if hasattr(mod, '__all__'):\n mod.__all__.append(model_name)\n else:\n mod.__all__ = [model_name]\n\n # add entries to registry dict/sets\n _model_entrypoints[model_name] = fn\n _model_to_module[model_name] = module_name\n _module_to_models[module_name].add(model_name)\n has_valid_pretrained = False # check if model has a pretrained url to allow filtering on this\n if hasattr(mod, 'default_cfgs') and model_name in mod.default_cfgs:\n # this will catch all models that have entrypoint matching cfg key, but miss any aliasing\n # entrypoints or non-matching combos\n cfg = mod.default_cfgs[model_name]\n has_valid_pretrained = (\n ('url' in cfg and 'http' in cfg['url']) or\n ('file' in cfg and cfg['file']) or\n ('hf_hub_id' in cfg and cfg['hf_hub_id'])\n )\n _model_pretrained_cfgs[model_name] = mod.default_cfgs[model_name]\n if has_valid_pretrained:\n _model_has_pretrained.add(model_name)\n return fn"
},
{
"identifier": "RelPosMlp",
"path": "lib/models_timm/vision_transformer_relpos.py",
"snippet": "class RelPosMlp(nn.Module):\n def __init__(\n self,\n window_size,\n num_heads=8,\n hidden_dim=128,\n prefix_tokens=0,\n mode='cr',\n pretrained_window_size=(0, 0)\n ):\n super().__init__()\n self.window_size = window_size\n self.window_area = self.window_size[0] * self.window_size[1]\n self.prefix_tokens = prefix_tokens\n self.num_heads = num_heads\n self.bias_shape = (self.window_area,) * 2 + (num_heads,)\n if mode == 'swin':\n self.bias_act = nn.Sigmoid()\n self.bias_gain = 16\n mlp_bias = (True, False)\n elif mode == 'rw':\n self.bias_act = nn.Tanh()\n self.bias_gain = 4\n mlp_bias = True\n else:\n self.bias_act = nn.Identity()\n self.bias_gain = None\n mlp_bias = True\n\n self.mlp = Mlp(\n 2, # x, y\n hidden_features=hidden_dim,\n out_features=num_heads,\n act_layer=nn.ReLU,\n bias=mlp_bias,\n drop=(0.125, 0.)\n )\n\n self.register_buffer(\n \"relative_position_index\",\n gen_relative_position_index(window_size),\n persistent=False)\n\n # get relative_coords_table\n self.register_buffer(\n \"rel_coords_log\",\n gen_relative_log_coords(window_size, pretrained_window_size, mode=mode),\n persistent=False)\n\n def get_bias(self) -> torch.Tensor:\n relative_position_bias = self.mlp(self.rel_coords_log)\n if self.relative_position_index is not None:\n relative_position_bias = relative_position_bias.view(-1, self.num_heads)[\n self.relative_position_index.view(-1)] # Wh*Ww,Wh*Ww,nH\n relative_position_bias = relative_position_bias.view(self.bias_shape)\n relative_position_bias = relative_position_bias.permute(2, 0, 1)\n relative_position_bias = self.bias_act(relative_position_bias)\n if self.bias_gain is not None:\n relative_position_bias = self.bias_gain * relative_position_bias\n if self.prefix_tokens:\n relative_position_bias = F.pad(relative_position_bias, [self.prefix_tokens, 0, self.prefix_tokens, 0])\n return relative_position_bias.unsqueeze(0).contiguous()\n\n def forward(self, attn, shared_rel_pos: Optional[torch.Tensor] = None):\n return attn + self.get_bias()"
},
{
"identifier": "RelPosBias",
"path": "lib/models_timm/vision_transformer_relpos.py",
"snippet": "class RelPosBias(nn.Module):\n\n def __init__(self, window_size, num_heads, prefix_tokens=0):\n super().__init__()\n assert prefix_tokens <= 1\n self.window_size = window_size\n self.window_area = window_size[0] * window_size[1]\n self.bias_shape = (self.window_area + prefix_tokens,) * 2 + (num_heads,)\n\n num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 * prefix_tokens\n self.relative_position_bias_table = nn.Parameter(torch.zeros(num_relative_distance, num_heads))\n self.register_buffer(\n \"relative_position_index\",\n gen_relative_position_index(self.window_size, class_token=prefix_tokens > 0),\n persistent=False,\n )\n\n self.init_weights()\n\n def init_weights(self):\n trunc_normal_(self.relative_position_bias_table, std=.02)\n\n def get_bias(self) -> torch.Tensor:\n relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]\n # win_h * win_w, win_h * win_w, num_heads\n relative_position_bias = relative_position_bias.view(self.bias_shape).permute(2, 0, 1)\n return relative_position_bias.unsqueeze(0).contiguous()\n\n def forward(self, attn, shared_rel_pos: Optional[torch.Tensor] = None):\n return attn + self.get_bias()"
}
] | import math
import torch
from collections import OrderedDict
from dataclasses import dataclass, replace
from functools import partial
from typing import Callable, Optional, Union, Tuple, List
from torch import nn
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from lib.models_timm.helpers import build_model_with_cfg, checkpoint_seq, named_apply
from lib.models_timm.fx_features import register_notrace_function
from lib.models_timm.layers import Mlp, ConvMlp, DropPath, ClassifierHead, trunc_normal_tf_, LayerNorm2d, LayerNorm
from lib.models_timm.layers import create_attn, get_act_layer, get_norm_layer, get_norm_act_layer, create_conv2d
from lib.models_timm.layers import to_2tuple, extend_tuple, make_divisible, _assert
from lib.models_timm.registry import register_model
from lib.models_timm.vision_transformer_relpos import RelPosMlp, RelPosBias # FIXME move these to common location | 9,272 |
def cfg_window_size(cfg: MaxxVitTransformerCfg, img_size: Tuple[int, int]):
if cfg.window_size is not None:
assert cfg.grid_size
return cfg
partition_size = img_size[0] // cfg.partition_ratio, img_size[1] // cfg.partition_ratio
cfg = replace(cfg, window_size=partition_size, grid_size=partition_size)
return cfg
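# Worked example: with the default partition_ratio of 32, a (224, 224) input resolves
# both window_size and grid_size to (224 // 32, 224 // 32) == (7, 7); a (256, 256)
# input resolves them to (8, 8), which matches the pool_size used by the 256px
# default cfgs.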
class MaxxVit(nn.Module):
""" CoaTNet + MaxVit base model.
Highly configurable for different block compositions, tensor layouts, pooling types.
"""
def __init__(
self,
cfg: MaxxVitCfg,
img_size: Union[int, Tuple[int, int]] = 224,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
drop_rate: float = 0.,
drop_path_rate: float = 0.
):
super().__init__()
img_size = to_2tuple(img_size)
transformer_cfg = cfg_window_size(cfg.transformer_cfg, img_size)
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = cfg.embed_dim[-1]
self.embed_dim = cfg.embed_dim
self.drop_rate = drop_rate
self.grad_checkpointing = False
self.stem = Stem(
in_chs=in_chans,
out_chs=cfg.stem_width,
act_layer=cfg.conv_cfg.act_layer,
norm_layer=cfg.conv_cfg.norm_layer,
norm_eps=cfg.conv_cfg.norm_eps,
)
stride = self.stem.stride
feat_size = tuple([i // s for i, s in zip(img_size, to_2tuple(stride))])
num_stages = len(cfg.embed_dim)
assert len(cfg.depths) == num_stages
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)]
in_chs = self.stem.out_chs
#final_norm_layer = get_norm_layer(cfg.transformer_cfg.norm_layer)
stages = []
#norms = []
for i in range(num_stages):
stage_stride = 2
out_chs = cfg.embed_dim[i]
feat_size = tuple([(r - 1) // stage_stride + 1 for r in feat_size])
stages += [MaxxVitStage(
in_chs,
out_chs,
depth=cfg.depths[i],
block_types=cfg.block_type[i],
conv_cfg=cfg.conv_cfg,
transformer_cfg=transformer_cfg,
feat_size=feat_size,
drop_path=dpr[i],
)]
#norms.append(final_norm_layer(out_chs, eps=cfg.transformer_cfg.norm_eps))
stride *= stage_stride
in_chs = out_chs
self.stages = nn.Sequential(*stages)
#self.norms = nn.Sequential(*norms)
final_norm_layer = get_norm_layer(cfg.transformer_cfg.norm_layer)
self.norm = final_norm_layer(self.num_features, eps=cfg.transformer_cfg.norm_eps)
# Classifier head
#self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
# Weight init (default PyTorch init works well for AdamW if scheme not set)
assert cfg.weight_init in ('', 'normal', 'trunc_normal', 'xavier_normal', 'vit_eff')
if cfg.weight_init:
named_apply(partial(self._init_weights, scheme=cfg.weight_init), self)
def _init_weights(self, module, name, scheme=''):
if hasattr(module, 'init_weights'):
try:
module.init_weights(scheme=scheme)
except TypeError:
module.init_weights()
@torch.jit.ignore
def no_weight_decay(self):
return {
k for k, _ in self.named_parameters()
if any(n in k for n in ["relative_position_bias_table", "rel_pos.mlp"])}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem', # stem and embed
blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is None:
global_pool = self.head.global_pool.pool_type
| """ MaxVit and CoAtNet Vision Transformer - CNN Hybrids in PyTorch
This is a from-scratch implementation of both CoAtNet and MaxVit in PyTorch.
99% of the implementation was done from the papers; however, some last-minute adjustments were made
based on the (as yet unfinished?) public code release https://github.com/google-research/maxvit
There are multiple sets of models defined for both architectures. Typically, names with a
`_rw` suffix are my own original configs prior to referencing https://github.com/google-research/maxvit.
These configs work well and appear to be a bit faster / less resource-hungry than the paper configs.
The models without an extra prefix / suffix (coatnet_0_224, maxvit_tiny_224, etc) are intended to
match the paper, BUT, without any official pretrained weights it's difficult to confirm a 100% match.
# FIXME / WARNING
This impl remains a WIP; some configs and models may vanish or change...
Papers:
MaxViT: Multi-Axis Vision Transformer - https://arxiv.org/abs/2204.01697
@article{tu2022maxvit,
title={MaxViT: Multi-Axis Vision Transformer},
author={Tu, Zhengzhong and Talebi, Hossein and Zhang, Han and Yang, Feng and Milanfar, Peyman and Bovik, Alan and Li, Yinxiao},
journal={ECCV},
year={2022},
}
CoAtNet: Marrying Convolution and Attention for All Data Sizes - https://arxiv.org/abs/2106.04803
@article{DBLP:journals/corr/abs-2106-04803,
author = {Zihang Dai and Hanxiao Liu and Quoc V. Le and Mingxing Tan},
title = {CoAtNet: Marrying Convolution and Attention for All Data Sizes},
journal = {CoRR},
volume = {abs/2106.04803},
year = {2021}
}
Hacked together by / Copyright 2022, Ross Wightman
"""
__all__ = ['MaxxVitCfg', 'MaxxVitConvCfg', 'MaxxVitTransformerCfg', 'MaxxVit']
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.95, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
'first_conv': 'stem.conv1', 'classifier': 'head.fc',
'fixed_input_size': True,
**kwargs
}
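# For example, _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)) returns the
# dict above with input_size and pool_size overridden via **kwargs; this is the
# pattern used for the 256px entries in default_cfgs below.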
default_cfgs = {
# Fiddling with configs / defaults / still pretraining
'coatnet_pico_rw_224': _cfg(url=''),
'coatnet_nano_rw_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_nano_rw_224_sw-f53093b4.pth',
crop_pct=0.9),
'coatnet_0_rw_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_0_rw_224_sw-a6439706.pth'),
'coatnet_1_rw_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_1_rw_224_sw-5cae1ea8.pth'
),
'coatnet_2_rw_224': _cfg(url=''),
'coatnet_3_rw_224': _cfg(url=''),
# Highly experimental configs
'coatnet_bn_0_rw_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_bn_0_rw_224_sw-c228e218.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD,
crop_pct=0.95),
'coatnet_rmlp_nano_rw_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_nano_rw_224_sw-bd1d51b3.pth',
crop_pct=0.9),
'coatnet_rmlp_0_rw_224': _cfg(url=''),
'coatnet_rmlp_1_rw_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_1_rw_224_sw-9051e6c3.pth'),
'coatnet_rmlp_2_rw_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_2_rw_224_sw-5ccfac55.pth'),
'coatnet_rmlp_3_rw_224': _cfg(url=''),
'coatnet_nano_cc_224': _cfg(url=''),
'coatnext_nano_rw_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnext_nano_rw_224_ad-22cb71c2.pth',
crop_pct=0.9),
# Trying to be like the CoAtNet paper configs
'coatnet_0_224': _cfg(url=''),
'coatnet_1_224': _cfg(url=''),
'coatnet_2_224': _cfg(url=''),
'coatnet_3_224': _cfg(url=''),
'coatnet_4_224': _cfg(url=''),
'coatnet_5_224': _cfg(url=''),
# Experimental configs
'maxvit_pico_rw_256': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)),
'maxvit_nano_rw_256': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_nano_rw_256_sw-fb127241.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxvit_tiny_rw_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_tiny_rw_224_sw-7d0dffeb.pth'),
'maxvit_tiny_rw_256': _cfg(
url='',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxvit_rmlp_pico_rw_256': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_pico_rw_256_sw-8d82f2c6.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxvit_rmlp_nano_rw_256': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_nano_rw_256_sw-c17bb0d6.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxvit_rmlp_tiny_rw_256': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_tiny_rw_256_sw-bbef0ff5.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxvit_rmlp_small_rw_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_small_rw_224_sw-6ef0ae4f.pth',
crop_pct=0.9,
),
'maxvit_rmlp_small_rw_256': _cfg(
url='',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxvit_tiny_pm_256': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)),
'maxxvit_rmlp_nano_rw_256': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_nano_rw_256_sw-0325d459.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxxvit_rmlp_tiny_rw_256': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)),
'maxxvit_rmlp_small_rw_256': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_small_rw_256_sw-37e217ff.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
# Trying to be like the MaxViT paper configs
'maxvit_tiny_224': _cfg(url=''),
'maxvit_small_224': _cfg(url=''),
'maxvit_base_224': _cfg(url=''),
'maxvit_large_224': _cfg(url=''),
'maxvit_xlarge_224': _cfg(url=''),
}
@dataclass
class MaxxVitTransformerCfg:
dim_head: int = 32
expand_ratio: float = 4.0
expand_first: bool = True
shortcut_bias: bool = True
attn_bias: bool = True
attn_drop: float = 0.
proj_drop: float = 0.
pool_type: str = 'avg2'
rel_pos_type: str = 'bias'
rel_pos_dim: int = 512 # for relative position types w/ MLP
partition_ratio: int = 32
window_size: Optional[Tuple[int, int]] = None
grid_size: Optional[Tuple[int, int]] = None
init_values: Optional[float] = None
act_layer: str = 'gelu'
norm_layer: str = 'layernorm2d'
norm_layer_cl: str = 'layernorm'
norm_eps: float = 1e-6
def __post_init__(self):
if self.grid_size is not None:
self.grid_size = to_2tuple(self.grid_size)
if self.window_size is not None:
self.window_size = to_2tuple(self.window_size)
if self.grid_size is None:
self.grid_size = self.window_size
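# Note: when only window_size is given, grid_size falls back to the same value; when
# both are left as None, they are filled in from partition_ratio and the image size
# when the model is constructed.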
@dataclass
class MaxxVitConvCfg:
block_type: str = 'mbconv'
expand_ratio: float = 4.0
expand_output: bool = True # calculate expansion channels from output (vs input chs)
kernel_size: int = 3
group_size: int = 1 # 1 == depthwise
pre_norm_act: bool = False # activation after pre-norm
output_bias: bool = True # bias for shortcut + final 1x1 projection conv
stride_mode: str = 'dw' # stride done via one of 'pool', '1x1', 'dw'
pool_type: str = 'avg2'
downsample_pool_type: str = 'avg2'
attn_early: bool = False # apply attn between conv2 and norm2, instead of after norm2
attn_layer: str = 'se'
attn_act_layer: str = 'silu'
attn_ratio: float = 0.25
init_values: Optional[float] = 1e-6 # for ConvNeXt block, ignored by MBConv
act_layer: str = 'gelu'
norm_layer: str = ''
norm_layer_cl: str = ''
norm_eps: Optional[float] = None
def __post_init__(self):
# mbconv vs convnext blocks have different defaults, set in post_init to avoid explicit config args
assert self.block_type in ('mbconv', 'convnext')
use_mbconv = self.block_type == 'mbconv'
if not self.norm_layer:
self.norm_layer = 'batchnorm2d' if use_mbconv else 'layernorm2d'
if not self.norm_layer_cl and not use_mbconv:
self.norm_layer_cl = 'layernorm'
if self.norm_eps is None:
self.norm_eps = 1e-5 if use_mbconv else 1e-6
self.downsample_pool_type = self.downsample_pool_type or self.pool_type
@dataclass
class MaxxVitCfg:
embed_dim: Tuple[int, ...] = (96, 192, 384, 768)
depths: Tuple[int, ...] = (2, 3, 5, 2)
block_type: Tuple[Union[str, Tuple[str, ...]], ...] = ('C', 'C', 'T', 'T')
stem_width: Union[int, Tuple[int, int]] = 64
stem_bias: bool = True
conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg()
transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg()
weight_init: str = 'vit_eff'
def _rw_coat_cfg(
stride_mode='pool',
pool_type='avg2',
conv_output_bias=False,
conv_attn_early=False,
conv_attn_act_layer='relu',
conv_norm_layer='',
transformer_shortcut_bias=True,
transformer_norm_layer='layernorm2d',
transformer_norm_layer_cl='layernorm',
init_values=None,
rel_pos_type='bias',
rel_pos_dim=512,
):
# 'RW' timm variant models were created and trained before seeing https://github.com/google-research/maxvit
# Common differences for initial timm models:
    #  - pre-norm layer in MbConv included an activation after norm
# - mbconv expansion calculated from input instead of output chs
# - mbconv shortcut and final 1x1 conv did not have a bias
# - SE act layer was relu, not silu
# - mbconv uses silu in timm, not gelu
# - expansion in attention block done via output proj, not input proj
# Variable differences (evolved over training initial models):
# - avg pool with kernel_size=2 favoured downsampling (instead of maxpool for coat)
# - SE attention was between conv2 and norm/act
# - default to avg pool for mbconv downsample instead of 1x1 or dw conv
# - transformer block shortcut has no bias
return dict(
conv_cfg=MaxxVitConvCfg(
stride_mode=stride_mode,
pool_type=pool_type,
pre_norm_act=True,
expand_output=False,
output_bias=conv_output_bias,
attn_early=conv_attn_early,
attn_act_layer=conv_attn_act_layer,
act_layer='silu',
norm_layer=conv_norm_layer,
),
transformer_cfg=MaxxVitTransformerCfg(
expand_first=False,
shortcut_bias=transformer_shortcut_bias,
pool_type=pool_type,
init_values=init_values,
norm_layer=transformer_norm_layer,
norm_layer_cl=transformer_norm_layer_cl,
rel_pos_type=rel_pos_type,
rel_pos_dim=rel_pos_dim,
),
)
def _rw_max_cfg(
stride_mode='dw',
pool_type='avg2',
conv_output_bias=False,
conv_attn_ratio=1 / 16,
conv_norm_layer='',
transformer_norm_layer='layernorm2d',
transformer_norm_layer_cl='layernorm',
window_size=None,
dim_head=32,
init_values=None,
rel_pos_type='bias',
rel_pos_dim=512,
):
# 'RW' timm variant models were created and trained before seeing https://github.com/google-research/maxvit
# Differences of initial timm models:
# - mbconv expansion calculated from input instead of output chs
# - mbconv shortcut and final 1x1 conv did not have a bias
# - mbconv uses silu in timm, not gelu
# - expansion in attention block done via output proj, not input proj
return dict(
conv_cfg=MaxxVitConvCfg(
stride_mode=stride_mode,
pool_type=pool_type,
expand_output=False,
output_bias=conv_output_bias,
attn_ratio=conv_attn_ratio,
act_layer='silu',
norm_layer=conv_norm_layer,
),
transformer_cfg=MaxxVitTransformerCfg(
expand_first=False,
pool_type=pool_type,
dim_head=dim_head,
window_size=window_size,
init_values=init_values,
norm_layer=transformer_norm_layer,
norm_layer_cl=transformer_norm_layer_cl,
rel_pos_type=rel_pos_type,
rel_pos_dim=rel_pos_dim,
),
)
def _next_cfg(
stride_mode='dw',
pool_type='avg2',
conv_norm_layer='layernorm2d',
conv_norm_layer_cl='layernorm',
transformer_norm_layer='layernorm2d',
transformer_norm_layer_cl='layernorm',
window_size=None,
init_values=1e-6,
rel_pos_type='mlp', # MLP by default for maxxvit
rel_pos_dim=512,
):
# For experimental models with convnext instead of mbconv
init_values = to_2tuple(init_values)
return dict(
conv_cfg=MaxxVitConvCfg(
block_type='convnext',
stride_mode=stride_mode,
pool_type=pool_type,
expand_output=False,
init_values=init_values[0],
norm_layer=conv_norm_layer,
norm_layer_cl=conv_norm_layer_cl,
),
transformer_cfg=MaxxVitTransformerCfg(
expand_first=False,
pool_type=pool_type,
window_size=window_size,
init_values=init_values[1],
norm_layer=transformer_norm_layer,
norm_layer_cl=transformer_norm_layer_cl,
rel_pos_type=rel_pos_type,
rel_pos_dim=rel_pos_dim,
),
)
model_cfgs = dict(
# Fiddling with configs / defaults / still pretraining
coatnet_pico_rw_224=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(2, 3, 5, 2),
stem_width=(32, 64),
**_rw_max_cfg( # using newer max defaults here
conv_output_bias=True,
conv_attn_ratio=0.25,
),
),
coatnet_nano_rw_224=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(3, 4, 6, 3),
stem_width=(32, 64),
**_rw_max_cfg( # using newer max defaults here
stride_mode='pool',
conv_output_bias=True,
conv_attn_ratio=0.25,
),
),
coatnet_0_rw_224=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 3, 7, 2), # deeper than paper '0' model
stem_width=(32, 64),
**_rw_coat_cfg(
conv_attn_early=True,
transformer_shortcut_bias=False,
),
),
coatnet_1_rw_224=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 6, 14, 2),
stem_width=(32, 64),
**_rw_coat_cfg(
stride_mode='dw',
conv_attn_early=True,
transformer_shortcut_bias=False,
)
),
coatnet_2_rw_224=MaxxVitCfg(
embed_dim=(128, 256, 512, 1024),
depths=(2, 6, 14, 2),
stem_width=(64, 128),
**_rw_coat_cfg(
stride_mode='dw',
conv_attn_act_layer='silu',
init_values=1e-6,
),
),
coatnet_3_rw_224=MaxxVitCfg(
embed_dim=(192, 384, 768, 1536),
depths=(2, 6, 14, 2),
stem_width=(96, 192),
**_rw_coat_cfg(
stride_mode='dw',
conv_attn_act_layer='silu',
init_values=1e-6,
),
),
# Highly experimental configs
coatnet_bn_0_rw_224=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 3, 7, 2), # deeper than paper '0' model
stem_width=(32, 64),
**_rw_coat_cfg(
stride_mode='dw',
conv_attn_early=True,
transformer_shortcut_bias=False,
transformer_norm_layer='batchnorm2d',
)
),
coatnet_rmlp_nano_rw_224=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(3, 4, 6, 3),
stem_width=(32, 64),
**_rw_max_cfg(
conv_output_bias=True,
conv_attn_ratio=0.25,
rel_pos_type='mlp',
rel_pos_dim=384,
),
),
coatnet_rmlp_0_rw_224=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 3, 7, 2), # deeper than paper '0' model
stem_width=(32, 64),
**_rw_coat_cfg(
stride_mode='dw',
rel_pos_type='mlp',
),
),
coatnet_rmlp_1_rw_224=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 6, 14, 2),
stem_width=(32, 64),
**_rw_coat_cfg(
pool_type='max',
conv_attn_early=True,
transformer_shortcut_bias=False,
rel_pos_type='mlp',
rel_pos_dim=384, # was supposed to be 512, woops
),
),
coatnet_rmlp_2_rw_224=MaxxVitCfg(
embed_dim=(128, 256, 512, 1024),
depths=(2, 6, 14, 2),
stem_width=(64, 128),
**_rw_coat_cfg(
stride_mode='dw',
conv_attn_act_layer='silu',
init_values=1e-6,
rel_pos_type='mlp'
),
),
coatnet_rmlp_3_rw_224=MaxxVitCfg(
embed_dim=(192, 384, 768, 1536),
depths=(2, 6, 14, 2),
stem_width=(96, 192),
**_rw_coat_cfg(
stride_mode='dw',
conv_attn_act_layer='silu',
init_values=1e-6,
rel_pos_type='mlp'
),
),
coatnet_nano_cc_224=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(3, 4, 6, 3),
stem_width=(32, 64),
block_type=('C', 'C', ('C', 'T'), ('C', 'T')),
**_rw_coat_cfg(),
),
coatnext_nano_rw_224=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(3, 4, 6, 3),
stem_width=(32, 64),
weight_init='normal',
**_next_cfg(
rel_pos_type='bias',
init_values=(1e-5, None)
),
),
# Trying to be like the CoAtNet paper configs
coatnet_0_224=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 3, 5, 2),
stem_width=64,
),
coatnet_1_224=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 6, 14, 2),
stem_width=64,
),
coatnet_2_224=MaxxVitCfg(
embed_dim=(128, 256, 512, 1024),
depths=(2, 6, 14, 2),
stem_width=128,
),
coatnet_3_224=MaxxVitCfg(
embed_dim=(192, 384, 768, 1536),
depths=(2, 6, 14, 2),
stem_width=192,
),
coatnet_4_224=MaxxVitCfg(
embed_dim=(192, 384, 768, 1536),
depths=(2, 12, 28, 2),
stem_width=192,
),
coatnet_5_224=MaxxVitCfg(
embed_dim=(256, 512, 1280, 2048),
depths=(2, 12, 28, 2),
stem_width=192,
),
# Experimental MaxVit configs
maxvit_pico_rw_256=MaxxVitCfg(
embed_dim=(32, 64, 128, 256),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(24, 32),
**_rw_max_cfg(),
),
maxvit_nano_rw_256=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(1, 2, 3, 1),
block_type=('M',) * 4,
stem_width=(32, 64),
**_rw_max_cfg(),
),
maxvit_tiny_rw_224=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(32, 64),
**_rw_max_cfg(),
),
maxvit_tiny_rw_256=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(32, 64),
**_rw_max_cfg(),
),
maxvit_rmlp_pico_rw_256=MaxxVitCfg(
embed_dim=(32, 64, 128, 256),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(24, 32),
**_rw_max_cfg(rel_pos_type='mlp'),
),
maxvit_rmlp_nano_rw_256=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(1, 2, 3, 1),
block_type=('M',) * 4,
stem_width=(32, 64),
**_rw_max_cfg(rel_pos_type='mlp'),
),
maxvit_rmlp_tiny_rw_256=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(32, 64),
**_rw_max_cfg(rel_pos_type='mlp'),
),
maxvit_rmlp_small_rw_224=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(32, 64),
**_rw_max_cfg(
rel_pos_type='mlp',
init_values=1e-6,
),
),
maxvit_rmlp_small_rw_256=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(32, 64),
**_rw_max_cfg(
rel_pos_type='mlp',
init_values=1e-6,
),
),
maxvit_tiny_pm_256=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(2, 2, 5, 2),
block_type=('PM',) * 4,
stem_width=(32, 64),
**_rw_max_cfg(),
),
maxxvit_rmlp_nano_rw_256=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(1, 2, 3, 1),
block_type=('M',) * 4,
stem_width=(32, 64),
weight_init='normal',
**_next_cfg(),
),
maxxvit_rmlp_tiny_rw_256=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(32, 64),
**_next_cfg(),
),
maxxvit_rmlp_small_rw_256=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(48, 96),
**_next_cfg(),
),
# Trying to be like the MaxViT paper configs
maxvit_tiny_224=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=64,
),
maxvit_small_224=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=64,
),
maxvit_base_224=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 6, 14, 2),
block_type=('M',) * 4,
stem_width=64,
),
maxvit_large_224=MaxxVitCfg(
embed_dim=(128, 256, 512, 1024),
depths=(2, 6, 14, 2),
block_type=('M',) * 4,
stem_width=128,
),
maxvit_xlarge_224=MaxxVitCfg(
embed_dim=(192, 384, 768, 1536),
depths=(2, 6, 14, 2),
block_type=('M',) * 4,
stem_width=192,
),
)
class Attention2d(nn.Module):
""" multi-head attention for 2D NCHW tensors"""
def __init__(
self,
dim: int,
dim_out: Optional[int] = None,
dim_head: int = 32,
bias: bool = True,
expand_first: bool = True,
rel_pos_cls: Callable = None,
attn_drop: float = 0.,
proj_drop: float = 0.
):
super().__init__()
dim_out = dim_out or dim
dim_attn = dim_out if expand_first else dim
self.num_heads = dim_attn // dim_head
self.dim_head = dim_head
self.scale = dim_head ** -0.5
self.qkv = nn.Conv2d(dim, dim_attn * 3, 1, bias=bias)
self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Conv2d(dim_attn, dim_out, 1, bias=bias)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
B, C, H, W = x.shape
q, k, v = self.qkv(x).view(B, self.num_heads, self.dim_head * 3, -1).chunk(3, dim=2)
attn = (q.transpose(-2, -1) @ k) * self.scale
if self.rel_pos is not None:
attn = self.rel_pos(attn)
elif shared_rel_pos is not None:
attn = attn + shared_rel_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W)
x = self.proj(x)
x = self.proj_drop(x)
return x
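# A minimal shape-check sketch for Attention2d; the helper below is illustrative only
# (its name is not used elsewhere) and assumes the default rel_pos_cls=None path.
def _demo_attention2d_shapes():
    import torch
    attn = Attention2d(dim=64, dim_out=64, dim_head=32)   # 2 heads of width 32
    x = torch.randn(2, 64, 7, 7)                          # B, C, H, W
    out = attn(x)                                         # attention over the 49 spatial positions
    assert out.shape == (2, 64, 7, 7)                     # spatial size and channels preserved
    return out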
class AttentionCl(nn.Module):
""" Channels-last multi-head attention (B, ..., C) """
def __init__(
self,
dim: int,
dim_out: Optional[int] = None,
dim_head: int = 32,
bias: bool = True,
expand_first: bool = True,
rel_pos_cls: Callable = None,
attn_drop: float = 0.,
proj_drop: float = 0.
):
super().__init__()
dim_out = dim_out or dim
dim_attn = dim_out if expand_first and dim_out > dim else dim
assert dim_attn % dim_head == 0, 'attn dim should be divisible by head_dim'
self.num_heads = dim_attn // dim_head
self.dim_head = dim_head
self.scale = dim_head ** -0.5
self.qkv = nn.Linear(dim, dim_attn * 3, bias=bias)
self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim_attn, dim_out, bias=bias)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
B = x.shape[0]
restore_shape = x.shape[:-1]
q, k, v = self.qkv(x).view(B, -1, self.num_heads, self.dim_head * 3).transpose(1, 2).chunk(3, dim=3)
attn = (q @ k.transpose(-2, -1)) * self.scale
if self.rel_pos is not None:
attn = self.rel_pos(attn, shared_rel_pos=shared_rel_pos)
elif shared_rel_pos is not None:
attn = attn + shared_rel_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(restore_shape + (-1,))
x = self.proj(x)
x = self.proj_drop(x)
return x
class LayerScale(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
gamma = self.gamma
return x.mul_(gamma) if self.inplace else x * gamma
class LayerScale2d(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
gamma = self.gamma.view(1, -1, 1, 1)
return x.mul_(gamma) if self.inplace else x * gamma
class Downsample2d(nn.Module):
""" A downsample pooling module supporting several maxpool and avgpool modes
* 'max' - MaxPool2d w/ kernel_size 3, stride 2, padding 1
* 'max2' - MaxPool2d w/ kernel_size = stride = 2
* 'avg' - AvgPool2d w/ kernel_size 3, stride 2, padding 1
* 'avg2' - AvgPool2d w/ kernel_size = stride = 2
"""
def __init__(
self,
dim: int,
dim_out: int,
pool_type: str = 'avg2',
bias: bool = True,
):
super().__init__()
assert pool_type in ('max', 'max2', 'avg', 'avg2')
if pool_type == 'max':
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
elif pool_type == 'max2':
self.pool = nn.MaxPool2d(2) # kernel_size == stride == 2
elif pool_type == 'avg':
self.pool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1, count_include_pad=False)
else:
self.pool = nn.AvgPool2d(2) # kernel_size == stride == 2
if dim != dim_out:
self.expand = nn.Conv2d(dim, dim_out, 1, bias=bias)
else:
self.expand = nn.Identity()
def forward(self, x):
x = self.pool(x) # spatial downsample
x = self.expand(x) # expand chs
return x
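# A small usage sketch for Downsample2d (the helper name is illustrative only): every
# pool_type halves the spatial dims, and the 1x1 expand conv is only added when the
# channel count changes.
def _demo_downsample2d():
    import torch
    down = Downsample2d(dim=64, dim_out=128, pool_type='avg2')
    x = torch.randn(1, 64, 16, 16)
    y = down(x)
    assert y.shape == (1, 128, 8, 8)   # spatial halved, channels expanded 64 -> 128
    return y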
def _init_transformer(module, name, scheme=''):
if isinstance(module, (nn.Conv2d, nn.Linear)):
if scheme == 'normal':
nn.init.normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif scheme == 'trunc_normal':
trunc_normal_tf_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif scheme == 'xavier_normal':
nn.init.xavier_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
else:
# vit like
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
if 'mlp' in name:
nn.init.normal_(module.bias, std=1e-6)
else:
nn.init.zeros_(module.bias)
class TransformerBlock2d(nn.Module):
""" Transformer block with 2D downsampling
'2D' NCHW tensor layout
Some gains can be seen on GPU using a 1D / CL block, BUT w/ the need to switch back/forth to NCHW
for spatial pooling, the benefit is minimal so ended up using just this variant for CoAt configs.
This impl was faster on TPU w/ PT XLA than the 1D experiment.
"""
def __init__(
self,
dim: int,
dim_out: int,
stride: int = 1,
rel_pos_cls: Callable = None,
cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
drop_path: float = 0.,
):
super().__init__()
norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)
act_layer = get_act_layer(cfg.act_layer)
if stride == 2:
self.shortcut = Downsample2d(dim, dim_out, pool_type=cfg.pool_type, bias=cfg.shortcut_bias)
self.norm1 = nn.Sequential(OrderedDict([
('norm', norm_layer(dim)),
('down', Downsample2d(dim, dim, pool_type=cfg.pool_type)),
]))
else:
assert dim == dim_out
self.shortcut = nn.Identity()
self.norm1 = norm_layer(dim)
self.attn = Attention2d(
dim,
dim_out,
dim_head=cfg.dim_head,
expand_first=cfg.expand_first,
bias=cfg.attn_bias,
rel_pos_cls=rel_pos_cls,
attn_drop=cfg.attn_drop,
proj_drop=cfg.proj_drop
)
self.ls1 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim_out)
self.mlp = ConvMlp(
in_features=dim_out,
hidden_features=int(dim_out * cfg.expand_ratio),
act_layer=act_layer,
drop=cfg.proj_drop)
self.ls2 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def init_weights(self, scheme=''):
named_apply(partial(_init_transformer, scheme=scheme), self)
def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
x = self.shortcut(x) + self.drop_path1(self.ls1(self.attn(self.norm1(x), shared_rel_pos=shared_rel_pos)))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
def _init_conv(module, name, scheme=''):
if isinstance(module, nn.Conv2d):
if scheme == 'normal':
nn.init.normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif scheme == 'trunc_normal':
trunc_normal_tf_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif scheme == 'xavier_normal':
nn.init.xavier_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
else:
# efficientnet like
fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
fan_out //= module.groups
nn.init.normal_(module.weight, 0, math.sqrt(2.0 / fan_out))
if module.bias is not None:
nn.init.zeros_(module.bias)
def num_groups(group_size, channels):
if not group_size: # 0 or None
return 1 # normal conv with 1 group
else:
# NOTE group_size == 1 -> depthwise conv
assert channels % group_size == 0
return channels // group_size
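# Worked examples: num_groups(None, 96) -> 1 (ordinary conv), num_groups(1, 96) -> 96
# (depthwise, one channel per group), num_groups(32, 96) -> 3 groups of 32 channels.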
class MbConvBlock(nn.Module):
""" Pre-Norm Conv Block - 1x1 - kxk - 1x1, w/ inverted bottleneck (expand)
"""
def __init__(
self,
in_chs: int,
out_chs: int,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
drop_path: float = 0.
):
super(MbConvBlock, self).__init__()
norm_act_layer = partial(get_norm_act_layer(cfg.norm_layer, cfg.act_layer), eps=cfg.norm_eps)
mid_chs = make_divisible((out_chs if cfg.expand_output else in_chs) * cfg.expand_ratio)
groups = num_groups(cfg.group_size, mid_chs)
if stride == 2:
self.shortcut = Downsample2d(in_chs, out_chs, pool_type=cfg.pool_type, bias=cfg.output_bias)
else:
self.shortcut = nn.Identity()
assert cfg.stride_mode in ('pool', '1x1', 'dw')
stride_pool, stride_1, stride_2 = 1, 1, 1
if cfg.stride_mode == 'pool':
# NOTE this is not described in paper, experiment to find faster option that doesn't stride in 1x1
stride_pool, dilation_2 = stride, dilation[1]
# FIXME handle dilation of avg pool
elif cfg.stride_mode == '1x1':
# NOTE I don't like this option described in paper, 1x1 w/ stride throws info away
stride_1, dilation_2 = stride, dilation[1]
else:
stride_2, dilation_2 = stride, dilation[0]
self.pre_norm = norm_act_layer(in_chs, apply_act=cfg.pre_norm_act)
if stride_pool > 1:
self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type)
else:
self.down = nn.Identity()
self.conv1_1x1 = create_conv2d(in_chs, mid_chs, 1, stride=stride_1)
self.norm1 = norm_act_layer(mid_chs)
self.conv2_kxk = create_conv2d(
mid_chs, mid_chs, cfg.kernel_size, stride=stride_2, dilation=dilation_2, groups=groups)
attn_kwargs = {}
if isinstance(cfg.attn_layer, str):
if cfg.attn_layer == 'se' or cfg.attn_layer == 'eca':
attn_kwargs['act_layer'] = cfg.attn_act_layer
attn_kwargs['rd_channels'] = int(cfg.attn_ratio * (out_chs if cfg.expand_output else mid_chs))
# two different orderings for SE and norm2 (due to some weights and trials using SE before norm2)
if cfg.attn_early:
self.se_early = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs)
self.norm2 = norm_act_layer(mid_chs)
self.se = None
else:
self.se_early = None
self.norm2 = norm_act_layer(mid_chs)
self.se = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs)
self.conv3_1x1 = create_conv2d(mid_chs, out_chs, 1, bias=cfg.output_bias)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def init_weights(self, scheme=''):
named_apply(partial(_init_conv, scheme=scheme), self)
def forward(self, x):
shortcut = self.shortcut(x)
x = self.pre_norm(x)
x = self.down(x)
# 1x1 expansion conv & norm-act
x = self.conv1_1x1(x)
x = self.norm1(x)
# depthwise / grouped 3x3 conv w/ SE (or other) channel attention & norm-act
x = self.conv2_kxk(x)
if self.se_early is not None:
x = self.se_early(x)
x = self.norm2(x)
if self.se is not None:
x = self.se(x)
# 1x1 linear projection to output width
x = self.conv3_1x1(x)
x = self.drop_path(x) + shortcut
return x
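# A brief usage sketch for MbConvBlock with the default MaxxVitConvCfg (the helper
# name is illustrative only): a stride-2 block halves the spatial dims on both the
# shortcut and the main path while remapping the channel count.
def _demo_mbconv_block():
    import torch
    block = MbConvBlock(in_chs=64, out_chs=128, stride=2)
    x = torch.randn(1, 64, 32, 32)
    y = block(x)
    assert y.shape == (1, 128, 16, 16)
    return y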
class ConvNeXtBlock(nn.Module):
""" ConvNeXt Block
"""
def __init__(
self,
in_chs: int,
out_chs: Optional[int] = None,
kernel_size: int = 7,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
conv_mlp: bool = True,
drop_path: float = 0.
):
super().__init__()
out_chs = out_chs or in_chs
act_layer = get_act_layer(cfg.act_layer)
if conv_mlp:
norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)
mlp_layer = ConvMlp
else:
assert 'layernorm' in cfg.norm_layer
norm_layer = LayerNorm
mlp_layer = Mlp
self.use_conv_mlp = conv_mlp
if stride == 2:
self.shortcut = Downsample2d(in_chs, out_chs)
elif in_chs != out_chs:
self.shortcut = nn.Conv2d(in_chs, out_chs, kernel_size=1, bias=cfg.output_bias)
else:
self.shortcut = nn.Identity()
assert cfg.stride_mode in ('pool', 'dw')
stride_pool, stride_dw = 1, 1
# FIXME handle dilation?
if cfg.stride_mode == 'pool':
stride_pool = stride
else:
stride_dw = stride
if stride_pool == 2:
self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type)
else:
self.down = nn.Identity()
self.conv_dw = create_conv2d(
in_chs, out_chs, kernel_size=kernel_size, stride=stride_dw, dilation=dilation[1],
depthwise=True, bias=cfg.output_bias)
self.norm = norm_layer(out_chs)
self.mlp = mlp_layer(out_chs, int(cfg.expand_ratio * out_chs), bias=cfg.output_bias, act_layer=act_layer)
if conv_mlp:
self.ls = LayerScale2d(out_chs, cfg.init_values) if cfg.init_values else nn.Identity()
else:
self.ls = LayerScale(out_chs, cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.down(x)
x = self.conv_dw(x)
if self.use_conv_mlp:
x = self.norm(x)
x = self.mlp(x)
x = self.ls(x)
else:
x = x.permute(0, 2, 3, 1)
x = self.norm(x)
x = self.mlp(x)
x = self.ls(x)
x = x.permute(0, 3, 1, 2)
x = self.drop_path(x) + shortcut
return x
def window_partition(x, window_size: List[int]):
B, H, W, C = x.shape
_assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})')
_assert(W % window_size[1] == 0, '')
x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
return windows
@register_notrace_function # reason: int argument is a Proxy
def window_reverse(windows, window_size: List[int], img_size: List[int]):
H, W = img_size
C = windows.shape[-1]
x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C)
return x
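# A short sketch of the block ("window") partition round trip (helper name is
# illustrative only): the channels-last map is cut into non-overlapping windows and
# window_reverse is an exact inverse of window_partition.
def _demo_window_partition_roundtrip():
    import torch
    x = torch.randn(2, 14, 14, 32)              # B, H, W, C (channels-last)
    win = window_partition(x, [7, 7])           # -> (2 * 2 * 2, 7, 7, 32) local windows
    assert win.shape == (8, 7, 7, 32)
    y = window_reverse(win, [7, 7], [14, 14])
    assert torch.equal(x, y)                    # exact round trip
    return y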
def grid_partition(x, grid_size: List[int]):
B, H, W, C = x.shape
_assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}')
_assert(W % grid_size[1] == 0, '')
x = x.view(B, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1], C)
windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, grid_size[0], grid_size[1], C)
return windows
@register_notrace_function # reason: int argument is a Proxy
def grid_reverse(windows, grid_size: List[int], img_size: List[int]):
H, W = img_size
C = windows.shape[-1]
x = windows.view(-1, H // grid_size[0], W // grid_size[1], grid_size[0], grid_size[1], C)
x = x.permute(0, 3, 1, 4, 2, 5).contiguous().view(-1, H, W, C)
return x
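# Illustrative example (dummy shapes, not part of the model definition): the partition
# helpers above are exact inverses of each other whenever H and W are divisible by the
# partition size, e.g. for an 8x8 channels-last feature map with a 4x4 partition:
#
#   x = torch.randn(2, 8, 8, 32)                       # B, H, W, C
#   w = window_partition(x, [4, 4])                    # (8, 4, 4, 32) contiguous windows
#   assert torch.equal(window_reverse(w, [4, 4], [8, 8]), x)
#   g = grid_partition(x, [4, 4])                      # (8, 4, 4, 32) strided grid cells
#   assert torch.equal(grid_reverse(g, [4, 4], [8, 8]), x)
#
# Window partitioning groups neighbouring pixels (local attention); grid partitioning
# groups pixels sampled at a fixed stride across the whole map (sparse global attention).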
def get_rel_pos_cls(cfg: MaxxVitTransformerCfg, window_size):
rel_pos_cls = None
if cfg.rel_pos_type == 'mlp':
rel_pos_cls = partial(RelPosMlp, window_size=window_size, hidden_dim=cfg.rel_pos_dim)
elif cfg.rel_pos_type == 'bias':
rel_pos_cls = partial(RelPosBias, window_size=window_size)
return rel_pos_cls
class PartitionAttentionCl(nn.Module):
""" Grid or Block partition + Attn + FFN.
NxC 'channels last' tensor layout.
"""
def __init__(
self,
dim: int,
partition_type: str = 'block',
cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
drop_path: float = 0.,
):
super().__init__()
norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps) # NOTE this block is channels-last
act_layer = get_act_layer(cfg.act_layer)
self.partition_block = partition_type == 'block'
self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size)
rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size)
self.norm1 = norm_layer(dim)
self.attn = AttentionCl(
dim,
dim,
dim_head=cfg.dim_head,
bias=cfg.attn_bias,
rel_pos_cls=rel_pos_cls,
attn_drop=cfg.attn_drop,
proj_drop=cfg.proj_drop,
)
self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * cfg.expand_ratio),
act_layer=act_layer,
drop=cfg.proj_drop)
self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def _partition_attn(self, x):
img_size = x.shape[1:3]
if self.partition_block:
partitioned = window_partition(x, self.partition_size)
else:
partitioned = grid_partition(x, self.partition_size)
partitioned = self.attn(partitioned)
if self.partition_block:
x = window_reverse(partitioned, self.partition_size, img_size)
else:
x = grid_reverse(partitioned, self.partition_size, img_size)
return x
def forward(self, x):
x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x))))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
class ParallelPartitionAttention(nn.Module):
""" Experimental. Grid and Block partition + single FFN
NxC tensor layout.
"""
def __init__(
self,
dim: int,
cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
drop_path: float = 0.,
):
super().__init__()
assert dim % 2 == 0
norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps) # NOTE this block is channels-last
act_layer = get_act_layer(cfg.act_layer)
assert cfg.window_size == cfg.grid_size
self.partition_size = to_2tuple(cfg.window_size)
rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size)
self.norm1 = norm_layer(dim)
self.attn_block = AttentionCl(
dim,
dim // 2,
dim_head=cfg.dim_head,
bias=cfg.attn_bias,
rel_pos_cls=rel_pos_cls,
attn_drop=cfg.attn_drop,
proj_drop=cfg.proj_drop,
)
self.attn_grid = AttentionCl(
dim,
dim // 2,
dim_head=cfg.dim_head,
bias=cfg.attn_bias,
rel_pos_cls=rel_pos_cls,
attn_drop=cfg.attn_drop,
proj_drop=cfg.proj_drop,
)
self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * cfg.expand_ratio),
out_features=dim,
act_layer=act_layer,
drop=cfg.proj_drop)
self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def _partition_attn(self, x):
img_size = x.shape[1:3]
partitioned_block = window_partition(x, self.partition_size)
partitioned_block = self.attn_block(partitioned_block)
x_window = window_reverse(partitioned_block, self.partition_size, img_size)
partitioned_grid = grid_partition(x, self.partition_size)
partitioned_grid = self.attn_grid(partitioned_grid)
x_grid = grid_reverse(partitioned_grid, self.partition_size, img_size)
return torch.cat([x_window, x_grid], dim=-1)
def forward(self, x):
x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x))))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
def window_partition_nchw(x, window_size: List[int]):
B, C, H, W = x.shape
_assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})')
    _assert(W % window_size[1] == 0, f'width ({W}) must be divisible by window ({window_size[1]})')
x = x.view(B, C, H // window_size[0], window_size[0], W // window_size[1], window_size[1])
windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, C, window_size[0], window_size[1])
return windows
@register_notrace_function # reason: int argument is a Proxy
def window_reverse_nchw(windows, window_size: List[int], img_size: List[int]):
H, W = img_size
C = windows.shape[1]
x = windows.view(-1, H // window_size[0], W // window_size[1], C, window_size[0], window_size[1])
x = x.permute(0, 3, 1, 4, 2, 5).contiguous().view(-1, C, H, W)
return x
def grid_partition_nchw(x, grid_size: List[int]):
B, C, H, W = x.shape
_assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}')
    _assert(W % grid_size[1] == 0, f'width {W} must be divisible by grid {grid_size[1]}')
x = x.view(B, C, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1])
windows = x.permute(0, 3, 5, 1, 2, 4).contiguous().view(-1, C, grid_size[0], grid_size[1])
return windows
@register_notrace_function # reason: int argument is a Proxy
def grid_reverse_nchw(windows, grid_size: List[int], img_size: List[int]):
H, W = img_size
C = windows.shape[1]
x = windows.view(-1, H // grid_size[0], W // grid_size[1], C, grid_size[0], grid_size[1])
x = x.permute(0, 3, 4, 1, 5, 2).contiguous().view(-1, C, H, W)
return x
class PartitionAttention2d(nn.Module):
""" Grid or Block partition + Attn + FFN
'2D' NCHW tensor layout.
"""
def __init__(
self,
dim: int,
partition_type: str = 'block',
cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
drop_path: float = 0.,
):
super().__init__()
        norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)  # NOTE this block is NCHW, not channels-last
act_layer = get_act_layer(cfg.act_layer)
self.partition_block = partition_type == 'block'
self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size)
rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size)
self.norm1 = norm_layer(dim)
self.attn = Attention2d(
dim,
dim,
dim_head=cfg.dim_head,
bias=cfg.attn_bias,
rel_pos_cls=rel_pos_cls,
attn_drop=cfg.attn_drop,
proj_drop=cfg.proj_drop,
)
self.ls1 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = ConvMlp(
in_features=dim,
hidden_features=int(dim * cfg.expand_ratio),
act_layer=act_layer,
drop=cfg.proj_drop)
self.ls2 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def _partition_attn(self, x):
img_size = x.shape[-2:]
if self.partition_block:
partitioned = window_partition_nchw(x, self.partition_size)
else:
partitioned = grid_partition_nchw(x, self.partition_size)
partitioned = self.attn(partitioned)
if self.partition_block:
x = window_reverse_nchw(partitioned, self.partition_size, img_size)
else:
x = grid_reverse_nchw(partitioned, self.partition_size, img_size)
return x
def forward(self, x):
x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x))))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
class MaxxVitBlock(nn.Module):
""" MaxVit conv, window partition + FFN , grid partition + FFN
"""
def __init__(
self,
dim: int,
dim_out: int,
stride: int = 1,
conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
use_nchw_attn: bool = False, # FIXME move to cfg? True is ~20-30% faster on TPU, 5-10% slower on GPU
drop_path: float = 0.,
):
super().__init__()
conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock
self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)
attn_kwargs = dict(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path)
partition_layer = PartitionAttention2d if use_nchw_attn else PartitionAttentionCl
self.nchw_attn = use_nchw_attn
self.attn_block = partition_layer(**attn_kwargs)
self.attn_grid = partition_layer(partition_type='grid', **attn_kwargs)
def init_weights(self, scheme=''):
named_apply(partial(_init_transformer, scheme=scheme), self.attn_block)
named_apply(partial(_init_transformer, scheme=scheme), self.attn_grid)
named_apply(partial(_init_conv, scheme=scheme), self.conv)
def forward(self, x):
# NCHW format
x = self.conv(x)
if not self.nchw_attn:
x = x.permute(0, 2, 3, 1) # to NHWC (channels-last)
x = self.attn_block(x)
x = self.attn_grid(x)
if not self.nchw_attn:
x = x.permute(0, 3, 1, 2) # back to NCHW
return x
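# Illustrative usage sketch (shapes and cfg values are assumptions, not taken from the
# surrounding file): a MaxxVitBlock runs the conv stage first (MBConv or ConvNeXt, NCHW),
# then block/window attention, then grid attention, e.g.:
#
#   tcfg = MaxxVitTransformerCfg(window_size=(7, 7), grid_size=(7, 7))
#   blk = MaxxVitBlock(dim=64, dim_out=96, stride=2, transformer_cfg=tcfg)
#   y = blk(torch.randn(1, 64, 56, 56))   # conv stage halves H, W -> (1, 96, 28, 28)
#
# With use_nchw_attn=False (the default) the attention sub-blocks operate on an NHWC view,
# so the tensor is permuted to channels-last before them and back to NCHW afterwards.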
class ParallelMaxxVitBlock(nn.Module):
""" MaxVit block with parallel cat(window + grid), one FF
Experimental timm block.
"""
def __init__(
self,
dim,
dim_out,
stride=1,
num_conv=2,
conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
drop_path=0.,
):
super().__init__()
conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock
if num_conv > 1:
convs = [conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)]
convs += [conv_cls(dim_out, dim_out, cfg=conv_cfg, drop_path=drop_path)] * (num_conv - 1)
self.conv = nn.Sequential(*convs)
else:
self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)
self.attn = ParallelPartitionAttention(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path)
def init_weights(self, scheme=''):
named_apply(partial(_init_transformer, scheme=scheme), self.attn)
named_apply(partial(_init_conv, scheme=scheme), self.conv)
def forward(self, x):
x = self.conv(x)
x = x.permute(0, 2, 3, 1)
x = self.attn(x)
x = x.permute(0, 3, 1, 2)
return x
class MaxxVitStage(nn.Module):
def __init__(
self,
in_chs: int,
out_chs: int,
stride: int = 2,
depth: int = 4,
feat_size: Tuple[int, int] = (14, 14),
block_types: Union[str, Tuple[str]] = 'C',
transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
drop_path: Union[float, List[float]] = 0.,
):
super().__init__()
self.grad_checkpointing = False
block_types = extend_tuple(block_types, depth)
blocks = []
for i, t in enumerate(block_types):
block_stride = stride if i == 0 else 1
assert t in ('C', 'T', 'M', 'PM')
if t == 'C':
conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock
blocks += [conv_cls(
in_chs,
out_chs,
stride=block_stride,
cfg=conv_cfg,
drop_path=drop_path[i],
)]
elif t == 'T':
rel_pos_cls = get_rel_pos_cls(transformer_cfg, feat_size)
blocks += [TransformerBlock2d(
in_chs,
out_chs,
stride=block_stride,
rel_pos_cls=rel_pos_cls,
cfg=transformer_cfg,
drop_path=drop_path[i],
)]
elif t == 'M':
blocks += [MaxxVitBlock(
in_chs,
out_chs,
stride=block_stride,
conv_cfg=conv_cfg,
transformer_cfg=transformer_cfg,
drop_path=drop_path[i],
)]
elif t == 'PM':
blocks += [ParallelMaxxVitBlock(
in_chs,
out_chs,
stride=block_stride,
conv_cfg=conv_cfg,
transformer_cfg=transformer_cfg,
drop_path=drop_path[i],
)]
in_chs = out_chs
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class Stem(nn.Module):
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int = 3,
act_layer: str = 'gelu',
norm_layer: str = 'batchnorm2d',
norm_eps: float = 1e-5,
):
super().__init__()
if not isinstance(out_chs, (list, tuple)):
out_chs = to_2tuple(out_chs)
norm_act_layer = partial(get_norm_act_layer(norm_layer, act_layer), eps=norm_eps)
self.out_chs = out_chs[-1]
self.stride = 2
self.conv1 = create_conv2d(in_chs, out_chs[0], kernel_size, stride=2)
self.norm1 = norm_act_layer(out_chs[0])
self.conv2 = create_conv2d(out_chs[0], out_chs[1], kernel_size, stride=1)
def init_weights(self, scheme=''):
named_apply(partial(_init_conv, scheme=scheme), self)
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.conv2(x)
return x
def cfg_window_size(cfg: MaxxVitTransformerCfg, img_size: Tuple[int, int]):
if cfg.window_size is not None:
assert cfg.grid_size
return cfg
partition_size = img_size[0] // cfg.partition_ratio, img_size[1] // cfg.partition_ratio
cfg = replace(cfg, window_size=partition_size, grid_size=partition_size)
return cfg
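# Worked example (assuming the default partition_ratio of 32): for img_size=(224, 224)
# this sets window_size = grid_size = (224 // 32, 224 // 32) = (7, 7), i.e. 7x7 attention
# windows and a 7x7 grid; for 256x256 inputs it becomes (8, 8). An explicitly configured
# window_size is kept as-is (the function only asserts that grid_size is also set).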
class MaxxVit(nn.Module):
""" CoaTNet + MaxVit base model.
Highly configurable for different block compositions, tensor layouts, pooling types.
"""
def __init__(
self,
cfg: MaxxVitCfg,
img_size: Union[int, Tuple[int, int]] = 224,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
drop_rate: float = 0.,
drop_path_rate: float = 0.
):
super().__init__()
img_size = to_2tuple(img_size)
transformer_cfg = cfg_window_size(cfg.transformer_cfg, img_size)
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = cfg.embed_dim[-1]
self.embed_dim = cfg.embed_dim
self.drop_rate = drop_rate
self.grad_checkpointing = False
self.stem = Stem(
in_chs=in_chans,
out_chs=cfg.stem_width,
act_layer=cfg.conv_cfg.act_layer,
norm_layer=cfg.conv_cfg.norm_layer,
norm_eps=cfg.conv_cfg.norm_eps,
)
stride = self.stem.stride
feat_size = tuple([i // s for i, s in zip(img_size, to_2tuple(stride))])
num_stages = len(cfg.embed_dim)
assert len(cfg.depths) == num_stages
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)]
in_chs = self.stem.out_chs
#final_norm_layer = get_norm_layer(cfg.transformer_cfg.norm_layer)
stages = []
#norms = []
for i in range(num_stages):
stage_stride = 2
out_chs = cfg.embed_dim[i]
feat_size = tuple([(r - 1) // stage_stride + 1 for r in feat_size])
stages += [MaxxVitStage(
in_chs,
out_chs,
depth=cfg.depths[i],
block_types=cfg.block_type[i],
conv_cfg=cfg.conv_cfg,
transformer_cfg=transformer_cfg,
feat_size=feat_size,
drop_path=dpr[i],
)]
#norms.append(final_norm_layer(out_chs, eps=cfg.transformer_cfg.norm_eps))
stride *= stage_stride
in_chs = out_chs
self.stages = nn.Sequential(*stages)
#self.norms = nn.Sequential(*norms)
final_norm_layer = get_norm_layer(cfg.transformer_cfg.norm_layer)
self.norm = final_norm_layer(self.num_features, eps=cfg.transformer_cfg.norm_eps)
# Classifier head
#self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
# Weight init (default PyTorch init works well for AdamW if scheme not set)
assert cfg.weight_init in ('', 'normal', 'trunc_normal', 'xavier_normal', 'vit_eff')
if cfg.weight_init:
named_apply(partial(self._init_weights, scheme=cfg.weight_init), self)
def _init_weights(self, module, name, scheme=''):
if hasattr(module, 'init_weights'):
try:
module.init_weights(scheme=scheme)
except TypeError:
module.init_weights()
@torch.jit.ignore
def no_weight_decay(self):
return {
k for k, _ in self.named_parameters()
if any(n in k for n in ["relative_position_bias_table", "rel_pos.mlp"])}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem', # stem and embed
blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is None:
global_pool = self.head.global_pool.pool_type | self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) | 4 | 2023-10-24 17:49:10+00:00 | 12k |
StackTipsLab/bloggy | bloggy/urls.py | [
{
"identifier": "settings",
"path": "bloggy/settings.py",
"snippet": "BASE_DIR = Path(__file__).resolve().parent.parent\nSECRET_KEY = os.getenv(\"SECRET_KEY\", get_random_secret_key())\nDEBUG = os.getenv(\"DEBUG\", \"False\") == \"True\"\nALLOWED_HOSTS = os.getenv(\"ALLOWED_HOSTS\", \"127.0.0.1, localhost\").split(\",\")\nINTERNAL_IPS = ['127.0.0.1']\nSITE_URL = os.getenv(\"SITE_URL\")\nINSTALLED_APPS = [\n 'bloggy',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n # sitemap\n 'django.contrib.sitemaps',\n\n # 'tinymce',\n 'widget_tweaks',\n 'django_summernote',\n 'whitenoise.runserver_nostatic',\n\n 'rest_framework',\n 'bloggy_api',\n 'mail_templated', # Used for templated email https://github.com/artemrizhov/django-mail-templated\n 'storages',\n 'debug_toolbar', # dev only\n\n 'hitcount',\n 'colorfield'\n]\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.gzip.GZipMiddleware',\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'bloggy.middleware.slash_middleware.AppendOrRemoveSlashMiddleware', # Remove slash from url\n\n # Cache\n 'django.middleware.cache.UpdateCacheMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.cache.FetchFromCacheMiddleware',\n # Cache\n\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n\n # Social login\n # 'social_django.middleware.SocialAuthExceptionMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n 'bloggy.middleware.redirect.RedirectMiddleware', # new articles mismatch url redirect\n]\nROOT_URLCONF = 'bloggy.urls'\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': os.path.join(BASE_DIR, '/bloggy/templates'),\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'bloggy.context_processors.seo_attrs',\n 'bloggy.context_processors.app_settings',\n\n # Social login\n # 'social_django.context_processors.backends',\n # 'social_django.context_processors.login_redirect',\n ],\n },\n },\n]\nWSGI_APPLICATION = 'bloggy.wsgi.application'\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': os.getenv('DB_NAME'),\n 'USER': os.getenv('DB_USER'),\n 'PASSWORD': os.getenv('DB_PASSWORD'),\n 'HOST': os.getenv('DB_HOST'),\n 'PORT': os.getenv('DB_PORT'),\n 'OPTIONS': {'charset': 'utf8mb4', 'use_unicode': True},\n }\n}\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nDEFAULT_AUTO_FIELD = 'django.db.models.AutoField'\nSTATIC_URL = '/static/'\nUSE_SPACES = os.getenv('USE_SPACES') == 'True'\nAWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')\nAWS_SECRET_ACCESS_KEY = 
os.getenv('AWS_SECRET_ACCESS_KEY')\nAWS_STORAGE_BUCKET_NAME = os.getenv('AWS_STORAGE_BUCKET_NAME')\nAWS_S3_ENDPOINT_URL = f'https://{os.getenv(\"AWS_S3_ENDPOINT_URL\")}'\n AWS_DEFAULT_ACL = 'public-read'\n AWS_QUERYSTRING_AUTH = False\n AWS_S3_OBJECT_PARAMETERS = {'CacheControl': 'max-age=86400'}\n AWS_LOCATION = 'static'\n STATIC_URL = f'{os.getenv(\"ASSETS_DOMAIN\")}/static/'\n STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'\n MEDIA_URL = '/media/'\n DEFAULT_FILE_STORAGE = 'bloggy.storage_backends.PublicMediaStorage'\n PRIVATE_MEDIA_LOCATION = 'private'\n PRIVATE_FILE_STORAGE = 'bloggy.storage_backends.PrivateMediaStorage'\n AWS_S3_CUSTOM_DOMAIN = 'media.stacktips.com'\n STATIC_URL = '/static/'\n STATIC_ROOT = os.path.join(BASE_DIR, 'bloggy/static')\n MEDIA_URL = '/media/'\n MEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nTINYMCE_DEFAULT_CONFIG = {\n 'plugins': 'code',\n 'toolbar': 'code',\n}\nLOGIN_URL = 'login'\nLOGOUT_URL = 'logout'\nLOGIN_REDIRECT_URL = '/'\nLOGOUT_REDIRECT_URL = '/'\nAUTH_USER_MODEL = 'bloggy.User'\nAUTH_USER_DEFAULT_GROUP = 'bloggy-members'\nSUMMERNOTE_THEME = 'bs4'\nSUMMERNOTE_CONFIG = {\n 'iframe': True,\n 'summernote': {\n 'width': '1000',\n 'height': '720',\n 'styleTags': [\n 'p',\n {\n 'title': 'Blockquote',\n 'tag': 'blockquote',\n 'className': 'blockquote',\n 'value': 'blockquote'\n },\n {\n 'title': 'Code Block',\n 'tag': 'pre',\n 'className': 'prettyprint lang-java',\n 'value': 'pre'\n },\n 'h1', 'h2', 'h3', 'h4', 'h5', 'h6'\n ],\n\n 'airMode': False,\n 'toolbar': [\n ['style', ['style']],\n ['font', ['bold', 'underline', 'clear']],\n ['color', ['color']],\n ['para', ['ul', 'ol', 'paragraph']],\n ['table', ['table']],\n ['insert', ['link', 'picture', 'code']],\n ['view', ['fullscreen', 'codeview', 'help']],\n ],\n },\n\n 'codemirror': {\n 'mode': 'htmlmixed',\n 'lineNumbers': 'true',\n 'theme': 'monokai',\n },\n\n 'css': (\n '//cdnjs.cloudflare.com/ajax/libs/codemirror/5.29.0/theme/monokai.min.css',\n ),\n 'attachment_require_authentication': True,\n 'attachment_upload_to': 'uploads/summernote',\n 'attachment_model': 'bloggy.Media',\n 'attachment_absolute_uri': False\n\n}\nMESSAGE_STORAGE = \"django.contrib.messages.storage.cookie.CookieStorage\"\nSITE_TITLE = os.getenv(\"SITE_TITLE\", \"Bloggy\")\nSITE_TAGLINE = os.getenv(\"SITE_TAGLINE\", \"A perfectly crafted blog that developers love.\")\nSITE_DESCRIPTION = os.getenv(\"SITE_DESCRIPTION\")\nSITE_LOGO = os.getenv(\"SITE_LOGO\")\nASSETS_DOMAIN = os.getenv(\"ASSETS_DOMAIN\")\nGOOGLE_RECAPTHCA_SECRET_KEY = os.getenv('GOOGLE_RECAPTHCA_SECRET_KEY')\nGOOGLE_RECAPTHCA_TOKEN_VERIFY_URL = 'https://www.google.com/recaptcha/api/siteverify'\nREST_FRAMEWORK = {\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n ),\n 'DEFAULT_PAGINATION_CLASS': 'bloggy_api.pagination.CustomPaginatedResponse',\n 'PAGE_SIZE': 30,\n\n 'EXCEPTION_HANDLER': 'rest_framework.views.exception_handler',\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.SessionAuthentication'\n ]\n}\nCACHE_TTL = 60 * 15\nCACHE_MIDDLEWARE_ALIAS = 'default' # which cache alias to use\nCACHE_MIDDLEWARE_SECONDS = CACHE_TTL # number of seconds to cache a page for (TTL)\nCACHE_MIDDLEWARE_KEY_PREFIX = '' # should be used if the cache is shared across multiple sites that use the same\nENABLE_CACHING = os.getenv(\"ENABLE_CACHING\", \"False\") == \"True\"\n CACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',\n 'LOCATION': os.getenv(\"MEMCACHIER_SERVERS\", 
\"127.0.0.1:11211\"),\n \"OPTIONS\": {\n \"binary\": True,\n # 'username': os.getenv(\"MEMCACHIER_USERNAME\", \"\"),\n # 'password': os.getenv(\"MEMCACHIER_PASSWORD\", \"\"),\n \"behaviors\": {\n \"ketama\": True,\n },\n },\n }\n }\n CACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',\n }\n }\nHITCOUNT_KEEP_HIT_ACTIVE = {'days': 0}\nHITCOUNT_KEEP_HIT_IN_DATABASE = {'days': 77}\nHITCOUNT_HITS_PER_IP_LIMIT = 0\nSHORTCODES_YOUTUBE_JQUERY = False\nPING_INDEX_NOW_POST_UPDATE = os.getenv(\"PING_INDEX_NOW_POST_UPDATE\", \"True\")\nPING_GOOGLE_POST_UPDATE = os.getenv(\"PING_GOOGLE_POST_UPDATE\", \"True\")\nINDEX_NOW_API_KEY = os.getenv(\"INDEX_NOW_API_KEY\", )\nEMAIL_BACKEND = os.getenv('EMAIL_BACKEND')\nEMAIL_HOST = os.getenv('EMAIL_HOST')\nEMAIL_PORT = os.getenv('EMAIL_PORT')\nEMAIL_HOST_USER = os.getenv('EMAIL_HOST_USER')\nEMAIL_HOST_PASSWORD = os.getenv('EMAIL_HOST_PASSWORD')\nEMAIL_USE_TLS = os.getenv('EMAIL_USE_TLS', \"True\")\nDEFAULT_FROM_EMAIL = os.getenv('DEFAULT_FROM_EMAIL')\nEMAIL_FILE_PATH = os.getenv('EMAIL_FILE_PATH', os.path.join(BASE_DIR, 'test-emails'))\nPOST_TYPE_CHOICES = os.getenv('POST_TYPE_CHOICES')\nSHOW_EMTPY_CATEGORIES = os.getenv(\"SHOW_EMTPY_CATEGORIES\", \"False\") == \"True\"\nLOAD_GOOGLE_TAG_MANAGER = os.getenv(\"LOAD_GOOGLE_TAG_MANAGER\", \"False\") == \"True\"\nLOAD_GOOGLE_ADS = os.getenv(\"LOAD_GOOGLE_ADS\", \"False\") == \"True\"\nMY_ADS_TXT_CONTENT = os.getenv('MY_ADS_TXT_CONTENT')\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n },\n },\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": \"DEBUG\",\n },\n \"loggers\": {\n \"django\": {\n \"handlers\": [\"console\"],\n \"level\": os.getenv(\"DJANGO_LOG_LEVEL\", \"INFO\"),\n \"propagate\": False,\n },\n },\n}\ndef get_post_types():"
},
{
"identifier": "EditProfileView",
"path": "bloggy/views/edit_profile_view.py",
"snippet": "class EditProfileView(FormView):\n template_name = \"profile/edit_profile.html\"\n model = User\n form_class = EditProfileForm\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['meta_title'] = \"Update Profile\"\n context[\n 'meta_description'] = f\"Update my profile. You need a {settings.SITE_TITLE} account to sign in and view your profile.\"\n context['meta_image'] = static('static/media/logo.png')\n return context\n\n def get_initial(self):\n initial = super().get_initial()\n username = self.request.user.username\n user = get_object_or_404(User, username=username)\n\n # update initial field defaults with custom set default values:\n initial.update({\n 'profile_photo': user.profile_photo,\n 'username': user.username,\n 'name': user.name,\n 'bio': user.bio,\n 'website': user.website,\n 'linkedin': user.linkedin,\n 'twitter': user.twitter,\n 'youtube': user.youtube,\n 'github': user.github,\n })\n\n return initial\n\n def get_success_url(self):\n return self.request.get_full_path()\n\n def form_valid(self, form):\n # This method is called when valid form data has been POSTed.\n # It should return an HttpResponse.\n\n if self.request.FILES.get(\"profile_photo\", None) is not None:\n # file_path = self.save_media_file(self.request.FILES[\"profile_photo\"])\n User.objects.filter(username=self.request.user.username).update(\n profile_photo=self.request.FILES[\"profile_photo\"],\n name=form.cleaned_data[\"name\"],\n bio=form.cleaned_data[\"bio\"],\n website=sanitize_url(form.cleaned_data[\"website\"]),\n twitter=sanitize_url(form.cleaned_data[\"twitter\"]),\n youtube=sanitize_url(form.cleaned_data[\"youtube\"]),\n linkedin=sanitize_url(form.cleaned_data[\"linkedin\"]),\n github=sanitize_url(form.cleaned_data[\"github\"])\n )\n else:\n User.objects.filter(username=self.request.user.username).update(\n name=form.cleaned_data[\"name\"],\n bio=form.cleaned_data[\"bio\"],\n website=sanitize_url(form.cleaned_data[\"website\"]),\n twitter=sanitize_url(form.cleaned_data[\"twitter\"]),\n youtube=sanitize_url(form.cleaned_data[\"youtube\"]),\n linkedin=sanitize_url(form.cleaned_data[\"linkedin\"]),\n github=sanitize_url(form.cleaned_data[\"github\"])\n )\n\n return super().form_valid(form)\n\n def save_media_file(self, image):\n # This will generate random folder for saving your image using UUID\n media_path = f'uploads/user/{self.request.user.username}/{image.name}'\n file_path = f'media/{media_path}'\n\n if not os.path.exists(file_path):\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n\n # Create image save path with title\n with open(file_path, \"wb+\") as f:\n for chunk in image.chunks():\n f.write(chunk)\n\n return media_path"
},
{
"identifier": "CoursesListView",
"path": "bloggy/views/courses_view.py",
"snippet": "class CoursesListView(TemplateView):\n model = Course\n template_name = \"pages/archive/courses.html\"\n paginate_by = DEFAULT_PAGE_SIZE\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n courses = Course.objects.filter(publish_status=\"LIVE\").order_by(\"-display_order\")\n paginator = Paginator(courses, self.paginate_by)\n page = self.request.GET.get('page')\n\n try:\n courses = paginator.page(page)\n except PageNotAnInteger:\n courses = paginator.page(1)\n except EmptyPage:\n courses = paginator.page(paginator.num_pages)\n\n context['courses'] = courses\n return context"
},
{
"identifier": "CourseDetailsView",
"path": "bloggy/views/courses_view.py",
"snippet": "class CourseDetailsView(HitCountDetailView):\n model = Course\n template_name = \"pages/single/course.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n set_seo_settings(post=self.object, context=context)\n return context"
},
{
"identifier": "LessonDetailsView",
"path": "bloggy/views/courses_view.py",
"snippet": "class LessonDetailsView(TemplateView):\n template_name = \"pages/single/lesson.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n guide_slug = context[\"course\"]\n post = Post.objects.filter(course__slug=guide_slug).filter(slug=context[\"slug\"]).order_by(\n \"display_order\").first()\n if not post:\n raise Http404\n\n context[\"post\"] = post\n course = post.course\n context[\"course\"] = course\n set_seo_settings(post=course, context=context)\n return context"
},
{
"identifier": "IndexView",
"path": "bloggy/views/pages.py",
"snippet": "class IndexView(TemplateView):\n template_name = \"pages/home.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['posts'] = Post.objects.prefetch_related(\"category\").filter(publish_status=\"LIVE\").order_by(\n \"-published_date\")[:12]\n context['courses'] = Course.objects.filter(publish_status=\"LIVE\").all()[:6]\n return context"
},
{
"identifier": "CategoriesView",
"path": "bloggy/views/category_view.py",
"snippet": "class CategoriesView(TemplateView):\n template_name = \"pages/archive/categories.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n if settings.SHOW_EMTPY_CATEGORIES:\n categories = Category.objects.order_by(\"-article_count\").all()\n else:\n categories = Category.objects.filter(article_count__gt=0).order_by(\"-article_count\").all()\n\n logger.debug('Loading categories: %s', categories)\n context['categories'] = categories\n\n return context"
},
{
"identifier": "CategoryDetailsView",
"path": "bloggy/views/category_view.py",
"snippet": "class CategoryDetailsView(ListView):\n model = Post\n template_name = \"pages/archive/posts.html\"\n paginate_by = 20\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n category_param = self.kwargs['slug']\n\n try:\n category = Category.objects.get(slug=category_param)\n context['selected_category'] = category\n except Category.DoesNotExist:\n raise Http404\n\n posts = Post.objects.filter(category__slug__in=[category_param], publish_status=\"LIVE\").order_by(\n \"-published_date\")\n paginator = Paginator(posts, self.paginate_by)\n page = self.request.GET.get('page')\n\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n posts = paginator.page(1)\n except EmptyPage:\n posts = paginator.page(paginator.num_pages)\n\n context['posts'] = posts\n context['categories'] = Category.objects.filter(article_count__gt=0).order_by(\"-article_count\").all()\n\n set_seo_settings(post=category, context=context)\n return context"
},
{
"identifier": "sitemaps_list",
"path": "bloggy/services/sitemaps.py",
"snippet": "class StaticPagesSitemap(sitemaps.Sitemap):\n def items(self):\n def location(self, item):"
},
{
"identifier": "RegisterView",
"path": "bloggy/views/register.py",
"snippet": "class RegisterView(View):\n\n def get(self, request):\n return render(request, 'auth/register.html', {'form': SignUpForm()})\n\n def post(self, request):\n form = SignUpForm(request.POST)\n if form.is_valid():\n user = form.save()\n\n verification_token = create_token(user, token_type=\"signup\")\n email_service.email_registration_token(request, user, verification_token)\n return redirect(reverse('login'))\n\n return render(request, 'auth/register.html', {'form': form})"
},
{
"identifier": "AccountActivationView",
"path": "bloggy/views/account.py",
"snippet": "class AccountActivationView(View):\n def get(self, request, uuid, token):\n\n verification_token = get_token(uuid, token, token_type=\"signup\")\n if is_token_expired(verification_token):\n messages.error(request, \"The verification link is expired or malformed.\")\n return redirect('index')\n\n # activate user\n user = User.objects.get(email=verification_token.user.email)\n user.is_active = True\n user.is_staff = False\n group = Group.objects.get_or_create(name=settings.AUTH_USER_DEFAULT_GROUP)\n user.groups.add(group[0].id)\n user.save()\n\n # delete token as it\n verification_token.delete()\n\n messages.success(request, \"You're all set! Your account is now active and ready to use.\")\n return redirect('login')"
},
{
"identifier": "PostListView",
"path": "bloggy/views/posts.py",
"snippet": "class PostListView(ListView):\n model = Post\n template_name = \"pages/archive/posts.html\"\n paginate_by = 20\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['posts'] = get_recent_feed(page=self.request.GET.get('page'))\n context['courses'] = Course.objects.filter(publish_status=\"LIVE\").all()[:2]\n context['categories'] = (Category.objects.filter(article_count__gt=0)\n .order_by(\"-article_count\").all())\n return context"
},
{
"identifier": "PostDetailsView",
"path": "bloggy/views/posts.py",
"snippet": "class PostDetailsView(HitCountDetailView):\n model = Post\n count_hit = True\n\n def get_template_names(self):\n if self.template_name:\n return f\"pages/single/{self.object.post_type}-{self.template_name}.html\"\n\n return f\"pages/single/{self.object.post_type}.html\"\n\n def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n\n def get_client_ip(self):\n x_forwarded_for = self.request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = self.request.META.get('REMOTE_ADDR')\n return ip\n\n def get_context_data(self, **kwargs):\n\n # check if article is published? if live no issues.\n if self.object.publish_status == \"DRAFT\":\n logged_user = self.request.user\n\n # If not live, check for the context parameter and the user login status\n # If user is the owner of the post or user is an admin, can preview the post\n if not logged_user:\n raise HttpResponse('Unauthorized', status=401)\n if not (logged_user.username.__eq__(self.object.author.username) or logged_user.is_superuser):\n raise HttpResponse('Unauthorized', status=401)\n\n context = super().get_context_data(**kwargs)\n set_seo_settings(post=self.object, context=context)\n return context"
},
{
"identifier": "MyLoginView",
"path": "bloggy/views/login.py",
"snippet": "class MyLoginView(LoginView):\n\n def get_success_url(self):\n redirect_url = self.request.GET.get('next')\n if redirect_url:\n return redirect_url\n\n return reverse('index')"
},
{
"identifier": "AdsTextView",
"path": "bloggy/views/pages.py",
"snippet": "class AdsTextView(View):\n def get(self, request, *args, **kwargs):\n return HttpResponse(settings.MY_ADS_TXT_CONTENT, content_type='text/plain')"
},
{
"identifier": "robots",
"path": "bloggy/views/pages.py",
"snippet": "@cache_page(60 * 60 * 24)\ndef robots(request):\n \"\"\"\n generates robots.txt, which pretty much does not change\n \"\"\"\n domain = settings.SITE_URL\n\n data = f\"\"\"User-agent: *\nDisallow: /admin/\nDisallow: /media/\nDisallow: /static/\nDisallow: /api/\n\nSitemap: {domain}/sitemap.xml\n\"\"\"\n\n return HttpResponse(data, content_type='text/plain')"
},
{
"identifier": "PageDetailsView",
"path": "bloggy/views/pages.py",
"snippet": "class PageDetailsView(TemplateView):\n template_name = \"pages/page.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n url = context[\"url\"]\n page = Page.objects.filter(url=url).filter(publish_status=\"LIVE\").first()\n if page:\n context[\"page\"] = page\n set_seo_settings(post=page, context=context)\n return context\n raise Http404"
},
{
"identifier": "QuizListView",
"path": "bloggy/views/quizzes_view.py",
"snippet": "class QuizListView(ListView):\n model = Quiz\n template_name = \"pages/archive/quizzes.html\"\n paginate_by = DEFAULT_PAGE_SIZE\n\n def get_context_data(self, **kwargs):\n context = super(QuizListView, self).get_context_data(**kwargs)\n context['quizzes'] = get_recent_quizzes()\n return context"
},
{
"identifier": "QuizDetailView",
"path": "bloggy/views/quizzes_view.py",
"snippet": "class QuizDetailView(HitCountDetailView):\n model = Quiz\n template_name = \"pages/single/quiz.html\"\n\n def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n set_seo_settings(post=self.object, context=context)\n return context"
},
{
"identifier": "PostsRssFeed",
"path": "bloggy/views/rss.py",
"snippet": "class PostsRssFeed(BaseRssFeedView):\n title = f'Posts from {settings.SITE_TITLE}'\n link = \"/articles\"\n\n def item_enclosure_url(self, item):\n thumbnail = static('static/media/default-banner.png')\n if item.thumbnail:\n thumbnail = settings.ASSETS_DOMAIN + item.thumbnail.url\n return thumbnail\n\n def items(self):\n return Post.objects.filter(publish_status=\"LIVE\").order_by('-published_date')[:30]\n\n def item_description(self, item):\n content = (f\"{item.content}\\n<small>Originally published at \"\n f\"<a href='{settings.SITE_URL + item.get_absolute_url()}' \"\n f\"target='_blank'>{settings.SITE_URL}</a></small>\")\n\n thumbnail = static('static/media/default-banner.png')\n if item.thumbnail:\n thumbnail = settings.ASSETS_DOMAIN + item.thumbnail.url\n return f'{content}<img src=\"{thumbnail}\" alt=\"{item.title}\" style=\"display:none;\">'\n\n def item_author_name(self, item):\n author = item.author\n return author.username if author else None\n\n def item_author_link(self, item):\n author = item.author\n return settings.SITE_URL + item.get_absolute_url() if author else \"/\""
},
{
"identifier": "CoursesRssFeed",
"path": "bloggy/views/rss.py",
"snippet": "class CoursesRssFeed(BaseRssFeedView):\n title = \"Courses\"\n link = \"/courses\"\n\n def items(self):\n return Course.objects.filter(publish_status=\"LIVE\").order_by('-published_date')[:30]\n\n def item_description(self, item):\n content = f\"{item.excerpt}\\n<small>Take the free course from <a href='{settings.SITE_URL + item.get_absolute_url()}' target='_blank'>{settings.SITE_URL}</a></small>\"\n return content\n\n def item_categories(self, obj):\n return []"
},
{
"identifier": "SearchListView",
"path": "bloggy/views/search.py",
"snippet": "class SearchListView(ListView):\n model = Post\n template_name = \"pages/search_result.html\"\n paginate_by = DEFAULT_PAGE_SIZE\n\n def get_context_data(self, **kwargs):\n search_query = self.request.GET.get(\"q\")\n context = super().get_context_data(**kwargs)\n\n if StringUtils.is_not_blank(search_query):\n categories = Category.objects.filter(slug__icontains=search_query)[:5]\n results = chain(\n Post.objects.filter(title__icontains=search_query, excerpt__icontains=search_query, publish_status=\"LIVE\"),\n )\n\n context['posts'] = results\n context['categories'] = categories\n context['search_query'] = search_query\n context['meta_title'] = f\"Search result for {search_query}\"\n context['meta_description'] = \"Search articles\"\n\n return context"
},
{
"identifier": "MyProfileView",
"path": "bloggy/views/user.py",
"snippet": "class MyProfileView(DetailView):\n # template_name = \"pages/user.html\"\n template_name = \"profile/user_dashboard.html\"\n\n def get_object(self, **kwargs):\n username = self.request.user # self.kwargs.get(\"username\")\n return get_object_or_404(User, username=username)\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n user = self.get_object()\n\n posts = user.posts.order_by(\"-published_date\").filter(publish_status=\"LIVE\")\n paginator = Paginator(posts, DEFAULT_PAGE_SIZE)\n page = self.request.GET.get('page')\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n posts = paginator.page(1)\n except EmptyPage:\n posts = paginator.page(paginator.num_pages)\n\n context.update({\n 'posts': posts,\n 'userProfile': user,\n 'userType': \"self\",\n })\n\n context['meta_title'] = \"My Profile\"\n context[\n 'meta_description'] = f'My profile. Access your {settings.SITE_TITLE} profile, account settings My Profile.'\n if user.profile_photo:\n context['meta_image'] = settings.SITE_LOGO\n\n return context"
},
{
"identifier": "PublicProfileView",
"path": "bloggy/views/user.py",
"snippet": "class PublicProfileView(SingleObjectMixin, View):\n template_name = \"pages/user.html\"\n\n def get_object(self, **kwargs):\n username = self.kwargs.get(\"username\")\n return get_object_or_404(User, username=username)\n\n def get(self, request, *args, **kwargs):\n username = kwargs.get(\"username\")\n if username == 'siteadmin' or username == 'admin' or username == 'superadmin' or username == 'wp-admin':\n raise Http404\n\n self.object = self.get_object()\n context = self.get_context_data(object=self.object)\n posts = self.object.posts.filter(publish_status=\"LIVE\").order_by(\"-published_date\")\n\n paginator = Paginator(posts, DEFAULT_PAGE_SIZE)\n page = self.request.GET.get('page')\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n posts = paginator.page(1)\n except EmptyPage:\n posts = paginator.page(paginator.num_pages)\n\n context['meta_title'] = self.object.get_full_name()\n description = f\"{settings.SITE_TITLE} Author. {self.object.get_full_name()}. {self.object.bio}\"\n context['meta_description'] = strip_tags(description)\n context['meta_image'] = self.object.get_avatar()\n\n context.update({\n 'posts': posts,\n 'user': self.object\n })\n\n return render(request, self.template_name, context)"
},
{
"identifier": "AuthorsListView",
"path": "bloggy/views/user.py",
"snippet": "class AuthorsListView(TemplateView):\n template_name = \"pages/authors.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n authors = User.objects.filter(is_active=True).filter(is_staff=True).exclude(\n username__in=[\"siteadmin\", \"superadmin\", \"admin\"]).all()\n context.update({\n \"authors\": authors\n })\n return context"
},
{
"identifier": "UserBookmarksView",
"path": "bloggy/views/user_collections.py",
"snippet": "class UserBookmarksView(TemplateView):\n template_name = \"profile/user_bookmarks.html\"\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n username = self.request.user.username\n user = get_object_or_404(User, username=username)\n\n articles = Post.objects.raw('''\n select a.id as id, a.title as title, a.slug as slug, a.publish_status as publish_status, a.thumbnail as thumbnail, b.updated_date as bookmark_date from bloggy_article a JOIN bloggy_bookmarks b on a.id=b.post_id where b.user_id=%s and b.post_type=%s\n ''', ([user.id], \"article\"))\n\n context.update({\n 'articles': articles,\n 'userProfile': user,\n 'userType': \"self\",\n })\n return context"
}
] | from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import LogoutView
from django.contrib.auth.views import PasswordChangeView
from django.contrib.sitemaps.views import sitemap, index
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import path, include
from django.views.generic.base import TemplateView
from bloggy import settings
from bloggy.views import EditProfileView
from bloggy.views.courses_view import CoursesListView, CourseDetailsView, LessonDetailsView
from bloggy.views.pages import IndexView
from bloggy.views.category_view import CategoriesView, CategoryDetailsView
from .services.sitemaps import sitemaps_list
from .views import RegisterView
from .views.account import AccountActivationView
from .views.posts import PostListView, PostDetailsView
from .views.login import MyLoginView
from .views.pages import AdsTextView, robots
from .views.pages import PageDetailsView
from .views.quizzes_view import QuizListView, QuizDetailView
from .views.rss import PostsRssFeed, CoursesRssFeed
from .views.search import SearchListView
from .views.user import MyProfileView, PublicProfileView, AuthorsListView
from .views.user_collections import UserBookmarksView | 8,298 | """bloggy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
urlpatterns = [
path('admin/', admin.site.urls),
path('admin/password_change/', PasswordChangeView.as_view(), name='password_change'),
path('', IndexView.as_view(), name='index'),
path('articles', PostListView.as_view(), name='posts'),
path('articles/<slug:slug>', PostDetailsView.as_view(), name='post_single'),
path('topics', CategoriesView.as_view(), name='categories'),
path('topics/<str:slug>', CategoryDetailsView.as_view(), name='categories_single'),
path('search', SearchListView.as_view(), name='search'),
path('courses', CoursesListView.as_view(), name='courses'),
path('courses/<slug:slug>', CourseDetailsView.as_view(), name='courses_single'),
path('courses/<str:course>/<slug:slug>', LessonDetailsView.as_view(), name='lesson_single'),
path('quizzes', QuizListView.as_view(), name='quizzes'),
path('quizzes/<slug:slug>', QuizDetailView.as_view(), name='quiz_single'),
path('login', MyLoginView.as_view(template_name="auth/login.html"), name='login'),
path('logout', LogoutView.as_view(), name='logout'),
path('register', RegisterView.as_view(), name='register'),
path('activate/<str:uuid>/<str:token>', AccountActivationView.as_view(), name='activate_account'),
path('authors', AuthorsListView.as_view(), name="authors"),
path('user/<str:username>', PublicProfileView.as_view(), name="user_profile"),
path('edit-profile', login_required(EditProfileView.as_view()), name="profile.edit_profile"),
# path('dashboard', login_required(MyProfileView.as_view()), name="profile.dashboard"),
path('bookmarks', login_required(UserBookmarksView.as_view()), name="profile.bookmarks"),
path('contact', TemplateView.as_view(template_name="pages/contact.html"), name='pages.contact'),
path("rss/articles", PostsRssFeed(), name="articles_feed"),
path("rss/courses", CoursesRssFeed(), name="courses_feed"),
path('sitemap.xml', index, {'sitemaps': sitemaps_list}, name='django.contrib.sitemaps.views.index'),
path('sitemap/<str:section>.xml', sitemap, {'sitemaps': sitemaps_list},
name='django.contrib.sitemaps.views.sitemap'),
# static files for SEO or other reasons
| """bloggy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
urlpatterns = [
path('admin/', admin.site.urls),
path('admin/password_change/', PasswordChangeView.as_view(), name='password_change'),
path('', IndexView.as_view(), name='index'),
path('articles', PostListView.as_view(), name='posts'),
path('articles/<slug:slug>', PostDetailsView.as_view(), name='post_single'),
path('topics', CategoriesView.as_view(), name='categories'),
path('topics/<str:slug>', CategoryDetailsView.as_view(), name='categories_single'),
path('search', SearchListView.as_view(), name='search'),
path('courses', CoursesListView.as_view(), name='courses'),
path('courses/<slug:slug>', CourseDetailsView.as_view(), name='courses_single'),
path('courses/<str:course>/<slug:slug>', LessonDetailsView.as_view(), name='lesson_single'),
path('quizzes', QuizListView.as_view(), name='quizzes'),
path('quizzes/<slug:slug>', QuizDetailView.as_view(), name='quiz_single'),
path('login', MyLoginView.as_view(template_name="auth/login.html"), name='login'),
path('logout', LogoutView.as_view(), name='logout'),
path('register', RegisterView.as_view(), name='register'),
path('activate/<str:uuid>/<str:token>', AccountActivationView.as_view(), name='activate_account'),
path('authors', AuthorsListView.as_view(), name="authors"),
path('user/<str:username>', PublicProfileView.as_view(), name="user_profile"),
path('edit-profile', login_required(EditProfileView.as_view()), name="profile.edit_profile"),
# path('dashboard', login_required(MyProfileView.as_view()), name="profile.dashboard"),
path('bookmarks', login_required(UserBookmarksView.as_view()), name="profile.bookmarks"),
path('contact', TemplateView.as_view(template_name="pages/contact.html"), name='pages.contact'),
path("rss/articles", PostsRssFeed(), name="articles_feed"),
path("rss/courses", CoursesRssFeed(), name="courses_feed"),
path('sitemap.xml', index, {'sitemaps': sitemaps_list}, name='django.contrib.sitemaps.views.index'),
path('sitemap/<str:section>.xml', sitemap, {'sitemaps': sitemaps_list},
name='django.contrib.sitemaps.views.sitemap'),
# static files for SEO or other reasons | path('robots.txt', robots, name='robots'), | 15 | 2023-10-17 14:50:39+00:00 | 12k |
zabbix/python-zabbix-utils | .github/scripts/compatibility_api_test_5.py | [
{
"identifier": "Getter",
"path": "zabbix_utils/getter.py",
"snippet": "class Getter():\n \"\"\"Zabbix get implementation.\n\n Args:\n host (str, optional): Zabbix agent address. Defaults to `'127.0.0.1'`.\n\n port (int, optional): Zabbix agent port. Defaults to `10050`.\n\n timeout (int, optional): Connection timeout value. Defaults to `10`.\n\n use_ipv6 (bool, optional): Specifying IPv6 use instead of IPv4. Defaults to `False`.\n\n source_ip (str, optional): IP from which to establish connection. Defaults to `None`.\n\n socket_wrapper (Callable, optional): Func(`conn`) to wrap socket. Defaults to `None`.\n \"\"\"\n\n def __init__(self, host: str = '127.0.0.1', port: int = 10050, timeout: int = 10,\n use_ipv6: bool = False, source_ip: Union[str, None] = None,\n socket_wrapper: Union[Callable, None] = None):\n self.host = host\n self.port = port\n self.timeout = timeout\n self.use_ipv6 = use_ipv6\n self.source_ip = source_ip\n\n self.socket_wrapper = socket_wrapper\n if self.socket_wrapper:\n if not isinstance(self.socket_wrapper, Callable):\n raise TypeError('Value \"socket_wrapper\" should be a function.')\n\n def __get_response(self, conn: socket) -> Union[str, None]:\n result = ZabbixProtocol.parse_packet(conn, log, ProcessingError)\n\n log.debug('Received data: %s', result)\n\n return result\n\n def get(self, key: str) -> Union[str, None]:\n \"\"\"Gets item value from Zabbix agent by specified key.\n\n Args:\n key (str): Zabbix item key.\n\n Returns:\n str: Value from Zabbix agent for specified key.\n \"\"\"\n\n packet = ZabbixProtocol.create_packet(key, log)\n\n try:\n if self.use_ipv6:\n connection = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)\n else:\n connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n except socket.error:\n raise ProcessingError(\n f\"Error creating socket for {self.host}:{self.port}\") from None\n\n connection.settimeout(self.timeout)\n\n if self.source_ip:\n connection.bind((self.source_ip, 0,))\n\n try:\n connection.connect((self.host, self.port))\n if self.socket_wrapper is not None:\n connection = self.socket_wrapper(connection)\n connection.sendall(packet)\n except (TimeoutError, socket.timeout) as err:\n log.error(\n 'The connection to %s timed out after %d seconds',\n f\"{self.host}:{self.port}\",\n self.timeout\n )\n connection.close()\n raise err\n except (ConnectionRefusedError, socket.gaierror) as err:\n log.error(\n 'An error occurred while trying to connect to %s: %s',\n f\"{self.host}:{self.port}\",\n getattr(err, 'msg', str(err))\n )\n connection.close()\n raise err\n except (OSError, socket.error) as err:\n log.warning(\n 'An error occurred while trying to send to %s: %s',\n f\"{self.host}:{self.port}\",\n getattr(err, 'msg', str(err))\n )\n connection.close()\n raise err\n\n try:\n response = self.__get_response(connection)\n except ConnectionResetError as err:\n log.debug('Get value error: %s', err)\n log.warning('Check access restrictions in Zabbix agent configuration.')\n raise err\n log.debug('Response from [%s:%s]: %s', self.host, self.port, response)\n\n try:\n connection.close()\n except socket.error:\n pass\n\n return response"
},
{
"identifier": "ZabbixAPI",
"path": "zabbix_utils/api.py",
"snippet": "class ZabbixAPI():\n \"\"\"Provide interface for working with Zabbix API.\n\n Args:\n url (str, optional): Zabbix API URL. Defaults to `http://localhost/zabbix/api_jsonrpc.php`.\n token (str, optional): Zabbix API token. Defaults to `None`.\n user (str, optional): Zabbix API username. Defaults to `None`.\n password (str, optional): Zabbix API user's password. Defaults to `None`.\n http_user (str, optional): Basic Authentication username. Defaults to `None`.\n http_password (str, optional): Basic Authentication password. Defaults to `None`.\n skip_version_check (bool, optional): Skip version compatibility check. Defaults to `False`.\n validate_certs (bool, optional): Specifying certificate validation. Defaults to `True`.\n timeout (int, optional): Connection timeout to Zabbix API. Defaults to `30`.\n \"\"\"\n\n __version = None\n __use_token = False\n __session_id = None\n __basic_cred = None\n\n def __init__(self, url: Union[str, None] = None, token: Union[str, None] = None,\n user: Union[str, None] = None, password: Union[str, None] = None,\n http_user: Union[str, None] = None, http_password: Union[str, None] = None,\n skip_version_check: bool = False, validate_certs: bool = True, timeout: int = 30):\n\n url = url or env.get('ZABBIX_URL') or 'http://localhost/zabbix/api_jsonrpc.php'\n user = user or env.get('ZABBIX_USER') or None\n password = password or env.get('ZABBIX_PASSWORD') or None\n token = token or env.get('ZABBIX_TOKEN') or None\n\n self.url = ModuleUtils.check_url(url)\n self.validate_certs = validate_certs\n self.timeout = timeout\n\n if http_user and http_password:\n self.__basic_auth(http_user, http_password)\n\n self.__check_version(skip_version_check)\n\n if token or user or password:\n self.login(token, user, password)\n\n def __getattr__(self, name: str) -> Callable:\n \"\"\"Dynamic creation of an API object.\n\n Args:\n name (str): Zabbix API method name.\n\n Returns:\n APIObject: Zabbix API object instance.\n \"\"\"\n\n return APIObject(name, self)\n\n def __enter__(self) -> Self:\n return self\n\n def __exit__(self, *args) -> None:\n self.logout()\n\n def __basic_auth(self, user: str, password: str) -> Self:\n \"\"\"Enable Basic Authentication using.\n\n Args:\n user (str): Basic Authentication username.\n password (str): Basic Authentication password.\n \"\"\"\n\n log.debug(\n \"Enable Basic Authentication with username:%s password:%s\",\n user,\n ModuleUtils.HIDING_MASK\n )\n\n self.__basic_cred = base64.b64encode(\n f\"{user}:{password}\".encode()\n ).decode()\n\n def api_version(self) -> APIVersion:\n \"\"\"Return object of Zabbix API version.\n\n Returns:\n APIVersion: Object of Zabbix API version\n \"\"\"\n\n if self.__version is None:\n self.__version = APIVersion(self.apiinfo.version())\n return self.__version\n\n @property\n def version(self) -> APIVersion:\n \"\"\"Return object of Zabbix API version.\n\n Returns:\n APIVersion: Object of Zabbix API version.\n \"\"\"\n\n return self.api_version()\n\n def login(self, token: Union[str, None] = None, user: Union[str, None] = None,\n password: Union[str, None] = None) -> Self:\n \"\"\"Login to Zabbix API.\n\n Args:\n token (str, optional): Zabbix API token. Defaults to `None`.\n user (str, optional): Zabbix API username. Defaults to `None`.\n password (str, optional): Zabbix API user's password. 
Defaults to `None`.\n \"\"\"\n\n if token:\n if self.version < 5.4:\n raise APINotSupported(\n message=\"Token usage\",\n version=self.version\n )\n if user or password:\n raise ProcessingError(\n \"Token cannot be used with username and password\")\n self.__use_token = True\n self.__session_id = token\n return\n\n if not user:\n raise ProcessingError(\"Username is missing\")\n if not password:\n raise ProcessingError(\"User password is missing\")\n\n if self.version < 5.4:\n user_cred = {\n \"user\": user,\n \"password\": password\n }\n else:\n user_cred = {\n \"username\": user,\n \"password\": password\n }\n\n log.debug(\n \"Login to Zabbix API using username:%s password:%s\", user, ModuleUtils.HIDING_MASK\n )\n self.__use_token = False\n self.__session_id = self.user.login(**user_cred)\n\n log.debug(\"Connected to Zabbix API version %s: %s\", self.version, self.url)\n\n def logout(self) -> None:\n \"\"\"Logout from Zabbix API.\"\"\"\n\n if self.__session_id:\n if self.__use_token:\n self.__session_id = None\n self.__use_token = False\n return\n\n log.debug(\"Logout from Zabbix API\")\n self.user.logout()\n self.__session_id = None\n else:\n log.debug(\"You're not logged in Zabbix API\")\n\n def check_auth(self) -> bool:\n \"\"\"Check authentication status in Zabbix API.\n\n Returns:\n bool: User authentication status (`True`, `False`)\n \"\"\"\n\n if not self.__session_id:\n log.debug(\"You're not logged in Zabbix API\")\n return False\n\n if self.__use_token:\n log.debug(\"Check auth session using token in Zabbix API\")\n refresh_resp = self.user.checkAuthentication(token=self.__session_id)\n else:\n log.debug(\"Check auth session using sessionid in Zabbix API\")\n refresh_resp = self.user.checkAuthentication(sessionid=self.__session_id)\n\n return bool(refresh_resp.get('userid'))\n\n def send_api_request(self, method: str, params: Union[dict, None] = None,\n need_auth=True) -> dict:\n \"\"\"Function for sending request to Zabbix API.\n\n Args:\n method (str): Zabbix API method name.\n params (dict, optional): Params for request body. Defaults to `None`.\n need_auth (bool, optional): Authorization using flag. 
Defaults to `False`.\n\n Raises:\n ProcessingError: Wrapping built-in exceptions during request processing.\n APIRequestError: Wrapping errors from Zabbix API.\n\n Returns:\n dict: Dictionary with Zabbix API response.\n \"\"\"\n\n request_json = {\n 'jsonrpc': '2.0',\n 'method': method,\n 'params': params or {},\n 'id': str(uuid4()),\n }\n\n headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json-rpc',\n 'User-Agent': f\"{__name__}/{__version__}\"\n }\n\n if need_auth:\n if not self.__session_id:\n raise ProcessingError(\"You're not logged in Zabbix API\")\n if self.version < 6.4 or self.__basic_cred is not None:\n request_json['auth'] = self.__session_id\n else:\n headers[\"Authorization\"] = f\"Bearer {self.__session_id}\"\n\n if self.__basic_cred is not None:\n headers[\"Authorization\"] = f\"Basic {self.__basic_cred}\"\n\n log.debug(\n \"Sending request to %s with body: %s\",\n self.url,\n request_json\n )\n\n req = ul.Request(\n self.url,\n data=json.dumps(request_json).encode(\"utf-8\"),\n headers=headers,\n method='POST'\n )\n req.timeout = self.timeout\n\n # Disable SSL certificate validation if needed.\n if not self.validate_certs:\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n else:\n ctx = None\n\n try:\n resp = ul.urlopen(req, context=ctx)\n resp_json = json.loads(resp.read().decode('utf-8'))\n except URLError as err:\n raise ProcessingError(f\"Unable to connect to {self.url}:\", err) from None\n except ValueError as err:\n raise ProcessingError(\"Unable to parse json:\", err) from None\n\n if method not in ModuleUtils.FILES_METHODS:\n log.debug(\n \"Received response body: %s\",\n resp_json\n )\n else:\n debug_json = resp_json.copy()\n if debug_json.get('result'):\n debug_json['result'] = shorten(debug_json['result'], 200, placeholder='...')\n log.debug(\n \"Received response body (clipped): %s\",\n json.dumps(debug_json, indent=4, separators=(',', ': '))\n )\n\n if 'error' in resp_json:\n err = resp_json['error'].copy()\n err['body'] = request_json.copy()\n raise APIRequestError(err)\n\n return resp_json\n\n def __check_version(self, skip_check: bool) -> None:\n\n skip_check_help = \"If you're sure zabbix_utils will work properly with your current \\\nZabbix version you can skip this check by \\\nspecifying skip_version_check=True when create ZabbixAPI object.\"\n if self.version < __min_supported__:\n if skip_check:\n log.debug(\n \"Version of Zabbix API [%s] is less than the library supports. %s\",\n self.version,\n \"Further library use at your own risk!\"\n )\n else:\n raise APINotSupported(\n f\"Version of Zabbix API [{self.version}] is not supported by the library. \" +\n f\"The oldest supported version is {__min_supported__}.0. \" + skip_check_help\n )\n\n if self.version > __max_supported__:\n if skip_check:\n log.debug(\n \"Version of Zabbix API [%s] is more than the library was tested on. %s\",\n self.version,\n \"Recommended to update the library. Further library use at your own risk!\"\n )\n else:\n raise APINotSupported(\n f\"Version of Zabbix API [{self.version}] was not tested with the library. \" +\n f\"The latest tested version is {__max_supported__}.0. \" + skip_check_help\n )"
},
{
"identifier": "APIVersion",
"path": "zabbix_utils/api.py",
"snippet": "class APIVersion():\n \"\"\"Zabbix API version object.\n\n Args:\n apiver (str): Raw version in string format.\n \"\"\"\n\n def __init__(self, apiver: str):\n self.__raw = apiver\n self.__first, self.__second, self.__third = self.__parse_version(self.__raw)\n\n def __getitem__(self, index: int) -> Any:\n # Get a symbol from the raw version string by index\n # For compatibility with using Zabbix version as a string\n return self.__raw[index]\n\n def is_lts(self) -> bool:\n \"\"\"Check if the current version is LTS.\n\n Returns:\n bool: `True` if the current version is LTS.\n \"\"\"\n\n return self.__second == 0\n\n @property\n def major(self) -> float:\n \"\"\"Get major version number.\n\n Returns:\n float: A major version number.\n \"\"\"\n\n return float(f\"{self.__first}.{self.__second}\")\n\n @property\n def minor(self) -> int:\n \"\"\"Get minor version number.\n\n Returns:\n int: A minor version number.\n \"\"\"\n\n return self.__third\n\n def __parse_version(self, ver: str) -> List[Any]:\n # Parse the version string into a list of integers.\n match = re.fullmatch(r'(\\d+)\\.(\\d+)\\.(\\d+)', ver)\n if match is None:\n raise ValueError(\n f\"Unable to parse version of Zabbix API: {ver}. \" +\n f\"Default '{__max_supported__}.0' format is expected.\"\n ) from None\n return list(map(int, match.groups()))\n\n def __str__(self) -> str:\n return self.__raw\n\n def __repr__(self) -> str:\n return self.__raw\n\n def __eq__(self, other: Union[float, str]) -> bool:\n if isinstance(other, float):\n return self.major == other\n if isinstance(other, str):\n return [self.__first, self.__second, self.__third] == self.__parse_version(other)\n raise TypeError(\n f\"'==' not supported between instances of '{type(self).__name__}' and \\\n'{type(other).__name__}', only 'float' or 'str' is expected\"\n )\n\n def __gt__(self, other: Union[float, str]) -> bool:\n if isinstance(other, float):\n return self.major > other\n if isinstance(other, str):\n return [self.__first, self.__second, self.__third] > self.__parse_version(other)\n raise TypeError(\n f\"'>' not supported between instances of '{type(self).__name__}' and \\\n'{type(other).__name__}', only 'float' or 'str' is expected\"\n )\n\n def __lt__(self, other: Union[float, str]) -> bool:\n if isinstance(other, float):\n return self.major < other\n if isinstance(other, str):\n return [self.__first, self.__second, self.__third] < self.__parse_version(other)\n raise TypeError(\n f\"'<' not supported between instances of '{type(self).__name__}' and \\\n'{type(other).__name__}', only 'float' or 'str' is expected\"\n )\n\n def __ne__(self, other: Any) -> bool:\n return not self.__eq__(other)\n\n def __ge__(self, other: Any) -> bool:\n return not self.__lt__(other)\n\n def __le__(self, other: Any) -> bool:\n return not self.__gt__(other)"
},
{
"identifier": "ItemValue",
"path": "zabbix_utils/sender.py",
"snippet": "class ItemValue():\n \"\"\"Contains data of a single item value.\n\n Args:\n host (str): Specify host name the item belongs to (as registered in Zabbix frontend).\n key (str): Specify item key to send value to.\n value (str): Specify item value.\n clock (int, optional): Specify time in Unix timestamp format. Defaults to `None`.\n ns (int, optional): Specify time expressed in nanoseconds. Defaults to `None`.\n \"\"\"\n\n def __init__(self, host: str, key: str, value: str,\n clock: Union[int, None] = None, ns: Union[int, None] = None):\n self.host = str(host)\n self.key = str(key)\n self.value = str(value)\n self.clock = None\n self.ns = None\n\n if clock is not None:\n try:\n self.clock = int(clock)\n except ValueError:\n raise ValueError(\n 'The clock value must be expressed in the Unix Timestamp format') from None\n\n if ns is not None:\n try:\n self.ns = int(ns)\n except ValueError:\n raise ValueError(\n 'The ns value must be expressed in the integer value of nanoseconds') from None\n\n def __str__(self) -> str:\n return json.dumps(self.to_json(), ensure_ascii=False)\n\n def __repr__(self) -> str:\n return self.__str__()\n\n def to_json(self) -> dict:\n \"\"\"Represents ItemValue object in dictionary for json.\n\n Returns:\n dict: Object attributes in dictionary.\n \"\"\"\n\n return {k: v for k, v in self.__dict__.items() if v is not None}"
},
{
"identifier": "Sender",
"path": "zabbix_utils/sender.py",
"snippet": "class Sender():\n \"\"\"Zabbix sender implementation.\n\n Args:\n server (str, optional): Zabbix server address. Defaults to `'127.0.0.1'`.\n port (int, optional): Zabbix server port. Defaults to `10051`.\n use_config (bool, optional): Specifying configuration use. Defaults to `False`.\n timeout (int, optional): Connection timeout value. Defaults to `10`.\n use_ipv6 (bool, optional): Specifying IPv6 use instead of IPv4. Defaults to `False`.\n source_ip (str, optional): IP from which to establish connection. Defaults to `None`.\n chunk_size (int, optional): Number of packets in one chunk. Defaults to `250`.\n socket_wrapper (Callable, optional): Func(`conn`,`tls`) to wrap socket. Defaults to `None`.\n compression (bool, optional): Specifying compression use. Defaults to `False`.\n config_path (str, optional): Path to Zabbix agent configuration file. Defaults to \\\n`/etc/zabbix/zabbix_agentd.conf`.\n \"\"\"\n\n def __init__(self, server: str = '127.0.0.1', port: int = 10051,\n use_config: bool = False, timeout: int = 10, use_ipv6: bool = False,\n source_ip: Union[str, None] = None, chunk_size: int = 250,\n socket_wrapper: Union[Callable, None] = None, compression: bool = False,\n config_path: Union[str, None] = '/etc/zabbix/zabbix_agentd.conf'):\n self.timeout = timeout\n self.use_ipv6 = use_ipv6\n self.tls = {}\n\n self.source_ip = None\n self.chunk_size = chunk_size\n self.compression = compression\n\n if socket_wrapper is not None:\n if not isinstance(socket_wrapper, Callable):\n raise TypeError('Value \"socket_wrapper\" should be a function.')\n self.socket_wrapper = socket_wrapper\n\n if use_config:\n self.clusters = []\n self.__load_config(config_path)\n else:\n self.clusters = [Cluster(f\"{server}:{port}\")]\n\n if source_ip is not None:\n self.source_ip = source_ip\n\n def __read_config(self, config: configparser.SectionProxy) -> None:\n server_row = config.get('ServerActive') or config.get('Server') or '127.0.0.1:10051'\n\n for cluster in server_row.split(','):\n self.clusters.append(Cluster(cluster.strip()))\n\n if 'SourceIP' in config:\n self.source_ip = config.get('SourceIP')\n\n for key in config:\n if key.startswith('tls'):\n self.tls[key] = config.get(key)\n\n def __load_config(self, filepath: str) -> None:\n config = configparser.ConfigParser(strict=False)\n\n with open(filepath, 'r', encoding='utf-8') as cfg:\n config.read_string('[root]\\n' + cfg.read())\n self.__read_config(config['root'])\n\n def __get_response(self, conn: socket) -> Union[str, None]:\n try:\n result = json.loads(\n ZabbixProtocol.parse_packet(conn, log, ProcessingError)\n )\n except json.decoder.JSONDecodeError as err:\n log.debug('Unexpected response was received from Zabbix.')\n raise err\n\n log.debug('Received data: %s', result)\n\n return result\n\n def __create_request(self, items: list) -> dict:\n return {\n \"request\": \"sender data\",\n \"data\": [i.to_json() for i in items]\n }\n\n def __chunk_send(self, items: list) -> dict:\n responses = {}\n\n packet = ZabbixProtocol.create_packet(self.__create_request(items), log, self.compression)\n\n for cluster in self.clusters:\n active_node = None\n\n for i, node in enumerate(cluster.nodes):\n\n log.debug('Trying to send data to %s', node)\n\n try:\n if self.use_ipv6:\n connection = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)\n else:\n connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n except socket.error:\n raise ProcessingError(f\"Error creating socket for {node}\") from None\n\n 
connection.settimeout(self.timeout)\n\n if self.source_ip:\n connection.bind((self.source_ip, 0,))\n\n try:\n connection.connect((node.address, node.port))\n except (TimeoutError, socket.timeout):\n log.debug(\n 'The connection to %s timed out after %d seconds',\n node,\n self.timeout\n )\n except (ConnectionRefusedError, socket.gaierror) as err:\n log.debug(\n 'An error occurred while trying to connect to %s: %s',\n node,\n getattr(err, 'msg', str(err))\n )\n else:\n if i > 0:\n cluster.nodes[0], cluster.nodes[i] = cluster.nodes[i], cluster.nodes[0]\n active_node = node\n break\n\n if active_node is None:\n log.error(\n 'Couldn\\'t connect to all of cluster nodes: %s',\n str(list(cluster.nodes))\n )\n connection.close()\n raise ProcessingError(\n f\"Couldn't connect to all of cluster nodes: {list(cluster.nodes)}\"\n )\n\n if self.socket_wrapper is not None:\n connection = self.socket_wrapper(connection, self.tls)\n\n try:\n connection.sendall(packet)\n except (TimeoutError, socket.timeout) as err:\n log.error(\n 'The connection to %s timed out after %d seconds while trying to send',\n active_node,\n self.timeout\n )\n connection.close()\n raise err\n except (OSError, socket.error) as err:\n log.warning(\n 'An error occurred while trying to send to %s: %s',\n active_node,\n getattr(err, 'msg', str(err))\n )\n connection.close()\n raise err\n\n try:\n response = self.__get_response(connection)\n except ConnectionResetError as err:\n log.debug('Get value error: %s', err)\n raise err\n log.debug('Response from %s: %s', active_node, response)\n\n if response and response.get('response') != 'success':\n raise socket.error(response)\n\n responses[active_node] = response\n\n try:\n connection.close()\n except socket.error:\n pass\n\n return responses\n\n def send(self, items: list, merge_responses: bool = True) -> dict:\n \"\"\"Sends packets and receives an answer from Zabbix.\n\n Args:\n items (list): List of ItemValue objects.\n merge_responses (bool, optional): Whether to merge all responses data \\\nto a single one. Defaults to `True`.\n\n Returns:\n dict: Dictionary of TrapperResponse objects for each Node object.\n \"\"\"\n\n result = {}\n\n if not all(isinstance(item, ItemValue) for item in items):\n log.debug('Received unexpected item list. It must be a list of ItemValue objects: %s',\n json.dumps(items))\n raise ProcessingError(f\"Received unexpected item list. \\\nIt must be a list of ItemValue objects: {json.dumps(items)}\")\n\n chunks = [items[i:i + self.chunk_size] for i in range(0, len(items), self.chunk_size)]\n for i, chunk in enumerate(chunks):\n\n resp_by_node = self.__chunk_send(chunk)\n\n for node, resp in resp_by_node.items():\n if merge_responses:\n if node not in result:\n result[node] = TrapperResponse()\n result[node].add(resp, i + 1)\n else:\n if node not in result:\n result[node] = []\n result[node].append(TrapperResponse(i+1).add(resp))\n\n return result\n\n def send_value(self, host: str, key: str,\n value: str, clock: Union[int, None] = None,\n ns: Union[int, None] = None, merge_responses: bool = True) -> dict:\n \"\"\"Sends one value and receives an answer from Zabbix.\n\n Args:\n host (str): Specify host name the item belongs to (as registered in Zabbix frontend).\n key (str): Specify item key to send value to.\n value (str): Specify item value.\n clock (int, optional): Specify time in Unix timestamp format. Defaults to `None`.\n ns (int, optional): Specify time expressed in nanoseconds. 
Defaults to `None`.\n merge_responses (bool, optional): Whether to merge all responses data \\\nto a single one. Defaults to `True`.\n\n Returns:\n dict: Dictionary of TrapperResponse object for each Node object.\n \"\"\"\n\n return self.send([ItemValue(host, key, value, clock, ns)], merge_responses)"
},
{
"identifier": "TrapperResponse",
"path": "zabbix_utils/sender.py",
"snippet": "class TrapperResponse():\n \"\"\"Contains response from Zabbix server/proxy.\n\n Args:\n chunk (int, optional): Current chunk number. Defaults to `1`.\n \"\"\"\n\n def __init__(self, chunk: int = 1):\n self.__processed = 0\n self.__failed = 0\n self.__total = 0\n self.__time = 0\n self.__chunk = chunk\n\n def __repr__(self) -> str:\n result = {}\n for key, value in self.__dict__.items():\n result[\n key[len(f\"_{self.__class__.__name__}__\"):]\n ] = str(value) if isinstance(value, Decimal) else value\n\n return json.dumps(result)\n\n def parse(self, response: dict) -> dict:\n \"\"\"Parse response from Zabbix.\n\n Args:\n response (dict): Raw response from Zabbix.\n\n Raises:\n ProcessingError: Raises if unexpected response received\n \"\"\"\n\n fields = {\n \"processed\": ('[Pp]rocessed', r'\\d+'),\n \"failed\": ('[Ff]ailed', r'\\d+'),\n \"total\": ('[Tt]otal', r'\\d+'),\n \"time\": ('[Ss]econds spent', r'\\d+\\.\\d+')\n }\n\n pattern = re.compile(\n r\";\\s+?\".join([rf\"{r[0]}:\\s+?(?P<{k}>{r[1]})\" for k, r in fields.items()])\n )\n\n info = response.get('info')\n if not info:\n log.debug('Received unexpected response: %s', response)\n raise ProcessingError(f\"Received unexpected response: {response}\")\n\n res = pattern.search(info).groupdict()\n\n return res\n\n def add(self, response: dict, chunk: Union[int, None] = None) -> Self:\n \"\"\"Add and merge response data from Zabbix.\n\n Args:\n response (dict): Raw response from Zabbix.\n chunk (Union[int, None], optional): Chunk number. Defaults to `None`.\n \"\"\"\n\n resp = self.parse(response)\n\n def add_value(cls, key, value):\n setattr(\n cls,\n key,\n getattr(cls, key) + value\n )\n\n for k, v in resp.items():\n add_value(\n self,\n f\"_{self.__class__.__name__}__{k}\",\n Decimal(v) if '.' in v else int(v)\n )\n if chunk is not None:\n self.__chunk = chunk\n\n return self\n\n @property\n def processed(self) -> int:\n \"\"\"Returns number of processed packets.\n\n Returns:\n int: Number of processed packets.\n \"\"\"\n\n return self.__processed\n\n @property\n def failed(self) -> int:\n \"\"\"Returns number of failed packets.\n\n Returns:\n int: Number of failed packets.\n \"\"\"\n\n return self.__failed\n\n @property\n def total(self) -> int:\n \"\"\"Returns total number of packets.\n\n Returns:\n int: Total number of packets.\n \"\"\"\n\n return self.__total\n\n @property\n def time(self) -> int:\n \"\"\"Returns value of spent time.\n\n Returns:\n int: Spent time for the packets sending.\n \"\"\"\n\n return self.__time\n\n @property\n def chunk(self) -> int:\n \"\"\"Returns current chunk number.\n\n Returns:\n int: Number of the current chunk.\n \"\"\"\n\n return self.__chunk"
},
{
"identifier": "APIRequestError",
"path": "zabbix_utils/exceptions.py",
"snippet": "class APIRequestError(ModuleBaseException):\n \"\"\"Exception class when Zabbix API returns error by request.\n\n Args:\n api_error (Union[str, dict]): Raw error message from Zabbix API.\n \"\"\"\n def __init__(self, api_error: Union[str, dict]):\n if isinstance(api_error, dict):\n api_error['body'] = ModuleUtils.hide_private(api_error['body'])\n super().__init__(\"{message} {data}\".format(**api_error))\n for key, value in api_error.items():\n setattr(self, key, value)\n else:\n super().__init__(api_error)"
},
{
"identifier": "APINotSupported",
"path": "zabbix_utils/exceptions.py",
"snippet": "class APINotSupported(ModuleBaseException):\n \"\"\"Exception class when object/action is not supported by Zabbix API.\n\n Args:\n message (str): Not supported object/action message.\n\n version (str): Current version of Zabbix API.\n \"\"\"\n\n def __init__(self, message: str, version: str = None):\n if version:\n message = f\"{message} is unsupported for Zabbix {version} version\"\n super().__init__(message)"
}
] | import sys
import time
import unittest
from zabbix_utils.getter import Getter
from zabbix_utils.api import ZabbixAPI, APIVersion
from zabbix_utils.sender import ItemValue, Sender, TrapperResponse
from zabbix_utils.exceptions import APIRequestError, APINotSupported | 9,353 | password=self.password
)
self.assertIsNotNone(self.zapi._ZabbixAPI__session_id, "Login by user and password was going wrong")
resp = self.zapi.user.checkAuthentication(sessionid=self.zapi._ZabbixAPI__session_id)
self.assertEqual(
type(resp), dict, "Request user.checkAuthentication was going wrong")
users = self.zapi.user.get(
output=['userid', 'name']
)
self.assertEqual(type(users), list, "Request user.get was going wrong")
self.zapi.logout()
self.assertIsNone(self.zapi._ZabbixAPI__session_id, "Logout was going wrong")
with self.assertRaises(APIRequestError,
msg="Request user.checkAuthentication after logout was going wrong"):
resp = self.zapi.user.checkAuthentication(sessionid=self.zapi._ZabbixAPI__session_id)
def test_token_auth(self):
"""Tests auth using token"""
with self.assertRaises(APINotSupported,
msg="Login by token should be not supported"):
self.zapi.login(token=self.token)
class CompatibilitySenderTest(unittest.TestCase):
"""Compatibility test with Zabbix sender version 5.0"""
def setUp(self):
self.ip = '127.0.0.1'
self.port = 10051
self.chunk_size = 10
self.sender = Sender(
server=self.ip,
port=self.port,
chunk_size=self.chunk_size
)
self.hostname = f"{self.__class__.__name__}_host"
self.itemname = f"{self.__class__.__name__}_item"
self.itemkey = f"{self.__class__.__name__}"
self.prepare_items()
def prepare_items(self):
"""Creates host and items for sending values later"""
zapi = ZabbixAPI(
url=ZABBIX_URL,
user=ZABBIX_USER,
password=ZABBIX_PASSWORD,
skip_version_check=True
)
hosts = zapi.host.get(
filter={'host': self.hostname},
output=['hostid']
)
hostid = None
if len(hosts) > 0:
hostid = hosts[0].get('hostid')
if not hostid:
hostid = zapi.host.create(
host=self.hostname,
interfaces=[{
"type": 1,
"main": 1,
"useip": 1,
"ip": "127.0.0.1",
"dns": "",
"port": "10050"
}],
groups=[{"groupid": "2"}]
)['hostids'][0]
self.assertIsNotNone(hostid, "Creating test host was going wrong")
items = zapi.item.get(
filter={'key_': self.itemkey},
output=['itemid']
)
itemid = None
if len(items) > 0:
itemid = items[0].get('itemid')
if not itemid:
itemid = zapi.item.create(
name=self.itemname,
key_=self.itemkey,
hostid=hostid,
type=2,
value_type=3
)['itemids'][0]
time.sleep(2)
self.assertIsNotNone(hostid, "Creating test item was going wrong")
zapi.logout()
def test_send_values(self):
"""Tests sending item values"""
items = [
ItemValue(self.hostname, self.itemkey, 10),
ItemValue(self.hostname, self.itemkey, 'test message'),
ItemValue(self.hostname, 'item_key1', -1, 1695713666),
ItemValue(self.hostname, 'item_key2', '{"msg":"test message"}'),
ItemValue(self.hostname, self.itemkey, 0, 1695713666, 100),
ItemValue(self.hostname, self.itemkey, 5.5, 1695713666)
]
resp = list(self.sender.send(items).values())[0]
| #!/usr/bin/env python
# Copyright (C) 2001-2023 Zabbix SIA
#
# Zabbix SIA licenses this file under the MIT License.
# See the LICENSE file in the project root for more information.
sys.path.append('.')
ZABBIX_URL = 'localhost'
ZABBIX_USER = 'Admin'
ZABBIX_PASSWORD = 'zabbix'
class CompatibilityAPITest(unittest.TestCase):
"""Compatibility test with Zabbix API version 5.0"""
def setUp(self):
self.url = 'localhost'
self.user = 'Admin'
self.password = 'zabbix'
self.token = 'token'
self.zapi = ZabbixAPI(
url=self.url
)
def test_classic_auth(self):
"""Tests classic auth using username and password"""
self.assertEqual(
type(self.zapi), ZabbixAPI, "Creating ZabbixAPI object was going wrong")
self.assertEqual(
type(self.zapi.api_version()), APIVersion, "Version getting was going wrong")
self.zapi.login(
user=self.user,
password=self.password
)
self.assertIsNotNone(self.zapi._ZabbixAPI__session_id, "Login by user and password was going wrong")
resp = self.zapi.user.checkAuthentication(sessionid=self.zapi._ZabbixAPI__session_id)
self.assertEqual(
type(resp), dict, "Request user.checkAuthentication was going wrong")
users = self.zapi.user.get(
output=['userid', 'name']
)
self.assertEqual(type(users), list, "Request user.get was going wrong")
self.zapi.logout()
self.assertIsNone(self.zapi._ZabbixAPI__session_id, "Logout was going wrong")
with self.assertRaises(APIRequestError,
msg="Request user.checkAuthentication after logout was going wrong"):
resp = self.zapi.user.checkAuthentication(sessionid=self.zapi._ZabbixAPI__session_id)
def test_token_auth(self):
"""Tests auth using token"""
with self.assertRaises(APINotSupported,
msg="Login by token should be not supported"):
self.zapi.login(token=self.token)
class CompatibilitySenderTest(unittest.TestCase):
"""Compatibility test with Zabbix sender version 5.0"""
def setUp(self):
self.ip = '127.0.0.1'
self.port = 10051
self.chunk_size = 10
self.sender = Sender(
server=self.ip,
port=self.port,
chunk_size=self.chunk_size
)
self.hostname = f"{self.__class__.__name__}_host"
self.itemname = f"{self.__class__.__name__}_item"
self.itemkey = f"{self.__class__.__name__}"
self.prepare_items()
def prepare_items(self):
"""Creates host and items for sending values later"""
zapi = ZabbixAPI(
url=ZABBIX_URL,
user=ZABBIX_USER,
password=ZABBIX_PASSWORD,
skip_version_check=True
)
hosts = zapi.host.get(
filter={'host': self.hostname},
output=['hostid']
)
hostid = None
if len(hosts) > 0:
hostid = hosts[0].get('hostid')
if not hostid:
hostid = zapi.host.create(
host=self.hostname,
interfaces=[{
"type": 1,
"main": 1,
"useip": 1,
"ip": "127.0.0.1",
"dns": "",
"port": "10050"
}],
groups=[{"groupid": "2"}]
)['hostids'][0]
self.assertIsNotNone(hostid, "Creating test host was going wrong")
items = zapi.item.get(
filter={'key_': self.itemkey},
output=['itemid']
)
itemid = None
if len(items) > 0:
itemid = items[0].get('itemid')
if not itemid:
itemid = zapi.item.create(
name=self.itemname,
key_=self.itemkey,
hostid=hostid,
type=2,
value_type=3
)['itemids'][0]
time.sleep(2)
self.assertIsNotNone(hostid, "Creating test item was going wrong")
zapi.logout()
def test_send_values(self):
"""Tests sending item values"""
items = [
ItemValue(self.hostname, self.itemkey, 10),
ItemValue(self.hostname, self.itemkey, 'test message'),
ItemValue(self.hostname, 'item_key1', -1, 1695713666),
ItemValue(self.hostname, 'item_key2', '{"msg":"test message"}'),
ItemValue(self.hostname, self.itemkey, 0, 1695713666, 100),
ItemValue(self.hostname, self.itemkey, 5.5, 1695713666)
]
resp = list(self.sender.send(items).values())[0]
| self.assertEqual(type(resp), TrapperResponse, "Sending item values was going wrong") | 5 | 2023-10-16 12:49:35+00:00 | 12k |
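A minimal usage sketch of the Sender, ItemValue, and TrapperResponse interfaces documented in the context snippets of the row above; the server address, host name, item key, and timestamp below are placeholder values, and a reachable Zabbix trapper is assumed rather than guaranteed.

from zabbix_utils.sender import ItemValue, Sender

# Placeholder connection settings; point these at a real Zabbix server or proxy.
sender = Sender(server='127.0.0.1', port=10051, chunk_size=250)

# Each ItemValue carries the host name, item key, value, and an optional Unix-timestamp clock.
items = [
    ItemValue('example_host', 'example.key', 10),
    ItemValue('example_host', 'example.key', 'test message', 1695713666),
]

# send() returns a dict keyed by cluster node; each value is a TrapperResponse
# whose processed/failed/total counters are merged across chunks.
for node, resp in sender.send(items).items():
    print(node, resp.processed, resp.failed, resp.total)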
YefanZhou/TempBalance | main_tb.py | [
{
"identifier": "Tempbalance",
"path": "tempbalance.py",
"snippet": "class Tempbalance(object):\n def __init__(self, \n net, \n EVALS_THRESH=0.00001,\n bins=100, \n conv_norm=0.5,\n pl_fitting='median',\n xmin_pos=2,\n filter_zeros=False,\n remove_first_layer=True,\n remove_last_layer=True,\n eigs_thresh=50,\n esd_metric_for_tb='alpha',\n assign_func='tb_linear_map',\n lr_min_ratio=0.5,\n lr_max_ratio=1.5,\n batchnorm=True,\n batchnorm_type='name',\n layernorm=False\n ):\n \"\"\"init function\n Args:\n net (nn.module): net to train\n EVALS_THRESH (float, ): threshold to filter small eigenvalue. Defaults to 0.00001.\n bins (int, int): ESD bins. Defaults to 100.\n conv_norm (float, ): conv norm. Defaults to 0.5.\n pl_fitting (str, ): powerlaw fitting method. Defaults to median, ['median', 'goodness-of-fit', 'fix-finger']\n xmin_pos (int, ): set the position of minimum eigenvalue in the tail. Defaults to 2.\n filter_zeros (bool, ): filter small eigenvalues or not. Defaults to False.\n remove_first_layer (bool, ): whether exclude first layer in TB. Defaults to True.\n remove_last_layer (bool, ): whether exclude last layer in TB. Defaults to True.\n esd_metric_for_tb (str, ): metric for TB scheduling. Defaults to 'alpha'.\n assign_func (str, ): learning rate assignment function. Defaults to 'tb_linear_map'.\n lr_min_ratio (float, ): learning rate lower bound. Defaults to 0.5.\n lr_max_ratio (float, ): learning rate upper bound. Defaults to 1.5.\n batchnorm (bool, ): whether adjust batch norm learning rate using TB. Defaults to True.\n batchnorm_type (str, ): how to set learning rate for batchnorm layers\n layernorm (bool, ): whether adjust layer norm learning rate using TB. Defaults to True.\n \"\"\"\n self.net = net\n self.EVALS_THRESH = EVALS_THRESH\n self.bins = bins\n self.conv_norm = conv_norm\n self.pl_fitting = pl_fitting\n self.xmin_pos = xmin_pos\n self.filter_zeros = filter_zeros\n self.remove_first_layer = remove_first_layer\n self.remove_last_layer = remove_last_layer\n self.eigs_thresh = eigs_thresh\n self.esd_metric_for_tb = esd_metric_for_tb\n self.assign_func = assign_func\n self.lr_min_ratio = lr_min_ratio\n self.lr_max_ratio = lr_max_ratio\n self.batchnorm = batchnorm\n self.layernorm = layernorm\n self.bn_to_conv = {}\n self.ln_to_linear = {}\n # print('EVALS_THRESH', self.EVALS_THRESH, type(self.EVALS_THRESH) )\n # print('bins', self.bins, type(self.bins) )\n # print('conv_norm', self.conv_norm, type(self.conv_norm) )\n # print('pl_fitting', self.pl_fitting, type(self.pl_fitting) )\n # print('xmin_pos', self.xmin_pos, type(self.xmin_pos) )\n # print('filter_zeros', self.filter_zeros, type(self.filter_zeros) )\n # print('remove_first_layer', self.remove_first_layer, type(self.remove_first_layer) )\n # print('remove_last_layer', self.remove_last_layer, type(self.remove_last_layer) )\n # print('esd_metric_for_tb', self.esd_metric_for_tb, type(self.esd_metric_for_tb) )\n # print('assign_func', self.assign_func, type(self.assign_func) )\n # print('lr_min_ratio', self.lr_min_ratio, type(self.lr_min_ratio) )\n # print('lr_max_ratio', self.lr_max_ratio, type(self.lr_max_ratio) )\n # print('batchnorm', self.batchnorm, type(self.batchnorm) )\n \n if batchnorm and batchnorm_type == 'name':\n # let the batch norm layer change lr corresponding to the layer\n # with the same layer name \n longname_lst = []\n for name, m in self.net.named_modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n longname_lst.append(name)\n for name, module in self.net.named_modules():\n if isinstance(module, nn.BatchNorm2d) \\\n and 
name.replace('bn', 'conv') in longname_lst:\n self.bn_to_conv[name] = name.replace('bn', 'conv')\n \n elif batchnorm and batchnorm_type == 'order':\n # let the batch norm layer change lr corresponding to the \n # conv layer before current layer\n longname_lst = []\n type_lst = []\n for name, module in self.net.named_modules():\n if isinstance(module, nn.Conv2d):\n longname_lst.append(name)\n type_lst.append('nn.Conv2d')\n if isinstance(module, nn.BatchNorm2d):\n if type_lst[-1] == 'nn.Conv2d':\n self.bn_to_conv[name] = longname_lst[-1]\n longname_lst.append(name)\n type_lst.append('nn.BatchNorm2d')\n \n if self.layernorm:\n longname_lst = []\n type_lst = []\n for name, module in self.net.named_modules():\n if isinstance(module, nn.Linear):\n longname_lst.append(name)\n type_lst.append('nn.Linear')\n if isinstance(module, nn.LayerNorm):\n if type_lst[-1] == 'nn.Linear':\n self.ln_to_linear[name] = longname_lst[-1]\n longname_lst.append(name)\n type_lst.append('nn.LayerNorm')\n \n \n def build_optimizer_param_group(self, untuned_lr=0.1, initialize=True):\n \"\"\"build the parameter group for optimizer\n\n Args:\n untuned_lr (float, ): global learning rate that is not tuned. Defaults to 0.1.\n initialize (bool, ): if True, build a list of dictionary, if False, build a list of learning rate . Defaults to True.\n\n Returns:\n _type_: _description_\n \"\"\"\n metrics = self.net_esd_estimator()\n layer_stats = pd.DataFrame({key:metrics[key] for key in metrics if key!='eigs'})\n \n if self.remove_first_layer:\n layer_stats = layer_stats.drop(labels=0, axis=0)\n # index must be reset otherwise may delete the wrong row \n layer_stats.index = list(range(len(layer_stats[self.esd_metric_for_tb])))\n if self.remove_last_layer:\n layer_stats = layer_stats.drop(labels=len(layer_stats) - 1, axis=0)\n # index must be reset otherwise may delete the wrong row \n layer_stats.index = list(range(len(layer_stats[self.esd_metric_for_tb])))\n \n # remove layers with number of eigs less than a threshold\n layer_stats = layer_stats[layer_stats['eigs_num'] >= self.eigs_thresh]\n layer_stats.index = list(range(len(layer_stats[self.esd_metric_for_tb])))\n \n metric_scores = np.array(layer_stats[self.esd_metric_for_tb])\n scheduled_lr = self.get_layer_temps(assign_func=self.assign_func, \n metric_scores=metric_scores, \n untuned_lr=untuned_lr)\n \n layer_stats['scheduled_lr'] = scheduled_lr\n layer_name_to_tune = list(layer_stats['longname'])\n opt_params_groups = []\n params_to_tune_ids = []\n layer_count = 0\n # these params should be tuned\n for name, module in self.net.named_modules():\n \n # these are the conv layers analyzed by ESD\n if name in layer_name_to_tune:\n params_to_tune_ids += list(map(id, module.parameters()))\n scheduled_lr = layer_stats[layer_stats['longname'] == name]['scheduled_lr'].item()\n if initialize:\n # append a dictionary for initialize optimizer\n opt_params_groups.append({'params': module.parameters(), 'lr': scheduled_lr})\n else:\n # append tuned learning rate \n opt_params_groups.append(scheduled_lr)\n layer_count += 1\n # decide should we tune the batch norm accordingly\n elif self.batchnorm \\\n and isinstance(module, nn.BatchNorm2d) \\\n and name in self.bn_to_conv \\\n and self.bn_to_conv[name] in layer_name_to_tune:\n \n params_to_tune_ids += list(map(id, module.parameters()))\n scheduled_lr = layer_stats[layer_stats['longname'] == self.bn_to_conv[name]]['scheduled_lr'].item()\n if initialize:\n # append a dictionary for initialize optimizer\n opt_params_groups.append({'params': 
module.parameters(), 'lr': scheduled_lr})\n else:\n # append tuned learning rate \n opt_params_groups.append(scheduled_lr)\n layer_count += 1\n \n elif self.layernorm \\\n and isinstance(module, nn.LayerNorm) \\\n and name in self.ln_to_linear \\\n and self.ln_to_linear[name] in layer_name_to_tune:\n \n params_to_tune_ids += list(map(id, module.parameters()))\n scheduled_lr = layer_stats[layer_stats['longname'] == self.ln_to_linear[name]]['scheduled_lr'].item()\n if initialize:\n opt_params_groups.append({'params': module.parameters(), 'lr': scheduled_lr})\n else:\n opt_params_groups.append(scheduled_lr)\n layer_count += 1\n \n if initialize:\n # those params are untuned\n untuned_params = \\\n filter(lambda p: id(p) not in params_to_tune_ids, self.net.parameters())\n opt_params_groups.append({'params': untuned_params, 'lr': untuned_lr}) \n return opt_params_groups, layer_count\n else:\n return opt_params_groups, layer_count\n \n def step(self, optimizer, untuned_lr):\n opt_params_groups, layer_count = \\\n self.build_optimizer_param_group(untuned_lr=untuned_lr, initialize=False)\n for index, param_group in enumerate(optimizer.param_groups):\n if index <= layer_count - 1:\n param_group['lr'] = opt_params_groups[index]\n else:\n param_group['lr'] = untuned_lr\n \n def net_esd_estimator(\n self,\n verbose=False):\n \"\"\"evaluate the ESD of the conv nets\n Args:\n verbose: \n Returns:\n _type_: _description_\n \"\"\"\n results = {\n 'alphahat':[],\n 'alpha':[],\n 'spectral_norm': [],\n 'D': [],\n 'longname':[],\n 'eigs':[],\n 'norm':[],\n 'eigs_num':[]\n }\n if verbose:\n print(\"=================================\")\n print(f\"pl_fitting: {self.pl_fitting}, xmin_pos: {self.xmin_pos}, conv_norm: {self.conv_norm}, filter_zeros: {self.filter_zeros}\")\n print(\"=================================\")\n # iterate through layers\n for name, m in self.net.named_modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n matrix = m.weight.data.clone()\n # normalization and tranpose Conv2d\n if isinstance(m, nn.Conv2d):\n matrix = torch.flatten(matrix, start_dim=2) * math.sqrt(self.conv_norm)\n matrix = matrix.transpose(1, 2).transpose(0, 1)\n eigs = torch.square(torch.linalg.svdvals(matrix).flatten())\n # ascending order \n eigs, _ = torch.sort(eigs, descending=False)\n spectral_norm = eigs[-1].item()\n fnorm = torch.sum(eigs).item()\n \n if self.filter_zeros:\n nz_eigs = eigs[eigs > self.EVALS_THRESH]\n N = len(nz_eigs)\n # somethines N may equal 0, if that happens, we don't filter eigs\n if N == 0:\n nz_eigs = eigs\n N = len(nz_eigs)\n else:\n nz_eigs = eigs\n N = len(nz_eigs)\n\n log_nz_eigs = torch.log(nz_eigs)\n\n if self.pl_fitting == 'median':\n i = int(len(nz_eigs) / self.xmin_pos) \n xmin = nz_eigs[i]\n n = float(N - i)\n seq = torch.arange(n).cuda()\n final_alpha = 1 + n / (torch.sum(log_nz_eigs[i:]) - n * log_nz_eigs[i])\n final_D = torch.max(torch.abs(\n 1 - (nz_eigs[i:] / xmin) ** (-final_alpha + 1) - seq / n \n ))\n else:\n alphas = torch.zeros(N-1)\n Ds = torch.ones(N-1)\n if self.pl_fitting == 'fix-finger':\n hist_nz_eigs = torch.log10(nz_eigs)\n min_e, max_e = hist_nz_eigs.min(), hist_nz_eigs.max()\n counts = torch.histc(hist_nz_eigs, self.bins, min=min_e, max=max_e)\n boundaries = torch.linspace(min_e, max_e, self.bins + 1)\n h = counts, boundaries\n ih = torch.argmax(h[0]) # \n xmin2 = 10 ** h[1][ih]\n xmin_min = torch.log10(0.95 * xmin2)\n xmin_max = 1.5 * xmin2\n \n for i, xmin in enumerate(nz_eigs[:-1]):\n if self.pl_fitting == 'fix-finger':\n if xmin < xmin_min:\n 
continue\n if xmin > xmin_max:\n break\n\n n = float(N - i)\n seq = torch.arange(n).cuda()\n alpha = 1 + n / (torch.sum(log_nz_eigs[i:]) - n * log_nz_eigs[i])\n alphas[i] = alpha\n if alpha > 1:\n Ds[i] = torch.max(torch.abs(\n 1 - (nz_eigs[i:] / xmin) ** (-alpha + 1) - seq / n \n ))\n\n min_D_index = torch.argmin(Ds)\n final_alpha = alphas[min_D_index]\n final_D = Ds[min_D_index]\n \n final_alpha = final_alpha.item()\n final_D = final_D.item()\n final_alphahat=final_alpha*math.log10(spectral_norm)\n\n results['spectral_norm'].append(spectral_norm)\n results['alphahat'].append(final_alphahat)\n results['norm'].append(fnorm)\n results['alpha'].append(final_alpha)\n results['D'].append(final_D)\n results['longname'].append(name)\n results['eigs'].append(eigs.detach().cpu().numpy())\n results['eigs_num'].append(len(eigs))\n \n return results\n \n \n def get_layer_temps(self, assign_func, metric_scores, untuned_lr):\n n = len(metric_scores)\n idx = [i for i in range(n)]\n temps = np.array([untuned_lr] * n)\n \n if assign_func == 'tb_linear_map':\n lr_range = [self.lr_min_ratio * untuned_lr, self.lr_max_ratio * untuned_lr]\n score_range = [min(metric_scores), max(metric_scores)]\n temps = np.interp(metric_scores, score_range, lr_range)\n\n elif assign_func == 'tb_sqrt':\n temps = np.sqrt(metric_scores)/np.sum(np.sqrt(metric_scores)) * n * untuned_lr\n\n elif assign_func == 'tb_log2':\n temps = np.log2(metric_scores)/np.sum(np.log2(metric_scores)) * n * untuned_lr\n\n elif assign_func == 'tb_step':\n idxes = np.argsort(metric_scores)\n unsort_temps = [untuned_lr * (self.lr_min_ratio + (self.lr_max_ratio - self.lr_min_ratio) * i / n) for i in range(n)]\n temps = [value for _, value in sorted(list(zip(idxes, unsort_temps)), key=itemgetter(0))]\n \n else:\n raise NotImplementedError\n \n return temps"
},
{
"identifier": "SGDSNR",
"path": "sgdsnr.py",
"snippet": "class SGDSNR(Optimizer):\n r\"\"\"Implements stochastic gradient descent (optionally with momentum).\n\n Nesterov momentum is based on the formula from\n `On the importance of initialization and momentum in deep learning`__.\n\n Args:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float): learning rate\n momentum (float, optional): momentum factor (default: 0)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n dampening (float, optional): dampening for momentum (default: 0)\n nesterov (bool, optional): enables Nesterov momentum (default: False)\n\n Example:\n >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)\n >>> optimizer.zero_grad()\n >>> loss_fn(model(input), target).backward()\n >>> optimizer.step()\n\n __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf\n\n .. note::\n The implementation of SGD with Momentum/Nesterov subtly differs from\n Sutskever et. al. and implementations in some other frameworks.\n\n Considering the specific case of Momentum, the update can be written as\n\n .. math::\n \\begin{aligned}\n v_{t+1} & = \\mu * v_{t} + g_{t+1}, \\\\\n p_{t+1} & = p_{t} - \\text{lr} * v_{t+1},\n \\end{aligned}\n\n where :math:`p`, :math:`g`, :math:`v` and :math:`\\mu` denote the\n parameters, gradient, velocity, and momentum respectively.\n\n This is in contrast to Sutskever et. al. and\n other frameworks which employ an update of the form\n\n .. math::\n \\begin{aligned}\n v_{t+1} & = \\mu * v_{t} + \\text{lr} * g_{t+1}, \\\\\n p_{t+1} & = p_{t} - v_{t+1}.\n \\end{aligned}\n\n The Nesterov version is analogously modified.\n \"\"\"\n\n def __init__(self, params, lr=required, \n momentum=0, dampening=0,\n weight_decay=0, nesterov=False, \n spectrum_regularization=0, \n differentiable: bool = False):\n if lr is not required and lr < 0.0:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if momentum < 0.0:\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\n if weight_decay < 0.0:\n raise ValueError(\"Invalid weight_decay value: {}\".format(weight_decay))\n\n defaults = dict(lr=lr, momentum=momentum, dampening=dampening,\n weight_decay=weight_decay, nesterov=nesterov, \n spectrum_regularization=spectrum_regularization)\n if nesterov and (momentum <= 0 or dampening != 0):\n raise ValueError(\"Nesterov momentum requires a momentum and zero dampening\")\n super(SGDSNR, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(SGDSNR, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('nesterov', False)\n group.setdefault('spectrum_regularization', 0)\n \n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n spectrum_regularization = group['spectrum_regularization']\n \n\n for p in group['params']:\n if p.grad is None:\n continue\n d_p = p.grad\n\n if weight_decay != 0:\n d_p = d_p.add(p, alpha=weight_decay)\n else:\n if spectrum_regularization != 0:\n #print(\"spectrum_regularization\", spectrum_regularization, p.dim())\n if p.dim() > 1:\n d_p = 
d_p.add(torch.reshape(compute_weight(p.to(device)), p.shape),\n alpha=spectrum_regularization)\n else:\n pass\n else:\n pass\n\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(d_p, alpha=1 - dampening)\n if nesterov:\n d_p = d_p.add(buf, alpha=momentum)\n else:\n d_p = buf\n\n p.add_(d_p, alpha=-group['lr'])\n\n return loss"
},
{
"identifier": "LARS",
"path": "lars_optim/lars.py",
"snippet": "class LARS(Optimizer):\n def __init__(self, params, lr=required, momentum=0, weight_decay=0, eeta=0.001, epsilon=1e-5):\n if lr is not required and lr < 0.0:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if momentum < 0.0:\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\n if weight_decay < 0.0:\n raise ValueError(\n \"Invalid weight_decay value: {}\".format(weight_decay))\n if eeta <= 0 or eeta > 1:\n raise ValueError(\"Invalid eeta value: {}\".format(eeta))\n if epsilon <= 0:\n raise ValueError(\"Invalid epsilon value: {}\".format(epsilon))\n defaults = dict(lr=lr, momentum=momentum,\n weight_decay=weight_decay, eeta=eeta, epsilon=epsilon, lars=True)\n\n super().__init__(params, defaults)\n\n @torch.no_grad()\n def step(self, closure=None):\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n eeta = group['eeta']\n lr = group['lr']\n lars = group['lars']\n eps = group['epsilon']\n\n for p in group['params']:\n if p.grad is None:\n continue\n decayed_grad = p.grad\n scaled_lr = lr\n if lars:\n w_norm = torch.norm(p)\n g_norm = torch.norm(p.grad)\n trust_ratio = torch.where(\n w_norm > 0 and g_norm > 0,\n eeta * w_norm / (g_norm + weight_decay * w_norm + eps),\n torch.ones_like(w_norm)\n )\n trust_ratio.clamp_(0.0, 50)\n scaled_lr *= trust_ratio.item()\n if weight_decay != 0:\n decayed_grad = decayed_grad.add(p, alpha=weight_decay)\n decayed_grad = torch.clamp(decayed_grad, -10.0, 10.0)\n\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = torch.clone(\n decayed_grad).detach()\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(decayed_grad)\n decayed_grad = buf\n\n p.add_(decayed_grad, alpha=-scaled_lr)\n\n return loss"
},
{
"identifier": "LAMB",
"path": "lars_optim/lamb.py",
"snippet": "class LAMB(Optimizer):\n r\"\"\"Implements Lamb algorithm.\n It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n adam (bool, optional): always use trust ratio = 1, which turns this into\n Adam. Useful for comparison purposes.\n .. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:\n https://arxiv.org/abs/1904.00962\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,\n weight_decay=0, bias_correction=False):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\n \"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\n \"Invalid beta parameter at index 1: {}\".format(betas[1]))\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay, bias_correction=bias_correction)\n super().__init__(params, defaults)\n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n torch.nn.utils.clip_grad_norm_(\n parameters=[\n p for group in self.param_groups for p in group['params']],\n max_norm=1.0,\n norm_type=2\n )\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError(\n 'Lamb does not support sparse gradients, consider SparseAdam instad.')\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n # Decay the first and second moment running average coefficient\n # m_t\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n # v_t\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n\n # Paper v3 does not use debiasing.\n # bias_correction1 = 1 - beta1 ** state['step']\n # bias_correction2 = 1 - beta2 ** state['step']\n # Apply bias to lr to avoid broadcast.\n # * math.sqrt(bias_correction2) / bias_correction1\n scaled_lr = group['lr']\n if group['bias_correction']:\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n exp_avg.div_(bias_correction1)\n exp_avg_sq.div_(bias_correction2)\n update = exp_avg / exp_avg_sq.sqrt().add(group['eps'])\n if group['weight_decay'] != 0:\n update.add_(p.data, alpha=group['weight_decay'])\n w_norm = torch.norm(p)\n g_norm = torch.norm(update)\n trust_ratio = torch.where(\n w_norm > 0 and g_norm > 0,\n w_norm / g_norm,\n torch.ones_like(w_norm)\n )\n 
scaled_lr *= trust_ratio.item()\n\n p.data.add_(update, alpha=-scaled_lr)\n\n return loss"
},
{
"identifier": "train",
"path": "utils.py",
"snippet": "def train( epoch, \n net, \n num_epochs, \n trainloader, \n criterion, \n optimizer, \n optim_type='SGD', \n tb_update_interval=0, \n untuned_lr=0, \n args=None):\n \n net.train()\n net.training = True\n train_loss = 0\n correct = 0\n total = 0 \n print(f'Training Epoch {epoch}')\n pbar = tqdm.tqdm(total=len(trainloader), desc=\"Training\")\n\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n inputs, targets = inputs.cuda(), targets.cuda() # GPU settings\n optimizer.zero_grad()\n outputs = net(inputs) # Forward Propagation\n loss = criterion(outputs, targets) # Loss\n loss.backward() # Backward Propagation\n optimizer.step() # Optimizer update\n\n train_loss += loss.item() * targets.size(0)\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n pbar.update(1)\n \n # if tb_update_interval > 0 and args.total_iters % tb_update_interval == 0:\n # print(f\"--------------------> tb_update_interval: {tb_update_interval}, temp_balance\")\n # temp_balance(args=args, net=net, optimizer=optimizer, epoch=epoch, untuned_lr=untuned_lr, iters=args.total_iters)\n \n # if tb_update_interval > 0:\n # args.total_iters += 1\n \n pbar.close()\n train_loss /= total\n acc = 100.*correct/total\n acc = acc.item()\n\n return acc, train_loss"
},
{
"identifier": "test",
"path": "utils.py",
"snippet": "def test(epoch, net, testloader, criterion):\n net.eval()\n net.training = False\n test_loss = 0\n correct = 0\n total = 0\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n inputs, targets = inputs.cuda(), targets.cuda()\n outputs = net(inputs)\n loss = criterion(outputs, targets)\n\n test_loss += loss.item() * targets.size(0)\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n\n # Save checkpoint when best model\n acc = 100.*correct/total\n acc = acc.item()\n test_loss = test_loss/total\n \n return acc, test_loss"
},
{
"identifier": "getNetwork",
"path": "utils.py",
"snippet": "def getNetwork(args, num_classes):\n if args.net_type == 'vgg_cifar':\n net = VGG_cifar(args.depth, num_classes, args.widen_factor)\n file_name = 'vgg_cifar'\n elif args.net_type == 'resnet':\n net = ResNet(args.depth, num_classes, args.widen_factor)\n file_name = 'resnet'\n elif args.net_type == 'resnet_tiny_imagenet':\n net = ResNet_tiny_imagenet(args.depth, num_classes=num_classes)\n file_name = 'resnet_tiny_imagenet'\n elif args.net_type == 'wide_resnet':\n net = Wide_ResNet(depth=args.depth, \n widen_factor=args.widen_factor, \n num_classes=num_classes)\n file_name = 'wide_resnet'\n \n return net, file_name"
},
{
"identifier": "save_args_to_file",
"path": "utils.py",
"snippet": "def save_args_to_file(args, output_file_path):\n with open(output_file_path, \"w\") as output_file:\n json.dump(vars(args), output_file, indent=4)"
}
] | import os
import sys
import time
import argparse
import random
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import numpy as np
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import config as cf
import torch_optimizer
from pathlib import Path
from os.path import join
from tempbalance import Tempbalance
from sgdsnr import SGDSNR
from adamp import SGDP, AdamP
from lars_optim import LARS, LAMB
from utils import train, test, getNetwork, save_args_to_file | 10,120 |
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
])
data_path = join(args.datadir, args.dataset)
if(args.dataset == 'cifar10'):
print("| Preparing CIFAR-10 dataset...")
sys.stdout.write("| ")
trainset = torchvision.datasets.CIFAR10(root=data_path, train=True,
download=True,
transform=transform_train)
testset = torchvision.datasets.CIFAR10(root=data_path, train=False,
download=False,
transform=transform_test)
num_classes = 10
elif(args.dataset == 'cifar100'):
print("| Preparing CIFAR-100 dataset...")
sys.stdout.write("| ")
trainset = torchvision.datasets.CIFAR100(root=data_path, train=True,
download=True,
transform=transform_train)
testset = torchvision.datasets.CIFAR100(root=data_path, train=False,
download=False,
transform=transform_test)
num_classes = 100
elif(args.dataset == 'svhn'):
print("| Preparing SVHN dataset...")
sys.stdout.write("| ")
trainset = torchvision.datasets.SVHN(root=data_path,
split='train',
download=True,
transform=transform_train)
testset = torchvision.datasets.SVHN(root=data_path,
split='test',
download=True,
transform=transform_test)
num_classes = 10
elif(args.dataset == 'tiny-imagenet-200'):
print("| Preparing tiny-imagenet-200 dataset...")
sys.stdout.write("| ")
trainset = datasets.ImageFolder(os.path.join(data_path, 'train'), transform_train)
testset = datasets.ImageFolder(os.path.join(data_path, 'val'), transform_test)
num_classes = 200
else:
raise NotImplementedError
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=args.batch_size,
shuffle=True,
num_workers=6)
testloader = torch.utils.data.DataLoader(testset,
batch_size=cf.eval_batchsize[args.dataset],
shuffle=False,
num_workers=4)
Path(args.ckpt_path).mkdir(parents=True, exist_ok=True)
if args.print_tofile:
# Open files for stdout and stderr redirection
stdout_file = open(os.path.join(args.ckpt_path, 'stdout.log'), 'w')
stderr_file = open(os.path.join(args.ckpt_path, 'stderr.log'), 'w')
# Redirect stdout and stderr to the files
sys.stdout = stdout_file
sys.stderr = stderr_file
# Model
print('\n[Phase 2] : Model setup')
if args.resume:
# Load checkpoint
print('| Resuming from checkpoint...')
net, file_name = getNetwork(args, num_classes)
checkpoint = torch.load(args.resume, map_location='cpu')
net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['test_acc']
start_epoch = checkpoint['epoch']
print(f"Loaded Epoch: {start_epoch} \n Test Acc: {best_acc:.3f} Train Acc: {checkpoint['train_acc']:.3f}")
else:
print('| Building net type [' + args.net_type + ']...')
net, file_name = getNetwork(args, num_classes)
net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
best_acc = 0
if use_cuda:
net.cuda()
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()
print(net)
if args.use_tb:
print("##############Enable and init Temp Balancing##################")
tb_scheduler = Tempbalance(net=net,
pl_fitting=args.pl_fitting,
xmin_pos=args.xmin_pos,
filter_zeros=args.filter_zeros,
remove_first_layer=args.remove_first_layer,
remove_last_layer=args.remove_last_layer,
esd_metric_for_tb=args.esd_metric_for_tb,
assign_func=args.assign_func,
lr_min_ratio=args.lr_min_ratio,
lr_max_ratio=args.lr_max_ratio,
batchnorm=args.batchnorm,
batchnorm_type=args.batchnorm_type
)
tb_param_group, _ = \
tb_scheduler.build_optimizer_param_group(untuned_lr=args.lr, initialize=True)
if args.optim_type == 'SGD':
optimizer = optim.SGD(tb_param_group,
momentum=0.9,
weight_decay=args.weight_decay)
elif args.optim_type == 'SGDSNR':
| from __future__ import print_function
# NOTE (editor): the original import block is not shown in this dump; the imports
# below are reconstructed from how the names are used later in this script.
# Module names such as `config as cf` are assumptions, not verified against the repo.
import os
import sys
import random
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from pathlib import Path
from os.path import join
import config as cf  # assumed repo-local config providing mean/std/crop_size/eval_batchsize/start_epoch
from tempbalance import Tempbalance
from sgdsnr import SGDSNR
from adamp import SGDP, AdamP
from lars_optim import LARS, LAMB
from utils import train, test, getNetwork, save_args_to_file
parser = argparse.ArgumentParser(description='PyTorch CIFAR-10 Training')
parser.add_argument('--lr', type=float, default=0.01, help='learning_rate')
parser.add_argument('--net-type', type=str, default='wide-resnet', help='model')
parser.add_argument('--depth', type=int, default=28, help='depth of model')
parser.add_argument('--num-epochs', type=int, default=200, help='number of epochs')
parser.add_argument('--widen-factor', type=float, default=1, help='width of model')
parser.add_argument('--dataset', type=str, default='cifar10', help='dataset = [cifar10/cifar100]')
parser.add_argument('--lr-sche', type=str, default='cosine', choices=['cosine'])
parser.add_argument('--weight-decay', type=float, default=1e-4) # 5e-4
parser.add_argument('--ckpt-path', type=str, default='', help='path to checkpoints')
parser.add_argument('--print-tofile', default=False, type=lambda x: (str(x).lower() == 'true'), help='print to file')
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--datadir', type=str, default='', help='directory of dataset')
parser.add_argument('--optim-type', type=str, default='SGD', help='type of optimizer')
parser.add_argument('--resume', type=str, default='', help='resume from checkpoint')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--ww-interval', type=int, default=1)
parser.add_argument('--epochs-to-save', type=int, nargs='+', default=[])
parser.add_argument('--pl-fitting', type=str, default='median', choices=['median', 'goodness-of-fit', 'fix-finger'])
# temperature balance related
parser.add_argument('--use-tb', default=True, type=lambda x: (str(x).lower() == 'true'), help='use temp balance')
parser.add_argument('--remove-last-layer', default=True, type=lambda x: (str(x).lower() == 'true'), help='if remove the last layer')
parser.add_argument('--remove-first-layer', default=True, type=lambda x: (str(x).lower() == 'true'), help='if remove the first layer')
parser.add_argument('--batchnorm', default=True, type=lambda x: (str(x).lower() == 'true'), help='balancing batch norm layer')
parser.add_argument('--filter-zeros', default=False, type=lambda x: (str(x).lower() == 'true') )
parser.add_argument('--esd-metric-for-tb', type=str, default='alpha', help='ww metric')
parser.add_argument('--assign-func', type=str, default='', help='assignment function for layerwise lr')
parser.add_argument('--lr-min-ratio', type=float, default=0.5)
parser.add_argument('--lr-max-ratio', type=float, default=1.5)
parser.add_argument('--xmin-pos', type=float, default=2, help='xmin_index = size of eigs // xmin_pos')
parser.add_argument('--batchnorm-type', type=str, default='name', help='method to change batchnorm layer learning rate')
parser.add_argument('--look-k', type=int, default=5, help='')
parser.add_argument('--look-alpha', type=float, default=0.8, help='')
parser.add_argument('--T_0', type=int, default=10, help='')
parser.add_argument('--T-mult', type=int, default=2, help='')
# spectral regularization related
parser.add_argument('--sg', type=float, default=0.01, help='spectrum regularization')
args = parser.parse_args()
print(args)
# Save the arguments to a file
save_args_to_file(args, join(args.ckpt_path, 'args.json'))
def set_seed(seed=42):
print(f"=====> Set the random seed as {seed}")
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# When running on the CuDNN backend, two further options must be set
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Set a fixed value for the hash seed
os.environ["PYTHONHASHSEED"] = str(seed)
# Hyper Parameter settings
use_cuda = torch.cuda.is_available()
best_acc = 0
start_epoch = cf.start_epoch
set_seed(args.seed)
# Data Loader
print('\n[Phase 1] : Data Preparation')
print(f"prepare preprocessing, {args.dataset}")
transform_train = transforms.Compose([
transforms.RandomCrop(cf.crop_size[args.dataset], padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
]) # meanstd transformation
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
])
data_path = join(args.datadir, args.dataset)
if(args.dataset == 'cifar10'):
print("| Preparing CIFAR-10 dataset...")
sys.stdout.write("| ")
trainset = torchvision.datasets.CIFAR10(root=data_path, train=True,
download=True,
transform=transform_train)
testset = torchvision.datasets.CIFAR10(root=data_path, train=False,
download=False,
transform=transform_test)
num_classes = 10
elif(args.dataset == 'cifar100'):
print("| Preparing CIFAR-100 dataset...")
sys.stdout.write("| ")
trainset = torchvision.datasets.CIFAR100(root=data_path, train=True,
download=True,
transform=transform_train)
testset = torchvision.datasets.CIFAR100(root=data_path, train=False,
download=False,
transform=transform_test)
num_classes = 100
elif(args.dataset == 'svhn'):
print("| Preparing SVHN dataset...")
sys.stdout.write("| ")
trainset = torchvision.datasets.SVHN(root=data_path,
split='train',
download=True,
transform=transform_train)
testset = torchvision.datasets.SVHN(root=data_path,
split='test',
download=True,
transform=transform_test)
num_classes = 10
elif(args.dataset == 'tiny-imagenet-200'):
print("| Preparing tiny-imagenet-200 dataset...")
sys.stdout.write("| ")
trainset = datasets.ImageFolder(os.path.join(data_path, 'train'), transform_train)
testset = datasets.ImageFolder(os.path.join(data_path, 'val'), transform_test)
num_classes = 200
else:
raise NotImplementedError
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=args.batch_size,
shuffle=True,
num_workers=6)
testloader = torch.utils.data.DataLoader(testset,
batch_size=cf.eval_batchsize[args.dataset],
shuffle=False,
num_workers=4)
Path(args.ckpt_path).mkdir(parents=True, exist_ok=True)
if args.print_tofile:
# Open files for stdout and stderr redirection
stdout_file = open(os.path.join(args.ckpt_path, 'stdout.log'), 'w')
stderr_file = open(os.path.join(args.ckpt_path, 'stderr.log'), 'w')
# Redirect stdout and stderr to the files
sys.stdout = stdout_file
sys.stderr = stderr_file
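# Editor's note (sketch, not verified against the original script): when stdout
# and stderr are redirected to files like this, the matching cleanup after
# training would look roughly like:
#   if args.print_tofile:
#       sys.stdout = sys.__stdout__
#       sys.stderr = sys.__stderr__
#       stdout_file.close()
#       stderr_file.close()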
# Model
print('\n[Phase 2] : Model setup')
if args.resume:
# Load checkpoint
print('| Resuming from checkpoint...')
net, file_name = getNetwork(args, num_classes)
checkpoint = torch.load(args.resume, map_location='cpu')
net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['test_acc']
start_epoch = checkpoint['epoch']
print(f"Loaded Epoch: {start_epoch} \n Test Acc: {best_acc:.3f} Train Acc: {checkpoint['train_acc']:.3f}")
else:
print('| Building net type [' + args.net_type + ']...')
net, file_name = getNetwork(args, num_classes)
net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
best_acc = 0
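# Editor's note: the resume branch above implies checkpoints are dicts holding
# the keys 'net', 'epoch', 'train_acc' and 'test_acc'. A matching save call
# (sketch; `epoch`, `train_acc`, `test_acc` are placeholders produced during the
# training loop) would be:
#   state = {'net': net.state_dict(), 'epoch': epoch,
#            'train_acc': train_acc, 'test_acc': test_acc}
#   torch.save(state, os.path.join(args.ckpt_path, 'checkpoint.pth'))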
if use_cuda:
net.cuda()
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()
print(net)
if args.use_tb:
print("##############Enable and init Temp Balancing##################")
tb_scheduler = Tempbalance(net=net,
pl_fitting=args.pl_fitting,
xmin_pos=args.xmin_pos,
filter_zeros=args.filter_zeros,
remove_first_layer=args.remove_first_layer,
remove_last_layer=args.remove_last_layer,
esd_metric_for_tb=args.esd_metric_for_tb,
assign_func=args.assign_func,
lr_min_ratio=args.lr_min_ratio,
lr_max_ratio=args.lr_max_ratio,
batchnorm=args.batchnorm,
batchnorm_type=args.batchnorm_type
)
tb_param_group, _ = \
tb_scheduler.build_optimizer_param_group(untuned_lr=args.lr, initialize=True)
if args.optim_type == 'SGD':
optimizer = optim.SGD(tb_param_group,
momentum=0.9,
weight_decay=args.weight_decay)
elif args.optim_type == 'SGDSNR': | optimizer = SGDSNR(tb_param_group, | 1 | 2023-10-24 00:45:55+00:00 | 12k |
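# Editor's note: the row above stops inside the optimizer selection (its
# `next_line` field shows the SGDSNR branch continuing), so the training loop
# itself is not included. A minimal sketch of how such a loop is usually wired
# up with a cosine schedule and per-epoch temp-balancing; the `train`/`test`
# helper signatures and the `tb_scheduler.step(...)` hook are assumptions, not
# the repository's verified API:
#
#   import math
#   for epoch in range(start_epoch, start_epoch + args.num_epochs):
#       untuned_lr = 0.5 * args.lr * (1 + math.cos(math.pi * epoch / args.num_epochs))
#       train_acc = train(net, trainloader, criterion, optimizer, epoch)  # assumed signature
#       test_acc = test(net, testloader, criterion, epoch)                # assumed signature
#       if args.use_tb:
#           tb_scheduler.step(optimizer, untuned_lr)  # assumed per-epoch re-balancing hook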
zhaojw1998/AccoMontage-3 | piano_arranger/models/Poly_Dis.py | [
{
"identifier": "PytorchModel",
"path": "piano_arranger/models/amc_dl/torch_plus/module.py",
"snippet": "class PytorchModel(nn.Module):\n\n def __init__(self, name, device):\n self.name = name\n super(PytorchModel, self).__init__()\n if device is None:\n device = torch.device('cuda' if torch.cuda.is_available()\n else 'cpu')\n self.device = device\n\n def run(self, *input):\n \"\"\"A general way to run the model.\n Usually tensor input -> tensor output\"\"\"\n raise NotImplementedError\n\n def loss(self, *input, **kwargs):\n \"\"\"Call it during training. The output is loss and possibly others to\n display on tensorboard.\"\"\"\n raise NotImplementedError\n\n def inference(self, *input):\n \"\"\"Call it during inference.\n The output is usually numpy after argmax.\"\"\"\n raise NotImplementedError\n\n def loss_function(self, *input):\n raise NotImplementedError\n\n def forward(self, mode, *input, **kwargs):\n if mode in [\"run\", 0]:\n return self.run(*input, **kwargs)\n elif mode in ['loss', 'train', 1]:\n return self.loss(*input, **kwargs)\n elif mode in ['inference', 'eval', 'val', 2]:\n return self.inference(*input, **kwargs)\n else:\n raise NotImplementedError\n\n def load_model(self, model_path, map_location=None):\n if map_location is None:\n map_location = self.device\n dic = torch.load(model_path, map_location=map_location)\n for name in list(dic.keys()):\n dic[name.replace('module.', '')] = dic.pop(name)\n self.load_state_dict(dic)\n self.to(self.device)\n\n @staticmethod\n def init_model(*inputs):\n raise NotImplementedError"
},
{
"identifier": "get_zs_from_dists",
"path": "piano_arranger/models/amc_dl/torch_plus/train_utils.py",
"snippet": "def get_zs_from_dists(dists, sample=False):\n return [dist.rsample() if sample else dist.mean for dist in dists]"
},
{
"identifier": "kl_with_normal",
"path": "piano_arranger/models/amc_dl/torch_plus/train_utils.py",
"snippet": "def kl_with_normal(dist):\n shape = dist.mean.size(-1)\n normal = standard_normal(shape)\n kl = kl_divergence(dist, normal).mean()\n return kl"
},
{
"identifier": "RnnEncoder",
"path": "piano_arranger/models/ptvae.py",
"snippet": "class RnnEncoder(nn.Module):\n def __init__(self, input_dim, hidden_dim, z_dim):\n super(RnnEncoder, self).__init__()\n self.gru = nn.GRU(input_dim, hidden_dim, batch_first=True,\n bidirectional=True)\n self.linear_mu = nn.Linear(hidden_dim * 2, z_dim)\n self.linear_var = nn.Linear(hidden_dim * 2, z_dim)\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n self.z_dim = z_dim\n\n def forward(self, x):\n x = self.gru(x)[-1]\n x = x.transpose_(0, 1).contiguous()\n x = x.view(x.size(0), -1)\n mu = self.linear_mu(x)\n var = self.linear_var(x).exp_()\n dist = Normal(mu, var)\n return dist"
},
{
"identifier": "RnnDecoder",
"path": "piano_arranger/models/ptvae.py",
"snippet": "class RnnDecoder(nn.Module):\n\n def __init__(self, input_dim=36, z_input_dim=256,\n hidden_dim=512, z_dim=256, num_step=32):\n super(RnnDecoder, self).__init__()\n self.z2dec_hid = nn.Linear(z_dim, hidden_dim)\n self.z2dec_in = nn.Linear(z_dim, z_input_dim)\n self.gru = nn.GRU(input_dim + z_input_dim, hidden_dim,\n batch_first=True,\n bidirectional=False)\n self.init_input = nn.Parameter(torch.rand(36))\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n self.z_dim = z_dim\n self.root_out = nn.Linear(hidden_dim, 12)\n self.chroma_out = nn.Linear(hidden_dim, 24)\n self.bass_out = nn.Linear(hidden_dim, 12)\n self.num_step = num_step\n\n def forward(self, z_chd, inference, tfr, c=None):\n # z_chd: (B, z_chd_size)\n bs = z_chd.size(0)\n z_chd_hid = self.z2dec_hid(z_chd).unsqueeze(0)\n z_chd_in = self.z2dec_in(z_chd).unsqueeze(1)\n if inference:\n tfr = 0.\n token = self.init_input.repeat(bs, 1).unsqueeze(1)\n recon_root = []\n recon_chroma = []\n recon_bass = []\n\n for t in range(int(self.num_step / 4)):\n chd, z_chd_hid = \\\n self.gru(torch.cat([token, z_chd_in], dim=-1), z_chd_hid)\n r_root = self.root_out(chd) # (bs, 1, 12)\n r_chroma = self.chroma_out(chd).view(bs, 1, 12, 2).contiguous()\n r_bass = self.bass_out(chd) # (bs, 1, 12)\n recon_root.append(r_root)\n recon_chroma.append(r_chroma)\n recon_bass.append(r_bass)\n\n t_root = torch.zeros(bs, 1, 12).to(z_chd.device).float()\n t_root[torch.arange(0, bs), 0, r_root.max(-1)[-1]] = 1.\n t_chroma = r_chroma.max(-1)[-1].float()\n t_bass = torch.zeros(bs, 1, 12).to(z_chd.device).float()\n t_bass[torch.arange(0, bs), 0, r_bass.max(-1)[-1]] = 1.\n token = torch.cat([t_root, t_chroma, t_bass], dim=-1)\n if t == self.num_step - 1:\n break\n teacher_force = random.random() < tfr\n if teacher_force and not inference:\n token = c[:, t].unsqueeze(1)\n recon_root = torch.cat(recon_root, dim=1)\n recon_chroma = torch.cat(recon_chroma, dim=1)\n recon_bass = torch.cat(recon_bass, dim=1)\n return recon_root, recon_chroma, recon_bass"
},
{
"identifier": "PtvaeDecoder",
"path": "piano_arranger/models/ptvae.py",
"snippet": "class PtvaeDecoder(nn.Module):\n\n def __init__(self, device=None, note_embedding=None,\n max_simu_note=16, max_pitch=127, min_pitch=0,\n pitch_sos=128, pitch_eos=129, pitch_pad=130,\n dur_pad=2, dur_width=5, num_step=32,\n note_emb_size=128, z_size=512,\n dec_emb_hid_size=128,\n dec_time_hid_size=1024, dec_notes_hid_size=512,\n dec_z_in_size=256, dec_dur_hid_size=16):\n super(PtvaeDecoder, self).__init__()\n # Parameters\n # note and time\n self.max_pitch = max_pitch # the highest pitch in train/val set.\n self.min_pitch = min_pitch # the lowest pitch in train/val set.\n self.pitch_sos = pitch_sos\n self.pitch_eos = pitch_eos\n self.pitch_pad = pitch_pad\n self.pitch_range = max_pitch - min_pitch + 3 # 88, not including pad.\n self.dur_pad = dur_pad\n self.dur_width = dur_width\n self.note_size = self.pitch_range + dur_width\n self.max_simu_note = max_simu_note # the max # of notes at each ts.\n self.num_step = num_step # 32\n\n # device\n if device is None:\n self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n else:\n self.device = device\n\n self.note_emb_size = note_emb_size\n self.z_size = z_size\n\n # decoder\n self.dec_z_in_size = dec_z_in_size\n self.dec_emb_hid_size = dec_emb_hid_size\n self.dec_time_hid_size = dec_time_hid_size\n self.dec_init_input = \\\n nn.Parameter(torch.rand(2 * self.dec_emb_hid_size))\n self.dec_notes_hid_size = dec_notes_hid_size\n self.dur_sos_token = nn.Parameter(torch.rand(self.dur_width))\n self.dec_dur_hid_size = dec_dur_hid_size\n\n # Modules\n # For both encoder and decoder\n if note_embedding is None:\n self.note_embedding = nn.Linear(self.note_size, note_emb_size)\n else:\n self.note_embedding = note_embedding\n self.z2dec_hid_linear = nn.Linear(self.z_size, dec_time_hid_size)\n self.z2dec_in_linear = nn.Linear(self.z_size, dec_z_in_size)\n self.dec_notes_emb_gru = nn.GRU(note_emb_size, dec_emb_hid_size,\n num_layers=1, batch_first=True,\n bidirectional=True)\n self.dec_time_gru = \\\n nn.GRU(dec_z_in_size + 2 * dec_emb_hid_size,\n dec_time_hid_size,\n num_layers=1, batch_first=True,\n bidirectional=False)\n self.dec_time_to_notes_hid = nn.Linear(dec_time_hid_size,\n dec_notes_hid_size)\n self.dec_notes_gru = nn.GRU(dec_time_hid_size + note_emb_size,\n dec_notes_hid_size,\n num_layers=1, batch_first=True,\n bidirectional=False)\n self.pitch_out_linear = nn.Linear(dec_notes_hid_size, self.pitch_range)\n self.dec_dur_gru = nn.GRU(dur_width, dec_dur_hid_size,\n num_layers=1, batch_first=True,\n bidirectional=False)\n self.dur_hid_linear = nn.Linear(self.pitch_range + dec_notes_hid_size,\n dec_dur_hid_size)\n self.dur_out_linear = nn.Linear(dec_dur_hid_size, 2)\n\n def get_len_index_tensor(self, ind_x):\n \"\"\"Calculate the lengths ((B, 32), torch.LongTensor) of pgrid.\"\"\"\n with torch.no_grad():\n lengths = self.max_simu_note - \\\n (ind_x[:, :, :, 0] - self.pitch_pad == 0).sum(dim=-1)\n return lengths\n\n def index_tensor_to_multihot_tensor(self, ind_x):\n \"\"\"Transfer piano_grid to multi-hot piano_grid.\"\"\"\n # ind_x: (B, 32, max_simu_note, 1 + dur_width)\n with torch.no_grad():\n dur_part = ind_x[:, :, :, 1:].float()\n out = torch.zeros(\n [ind_x.size(0) * self.num_step * self.max_simu_note,\n self.pitch_range + 1],\n dtype=torch.float).to(self.device)\n\n out[range(0, out.size(0)), ind_x[:, :, :, 0].view(-1)] = 1.\n out = out.view(-1, 32, self.max_simu_note, self.pitch_range + 1)\n out = torch.cat([out[:, :, :, 0: self.pitch_range], dur_part],\n dim=-1)\n return out\n\n def get_sos_token(self):\n sos = 
torch.zeros(self.note_size)\n sos[self.pitch_sos] = 1.\n sos[self.pitch_range:] = 2.\n sos = sos.to(self.device)\n return sos\n\n def dur_ind_to_dur_token(self, inds, batch_size):\n token = torch.zeros(batch_size, self.dur_width)\n token[range(0, batch_size), inds] = 1.\n token = token.to(self.device)\n return token\n\n def pitch_dur_ind_to_note_token(self, pitch_inds, dur_inds, batch_size):\n token = torch.zeros(batch_size, self.note_size)\n token[range(0, batch_size), pitch_inds] = 1.\n token[:, self.pitch_range:] = dur_inds\n token = token.to(self.device)\n token = self.note_embedding(token)\n return token\n\n def decode_note(self, note_summary, batch_size):\n # note_summary: (B, 1, dec_notes_hid_size)\n # This function estimate pitch, and dur for a single pitch based on\n # note_summary.\n # Returns: est_pitch (B, 1, pitch_range), est_durs (B, 1, dur_width, 2)\n\n # The estimated pitch is calculated by a linear layer.\n est_pitch = self.pitch_out_linear(note_summary).squeeze(1)\n # est_pitch: (B, pitch_range)\n\n # The estimated dur is calculated by a 5-step gru.\n dur_hid = note_summary.transpose(0, 1)\n # dur_hid: (1, B, dec_notes_hid_size)\n dur_hid = \\\n self.dur_hid_linear(torch.cat([dur_hid,\n est_pitch.unsqueeze(0)],\n dim=-1))\n token = self.dur_sos_token.repeat(batch_size, 1).unsqueeze(1)\n # token: (B, 1, dur_width)\n\n est_durs = torch.zeros(batch_size, self.dur_width, 2)\n est_durs = est_durs.to(self.device)\n\n for t in range(self.dur_width):\n token, dur_hid = self.dec_dur_gru(token, dur_hid)\n est_dur = self.dur_out_linear(token).squeeze(1)\n est_durs[:, t] = est_dur\n if t == self.dur_width - 1:\n break\n token_inds = est_dur.max(1)[1]\n token = self.dur_ind_to_dur_token(token_inds,\n batch_size).unsqueeze(1)\n return est_pitch, est_durs\n\n def decode_notes(self, notes_summary, batch_size, notes, inference,\n teacher_forcing_ratio=0.5):\n # notes_summary: (B, 1, dec_time_hid_size)\n # notes: (B, max_simu_note, note_emb_size), ground_truth\n notes_summary_hid = \\\n self.dec_time_to_notes_hid(notes_summary.transpose(0, 1))\n if inference:\n assert teacher_forcing_ratio == 0\n assert notes is None\n sos = self.get_sos_token() # (note_size,)\n token = self.note_embedding(sos).repeat(batch_size, 1).unsqueeze(1)\n # hid: (B, 1, note_emb_size)\n else:\n token = notes[:, 0].unsqueeze(1)\n\n predicted_notes = torch.zeros(batch_size, self.max_simu_note,\n self.note_emb_size)\n predicted_notes[:, :, self.pitch_range:] = 2.\n predicted_notes[:, 0] = token.squeeze(1) # fill sos index\n lengths = torch.zeros(batch_size)\n predicted_notes = predicted_notes.to(self.device)\n lengths = lengths.to(self.device)\n pitch_outs = []\n dur_outs = []\n\n for t in range(1, self.max_simu_note):\n note_summary, notes_summary_hid = \\\n self.dec_notes_gru(torch.cat([notes_summary, token], dim=-1),\n notes_summary_hid)\n # note_summary: (B, 1, dec_notes_hid_size)\n # notes_summary_hid: (1, B, dec_time_hid_size)\n\n est_pitch, est_durs = self.decode_note(note_summary, batch_size)\n # est_pitch: (B, pitch_range)\n # est_durs: (B, dur_width, 2)\n\n pitch_outs.append(est_pitch.unsqueeze(1))\n dur_outs.append(est_durs.unsqueeze(1))\n pitch_inds = est_pitch.max(1)[1]\n dur_inds = est_durs.max(2)[1]\n predicted = self.pitch_dur_ind_to_note_token(pitch_inds, dur_inds,\n batch_size)\n # predicted: (B, note_size)\n\n predicted_notes[:, t] = predicted\n eos_samp_inds = (pitch_inds == self.pitch_eos)\n lengths[eos_samp_inds & (lengths == 0)] = t\n\n if t == self.max_simu_note - 1:\n break\n teacher_force = 
random.random() < teacher_forcing_ratio\n if inference or not teacher_force:\n token = predicted.unsqueeze(1)\n else:\n token = notes[:, t].unsqueeze(1)\n lengths[lengths == 0] = t\n pitch_outs = torch.cat(pitch_outs, dim=1)\n dur_outs = torch.cat(dur_outs, dim=1)\n return pitch_outs, dur_outs, predicted_notes, lengths\n\n def decoder(self, z, inference, x, lengths, teacher_forcing_ratio1,\n teacher_forcing_ratio2):\n # z: (B, z_size)\n # x: (B, num_step, max_simu_note, note_emb_size)\n batch_size = z.size(0)\n z_hid = self.z2dec_hid_linear(z).unsqueeze(0)\n # z_hid: (1, B, dec_time_hid_size)\n z_in = self.z2dec_in_linear(z).unsqueeze(1)\n # z_in: (B, dec_z_in_size)\n\n if inference:\n assert x is None\n assert lengths is None\n assert teacher_forcing_ratio1 == 0\n assert teacher_forcing_ratio2 == 0\n else:\n x_summarized = x.view(-1, self.max_simu_note, self.note_emb_size)\n x_summarized = pack_padded_sequence(x_summarized, lengths.view(-1),\n batch_first=True,\n enforce_sorted=False)\n x_summarized = self.dec_notes_emb_gru(x_summarized)[-1].\\\n transpose(0, 1).contiguous()\n x_summarized = x_summarized.view(-1, self.num_step,\n 2 * self.dec_emb_hid_size)\n\n pitch_outs = []\n dur_outs = []\n token = self.dec_init_input.repeat(batch_size, 1).unsqueeze(1)\n # (B, 2 * dec_emb_hid_size)\n\n for t in range(self.num_step):\n notes_summary, z_hid = \\\n self.dec_time_gru(torch.cat([token, z_in], dim=-1), z_hid)\n if inference:\n pitch_out, dur_out, predicted_notes, predicted_lengths = \\\n self.decode_notes(notes_summary, batch_size, None,\n inference, teacher_forcing_ratio2)\n else:\n pitch_out, dur_out, predicted_notes, predicted_lengths = \\\n self.decode_notes(notes_summary, batch_size, x[:, t],\n inference, teacher_forcing_ratio2)\n pitch_outs.append(pitch_out.unsqueeze(1))\n dur_outs.append(dur_out.unsqueeze(1))\n if t == self.num_step - 1:\n break\n\n teacher_force = random.random() < teacher_forcing_ratio1\n if teacher_force and not inference:\n token = x_summarized[:, t].unsqueeze(1)\n else:\n token = pack_padded_sequence(predicted_notes,\n predicted_lengths.cpu(),\n batch_first=True,\n enforce_sorted=False)\n token = self.dec_notes_emb_gru(token)[-1].\\\n transpose(0, 1).contiguous()\n token = token.view(-1, 2 * self.dec_emb_hid_size).unsqueeze(1)\n pitch_outs = torch.cat(pitch_outs, dim=1)\n dur_outs = torch.cat(dur_outs, dim=1)\n # print(pitch_outs.size())\n # print(dur_outs.size())\n return pitch_outs, dur_outs\n\n def forward(self, z, inference, x, lengths, teacher_forcing_ratio1,\n teacher_forcing_ratio2):\n return self.decoder(z, inference, x, lengths, teacher_forcing_ratio1,\n teacher_forcing_ratio2)\n\n def recon_loss(self, x, recon_pitch, recon_dur, weights=(1, 0.5),\n weighted_dur=False):\n pitch_loss_func = \\\n nn.CrossEntropyLoss(ignore_index=self.pitch_pad)\n recon_pitch = recon_pitch.view(-1, recon_pitch.size(-1))\n #print(recon_pitch.shape)\n \n gt_pitch = x[:, :, 1:, 0].contiguous().view(-1)\n #print(gt_pitch.shape)\n pitch_loss = pitch_loss_func(recon_pitch, gt_pitch)\n\n dur_loss_func = \\\n nn.CrossEntropyLoss(ignore_index=self.dur_pad)\n if not weighted_dur:\n recon_dur = recon_dur.view(-1, 2)\n gt_dur = x[:, :, 1:, 1:].contiguous().view(-1)\n dur_loss = dur_loss_func(recon_dur, gt_dur)\n else:\n recon_dur = recon_dur.view(-1, self.dur_width, 2)\n gt_dur = x[:, :, 1:, 1:].contiguous().view(-1, self.dur_width)\n dur0 = dur_loss_func(recon_dur[:, 0, :], gt_dur[:, 0])\n dur1 = dur_loss_func(recon_dur[:, 1, :], gt_dur[:, 1])\n dur2 = dur_loss_func(recon_dur[:, 2, :], 
gt_dur[:, 2])\n dur3 = dur_loss_func(recon_dur[:, 3, :], gt_dur[:, 3])\n dur4 = dur_loss_func(recon_dur[:, 4, :], gt_dur[:, 4])\n w = torch.tensor([1, 0.6, 0.4, 0.3, 0.3],\n device=recon_dur.device).float()\n dur_loss = \\\n w[0] * dur0 + \\\n w[1] * dur1 + \\\n w[2] * dur2 + \\\n w[3] * dur3 + \\\n w[4] * dur4\n loss = weights[0] * pitch_loss + weights[1] * dur_loss\n return loss, pitch_loss, dur_loss\n\n def emb_x(self, x):\n lengths = self.get_len_index_tensor(x)\n x = self.index_tensor_to_multihot_tensor(x)\n embedded = self.note_embedding(x)\n return embedded, lengths\n\n def output_to_numpy(self, recon_pitch, recon_dur):\n est_pitch = recon_pitch.max(-1)[1].unsqueeze(-1) # (B, 32, 11, 1)\n est_dur = recon_dur.max(-1)[1] # (B, 32, 11, 5)\n est_x = torch.cat([est_pitch, est_dur], dim=-1) # (B, 32, 11, 6)\n est_x = est_x.cpu().numpy()\n recon_pitch = recon_pitch.cpu().numpy()\n recon_dur = recon_dur.cpu().numpy()\n return est_x, recon_pitch, recon_dur\n\n def pr_to_notes(self, pr, bpm=80, start=0., one_hot=False):\n pr_matrix = self.pr_to_pr_matrix(pr, one_hot)\n alpha = 0.25 * 60 / bpm\n notes = []\n for t in range(32):\n for p in range(128):\n if pr_matrix[t, p] >= 1:\n s = alpha * t + start\n e = alpha * (t + pr_matrix[t, p]) + start\n notes.append(pretty_midi.Note(100, int(p), s, e))\n return notes\n \n def pr_matrix_to_note(self, pr_matrix, bpm=120, start=0):\n alpha = 0.25 * 60 / bpm\n notes = []\n for t in range(32):\n for p in range(128):\n if pr_matrix[t, p] >= 1:\n s = alpha * t + start\n e = alpha * (t + pr_matrix[t, p]) + start\n notes.append(pretty_midi.Note(100, int(p), s, e))\n return notes\n\n def grid_to_pr_and_notes(self, grid, bpm=60., start=0.):\n if grid.shape[1] == self.max_simu_note:\n grid = grid[:, 1:]\n pr = np.zeros((32, 128), dtype=int)\n alpha = 0.25 * 60 / bpm\n notes = []\n for t in range(32):\n for n in range(10):\n note = grid[t, n]\n if note[0] == self.pitch_eos:\n break\n pitch = note[0] + self.min_pitch\n dur = int(''.join([str(_) for _ in note[1:]]), 2) + 1\n pr[t, pitch] = min(dur, 32 - t)\n notes.append(\n pretty_midi.Note(100, int(pitch), start + t * alpha,\n start + (t + dur) * alpha))\n return pr, notes"
},
{
"identifier": "TextureEncoder",
"path": "piano_arranger/models/ptvae.py",
"snippet": "class TextureEncoder(nn.Module):\n\n def __init__(self, emb_size, hidden_dim, z_dim, num_channel=10, for_contrastive=False):\n '''input must be piano_mat: (B, 32, 128)'''\n super(TextureEncoder, self).__init__()\n self.cnn = nn.Sequential(nn.Conv2d(1, num_channel, kernel_size=(4, 12),\n stride=(4, 1), padding=0),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(1, 4),\n stride=(1, 4)))\n self.fc1 = nn.Linear(num_channel * 29, 1000)\n self.fc2 = nn.Linear(1000, emb_size)\n self.gru = nn.GRU(emb_size, hidden_dim, batch_first=True,\n bidirectional=True)\n self.linear_mu = nn.Linear(hidden_dim * 2, z_dim)\n self.linear_var = nn.Linear(hidden_dim * 2, z_dim)\n self.emb_size = emb_size\n self.hidden_dim = hidden_dim\n self.z_dim = z_dim\n self.for_contrastive = for_contrastive\n\n def forward(self, pr):\n # pr: (bs, 32, 128)\n bs = pr.size(0)\n pr = pr.unsqueeze(1)\n pr = self.cnn(pr).view(bs, 8, -1)\n pr = self.fc2(self.fc1(pr)) # (bs, 8, emb_size)\n pr = self.gru(pr)[-1]\n pr = pr.transpose_(0, 1).contiguous()\n pr = pr.view(pr.size(0), -1)\n mu = self.linear_mu(pr)\n var = self.linear_var(pr).exp_()\n dist = Normal(mu, var)\n if self.for_contrastive:\n return mu, pr\n else:\n return dist"
}
] | from .amc_dl.torch_plus import PytorchModel
from .amc_dl.torch_plus.train_utils import get_zs_from_dists, kl_with_normal
from torch import nn
from torch.distributions import Normal
from .ptvae import RnnEncoder, RnnDecoder, PtvaeDecoder, TextureEncoder
import torch
import numpy as np | 9,526 | return est_x
def inference(self, pr_mat, c, sample):
self.eval()
with torch.no_grad():
dist_chd = self.chd_encoder(c)
dist_rhy = self.rhy_encoder(pr_mat)
z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], sample)
dec_z = torch.cat([z_chd, z_rhy], dim=-1)
pitch_outs, dur_outs = self.decoder(dec_z, True, None,
None, 0., 0.)
est_x, _, _ = self.decoder.output_to_numpy(pitch_outs, dur_outs)
return est_x
def swap(self, pr_mat1, pr_mat2, c1, c2, fix_rhy, fix_chd):
pr_mat = pr_mat1 if fix_rhy else pr_mat2
c = c1 if fix_chd else c2
est_x = self.inference(pr_mat, c, sample=False)
return est_x
def posterior_sample(self, pr_mat, c, scale=None, sample_chd=True,
sample_txt=True):
if scale is None and sample_chd and sample_txt:
est_x = self.inference(pr_mat, c, sample=True)
else:
dist_chd, dist_rhy = self.inference_encode(pr_mat, c)
if scale is not None:
mean_chd = dist_chd.mean
mean_rhy = dist_rhy.mean
# std_chd = torch.ones_like(dist_chd.mean) * scale
# std_rhy = torch.ones_like(dist_rhy.mean) * scale
std_chd = dist_chd.scale * scale
std_rhy = dist_rhy.scale * scale
dist_rhy = Normal(mean_rhy, std_rhy)
dist_chd = Normal(mean_chd, std_chd)
z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)
if not sample_chd:
z_chd = dist_chd.mean
if not sample_txt:
z_rhy = dist_rhy.mean
est_x = self.inference_decode(z_chd, z_rhy)
return est_x
def prior_sample(self, x, c, sample_chd=False, sample_rhy=False,
scale=1.):
dist_chd, dist_rhy = self.inference_encode(x, c)
mean = torch.zeros_like(dist_rhy.mean)
loc = torch.ones_like(dist_rhy.mean) * scale
if sample_chd:
dist_chd = Normal(mean, loc)
if sample_rhy:
dist_rhy = Normal(mean, loc)
z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)
return self.inference_decode(z_chd, z_rhy)
def gt_sample(self, x):
out = x[:, :, 1:].numpy()
return out
def interp(self, pr_mat1, c1, pr_mat2, c2, interp_chd=False,
interp_rhy=False, int_count=10):
dist_chd1, dist_rhy1 = self.inference_encode(pr_mat1, c1)
dist_chd2, dist_rhy2 = self.inference_encode(pr_mat2, c2)
[z_chd1, z_rhy1, z_chd2, z_rhy2] = \
get_zs_from_dists([dist_chd1, dist_rhy1, dist_chd2, dist_rhy2],
False)
if interp_chd:
z_chds = self.interp_z(z_chd1, z_chd2, int_count)
else:
z_chds = z_chd1.unsqueeze(1).repeat(1, int_count, 1)
if interp_rhy:
z_rhys = self.interp_z(z_rhy1, z_rhy2, int_count)
else:
z_rhys = z_rhy1.unsqueeze(1).repeat(1, int_count, 1)
bs = z_chds.size(0)
z_chds = z_chds.view(bs * int_count, -1).contiguous()
z_rhys = z_rhys.view(bs * int_count, -1).contiguous()
estxs = self.inference_decode(z_chds, z_rhys)
return estxs.reshape((bs, int_count, 32, 15, -1))
def interp_z(self, z1, z2, int_count=10):
z1 = z1.numpy()
z2 = z2.numpy()
zs = torch.stack([self.interp_path(zz1, zz2, int_count)
for zz1, zz2 in zip(z1, z2)], dim=0)
return zs
def interp_path(self, z1, z2, interpolation_count=10):
result_shape = z1.shape
z1 = z1.reshape(-1)
z2 = z2.reshape(-1)
def slerp2(p0, p1, t):
omega = np.arccos(
np.dot(p0 / np.linalg.norm(p0), p1 / np.linalg.norm(p1)))
so = np.sin(omega)
return np.sin((1.0 - t) * omega)[:, None] / so * p0[
None] + np.sin(
t * omega)[:, None] / so * p1[None]
percentages = np.linspace(0.0, 1.0, interpolation_count)
normalized_z1 = z1 / np.linalg.norm(z1)
normalized_z2 = z2 / np.linalg.norm(z2)
dirs = slerp2(normalized_z1, normalized_z2, percentages)
length = np.linspace(np.log(np.linalg.norm(z1)),
np.log(np.linalg.norm(z2)),
interpolation_count)
out = (dirs * np.exp(length[:, None])).reshape(
[interpolation_count] + list(result_shape))
# out = np.array([(1 - t) * z1 + t * z2 for t in percentages])
return torch.from_numpy(out).to(self.device).float()
@staticmethod
def init_model(device=None, chd_size=256, txt_size=256, num_channel=10):
name = 'disvae'
if device is None:
device = torch.device('cuda' if torch.cuda.is_available()
else 'cpu')
# chd_encoder = RnnEncoder(36, 1024, 256)
|
"""
Credit to Z. Wang et al., "Learning interpretable representation for controllable polyphonic music generation," ISMIR 2020.
https://github.com/ZZWaang/polyphonic-chord-texture-disentanglement
"""
class DisentangleVAE(PytorchModel):
def __init__(self, name, device, chd_encoder, rhy_encoder, decoder,
chd_decoder):
super(DisentangleVAE, self).__init__(name, device)
self.chd_encoder = chd_encoder
self.rhy_encoder = rhy_encoder
self.decoder = decoder
self.num_step = self.decoder.num_step
self.chd_decoder = chd_decoder
def confuse_prmat(self, pr_mat):
non_zero_ent = torch.nonzero(pr_mat.long())
eps = torch.randint(0, 2, (non_zero_ent.size(0),))
eps = ((2 * eps) - 1).long()
confuse_ent = torch.clamp(non_zero_ent[:, 2] + eps, min=0, max=127)
pr_mat[non_zero_ent[:, 0], non_zero_ent[:, 1], confuse_ent] = \
pr_mat[non_zero_ent[:, 0], non_zero_ent[:, 1], non_zero_ent[:, 2]]
return pr_mat
def get_chroma(self, pr_mat):
bs = pr_mat.size(0)
pad = torch.zeros(bs, 32, 4).to(self.device)
pr_mat = torch.cat([pr_mat, pad], dim=-1)
c = pr_mat.view(bs, 32, -1, 12).contiguous()
c = c.sum(dim=-2) # (bs, 32, 12)
c = c.view(bs, 8, 4, 12)
c = c.sum(dim=-2).float()
c = torch.log(c + 1)
return c.to(self.device)
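    # Editor's note (shape walk-through of get_chroma above, assuming the
    # default 32-step, 128-pitch piano-roll):
    #   pr_mat                   : (bs, 32, 128)
    #   pad with 4 zero pitches  : (bs, 32, 132)
    #   view (bs, 32, -1, 12)    : (bs, 32, 11, 12) -> sum octaves -> (bs, 32, 12)
    #   view (bs, 8, 4, 12)      : sum the 4 steps in each beat    -> (bs, 8, 12)
    #   log(c + 1)               : compressed per-beat chroma map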
def run(self, x, c, pr_mat, tfr1, tfr2, tfr3, confuse=True):
embedded_x, lengths = self.decoder.emb_x(x)
# cc = self.get_chroma(pr_mat)
dist_chd = self.chd_encoder(c)
# pr_mat = self.confuse_prmat(pr_mat)
dist_rhy = self.rhy_encoder(pr_mat)
z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)
dec_z = torch.cat([z_chd, z_rhy], dim=-1)
pitch_outs, dur_outs = self.decoder(dec_z, False, embedded_x,
lengths, tfr1, tfr2)
recon_root, recon_chroma, recon_bass = self.chd_decoder(z_chd, False,
tfr3, c)
return pitch_outs, dur_outs, dist_chd, dist_rhy, recon_root, \
recon_chroma, recon_bass
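    # Editor's note (data flow of run(), inferred from the encoders/decoders
    # quoted in this row's context; latent sizes assume init_model defaults):
    #   c       : (bs, 8, 36) chord track, 12-d root + 12-d chroma + 12-d bass per beat
    #   pr_mat  : (bs, 32, 128) texture piano-roll
    #   dist_chd, dist_rhy : Normal posteriors from RnnEncoder / TextureEncoder
    #   z_chd, z_rhy (256-d each) -> dec_z (512-d) drives the PianoTree decoder,
    #   while z_chd alone is also decoded back into the chord sequence.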
def loss_function(self, x, c, recon_pitch, recon_dur, dist_chd,
dist_rhy, recon_root, recon_chroma, recon_bass,
beta, weights, weighted_dur=False):
recon_loss, pl, dl = self.decoder.recon_loss(x, recon_pitch, recon_dur,
weights, weighted_dur)
kl_loss, kl_chd, kl_rhy = self.kl_loss(dist_chd, dist_rhy)
chord_loss, root, chroma, bass = self.chord_loss(c, recon_root,
recon_chroma,
recon_bass)
loss = recon_loss + beta * kl_loss + chord_loss
return loss, recon_loss, pl, dl, kl_loss, kl_chd, kl_rhy, chord_loss, \
root, chroma, bass
def chord_loss(self, c, recon_root, recon_chroma, recon_bass):
loss_fun = nn.CrossEntropyLoss()
root = c[:, :, 0: 12].max(-1)[-1].view(-1).contiguous()
chroma = c[:, :, 12: 24].long().view(-1).contiguous()
bass = c[:, :, 24:].max(-1)[-1].view(-1).contiguous()
recon_root = recon_root.view(-1, 12).contiguous()
recon_chroma = recon_chroma.view(-1, 2).contiguous()
recon_bass = recon_bass.view(-1, 12).contiguous()
root_loss = loss_fun(recon_root, root)
chroma_loss = loss_fun(recon_chroma, chroma)
bass_loss = loss_fun(recon_bass, bass)
chord_loss = root_loss + chroma_loss + bass_loss
return chord_loss, root_loss, chroma_loss, bass_loss
def kl_loss(self, *dists):
# kl = kl_with_normal(dists[0])
kl_chd = kl_with_normal(dists[0])
kl_rhy = kl_with_normal(dists[1])
kl_loss = kl_chd + kl_rhy
return kl_loss, kl_chd, kl_rhy
def loss(self, x, c, pr_mat, dt_x, tfr1=0., tfr2=0., tfr3=0., beta=0.1, weights=(1, 0.5)):
#print(pr_mat.shape, dt_x.shape)
outputs = self.run(x, c, pr_mat, tfr1, tfr2, tfr3)
loss = self.loss_function(x, c, *outputs, beta, weights)
return loss
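    # Editor's note: a minimal training-step sketch. PytorchModel.forward routes
    # mode 'train' to loss(), so the call below is equivalent to model.loss(...);
    # the tensors x, c, pr_mat, dt_x are placeholders, not values from the repo.
    #   model = DisentangleVAE.init_model(device)
    #   outputs = model('train', x, c, pr_mat, dt_x, tfr1=0.5, tfr2=0.5, tfr3=0.5)
    #   total_loss = outputs[0]   # loss_function returns (loss, recon_loss, ...)
    #   total_loss.backward()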
# def inference(self, c, pr_mat):
# self.eval()
# with torch.no_grad():
# dist_chd = self.chd_encoder(c)
# # pr_mat = self.confuse_prmat(pr_mat)
# dist_rhy = self.rhy_encoder(pr_mat)
# z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)
# dec_z = torch.cat([z_chd, z_rhy], dim=-1)
# pitch_outs, dur_outs = self.decoder(dec_z, True, None,
# None, 0., 0.)
# est_x, _, _ = self.decoder.output_to_numpy(pitch_outs, dur_outs)
# return est_x
#
# def swap(self, c1, c2, pr_mat1, pr_mat2, fix_rhy, fix_chd):
# pr_mat = pr_mat1 if fix_rhy else pr_mat2
# c = c1 if fix_chd else c2
# est_x = self.inference(c, pr_mat)
# return est_x
def inference_encode(self, pr_mat, c):
self.eval()
with torch.no_grad():
dist_chd = self.chd_encoder(c)
dist_rhy = self.rhy_encoder(pr_mat)
return dist_chd, dist_rhy
def inference_decode(self, z_chd, z_rhy):
self.eval()
with torch.no_grad():
dec_z = torch.cat([z_chd, z_rhy], dim=-1)
pitch_outs, dur_outs = self.decoder(dec_z, True, None,
None, 0., 0.)
est_x, _, _ = self.decoder.output_to_numpy(pitch_outs, dur_outs)
return est_x
def inference(self, pr_mat, c, sample):
self.eval()
with torch.no_grad():
dist_chd = self.chd_encoder(c)
dist_rhy = self.rhy_encoder(pr_mat)
z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], sample)
dec_z = torch.cat([z_chd, z_rhy], dim=-1)
pitch_outs, dur_outs = self.decoder(dec_z, True, None,
None, 0., 0.)
est_x, _, _ = self.decoder.output_to_numpy(pitch_outs, dur_outs)
return est_x
def swap(self, pr_mat1, pr_mat2, c1, c2, fix_rhy, fix_chd):
pr_mat = pr_mat1 if fix_rhy else pr_mat2
c = c1 if fix_chd else c2
est_x = self.inference(pr_mat, c, sample=False)
return est_x
def posterior_sample(self, pr_mat, c, scale=None, sample_chd=True,
sample_txt=True):
if scale is None and sample_chd and sample_txt:
est_x = self.inference(pr_mat, c, sample=True)
else:
dist_chd, dist_rhy = self.inference_encode(pr_mat, c)
if scale is not None:
mean_chd = dist_chd.mean
mean_rhy = dist_rhy.mean
# std_chd = torch.ones_like(dist_chd.mean) * scale
# std_rhy = torch.ones_like(dist_rhy.mean) * scale
std_chd = dist_chd.scale * scale
std_rhy = dist_rhy.scale * scale
dist_rhy = Normal(mean_rhy, std_rhy)
dist_chd = Normal(mean_chd, std_chd)
z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)
if not sample_chd:
z_chd = dist_chd.mean
if not sample_txt:
z_rhy = dist_rhy.mean
est_x = self.inference_decode(z_chd, z_rhy)
return est_x
def prior_sample(self, x, c, sample_chd=False, sample_rhy=False,
scale=1.):
dist_chd, dist_rhy = self.inference_encode(x, c)
mean = torch.zeros_like(dist_rhy.mean)
loc = torch.ones_like(dist_rhy.mean) * scale
if sample_chd:
dist_chd = Normal(mean, loc)
if sample_rhy:
dist_rhy = Normal(mean, loc)
z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)
return self.inference_decode(z_chd, z_rhy)
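    # Editor's note: usage sketch for the two sampling entry points above
    # (inputs are placeholders; all three calls return the decoded note grid as
    # a numpy array via inference_decode):
    #   est = model.posterior_sample(pr_mat, c, scale=0.5)                    # perturb both codes
    #   est = model.posterior_sample(pr_mat, c, scale=0.5, sample_chd=False)  # keep chord code at its mean
    #   est = model.prior_sample(pr_mat, c, sample_rhy=True)                  # resample texture from the prior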
def gt_sample(self, x):
out = x[:, :, 1:].numpy()
return out
def interp(self, pr_mat1, c1, pr_mat2, c2, interp_chd=False,
interp_rhy=False, int_count=10):
dist_chd1, dist_rhy1 = self.inference_encode(pr_mat1, c1)
dist_chd2, dist_rhy2 = self.inference_encode(pr_mat2, c2)
[z_chd1, z_rhy1, z_chd2, z_rhy2] = \
get_zs_from_dists([dist_chd1, dist_rhy1, dist_chd2, dist_rhy2],
False)
if interp_chd:
z_chds = self.interp_z(z_chd1, z_chd2, int_count)
else:
z_chds = z_chd1.unsqueeze(1).repeat(1, int_count, 1)
if interp_rhy:
z_rhys = self.interp_z(z_rhy1, z_rhy2, int_count)
else:
z_rhys = z_rhy1.unsqueeze(1).repeat(1, int_count, 1)
bs = z_chds.size(0)
z_chds = z_chds.view(bs * int_count, -1).contiguous()
z_rhys = z_rhys.view(bs * int_count, -1).contiguous()
estxs = self.inference_decode(z_chds, z_rhys)
return estxs.reshape((bs, int_count, 32, 15, -1))
def interp_z(self, z1, z2, int_count=10):
z1 = z1.numpy()
z2 = z2.numpy()
zs = torch.stack([self.interp_path(zz1, zz2, int_count)
for zz1, zz2 in zip(z1, z2)], dim=0)
return zs
def interp_path(self, z1, z2, interpolation_count=10):
result_shape = z1.shape
z1 = z1.reshape(-1)
z2 = z2.reshape(-1)
def slerp2(p0, p1, t):
omega = np.arccos(
np.dot(p0 / np.linalg.norm(p0), p1 / np.linalg.norm(p1)))
so = np.sin(omega)
return np.sin((1.0 - t) * omega)[:, None] / so * p0[
None] + np.sin(
t * omega)[:, None] / so * p1[None]
percentages = np.linspace(0.0, 1.0, interpolation_count)
normalized_z1 = z1 / np.linalg.norm(z1)
normalized_z2 = z2 / np.linalg.norm(z2)
dirs = slerp2(normalized_z1, normalized_z2, percentages)
length = np.linspace(np.log(np.linalg.norm(z1)),
np.log(np.linalg.norm(z2)),
interpolation_count)
out = (dirs * np.exp(length[:, None])).reshape(
[interpolation_count] + list(result_shape))
# out = np.array([(1 - t) * z1 + t * z2 for t in percentages])
return torch.from_numpy(out).to(self.device).float()
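    # Editor's note: usage sketch for interp() above, which slerps between the
    # two latent codes (placeholder inputs):
    #   out = model.interp(pr_mat1, c1, pr_mat2, c2,
    #                      interp_chd=False, interp_rhy=True, int_count=8)
    #   # out: (bs, 8, 32, 15, 6) decoded note grids along the interpolation path
    # Note that interp_z calls .numpy() on the latent tensors, so they are
    # expected to be on CPU at that point.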
@staticmethod
def init_model(device=None, chd_size=256, txt_size=256, num_channel=10):
name = 'disvae'
if device is None:
device = torch.device('cuda' if torch.cuda.is_available()
else 'cpu')
# chd_encoder = RnnEncoder(36, 1024, 256) | chd_encoder = RnnEncoder(36, 1024, chd_size) | 3 | 2023-10-23 12:36:57+00:00 | 12k |
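# Editor's note: the row above stops inside init_model() (its `next_line` field
# shows `chd_encoder = RnnEncoder(36, 1024, chd_size)`). Based on the module
# signatures quoted in this row's context, the construction presumably continues
# along these lines -- a hedged sketch, not the verified original:
#   chd_encoder = RnnEncoder(36, 1024, chd_size)
#   rhy_encoder = TextureEncoder(256, 1024, txt_size, num_channel)
#   pt_decoder  = PtvaeDecoder(note_embedding=None, dec_dur_hid_size=64,
#                              z_size=chd_size + txt_size)
#   chd_decoder = RnnDecoder(z_dim=chd_size)
#   model = DisentangleVAE(name, device, chd_encoder, rhy_encoder,
#                          pt_decoder, chd_decoder)
#   return model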
bytedance/ColTrack | motlib/mot_models/network/dino_mot/layer/deformable_transformer/default_decoder.py | [
{
"identifier": "DeformableTransformerDecoderLayer",
"path": "models/dino/deformable_transformer.py",
"snippet": "class DeformableTransformerDecoderLayer(nn.Module):\n def __init__(self, d_model=256, d_ffn=1024,\n dropout=0.1, activation=\"relu\",\n n_levels=4, n_heads=8, n_points=4,\n use_deformable_box_attn=False,\n box_attn_type='roi_align',\n key_aware_type=None,\n decoder_sa_type='ca',\n module_seq=['sa', 'ca', 'ffn'],\n ):\n super().__init__()\n self.module_seq = module_seq\n assert sorted(module_seq) == ['ca', 'ffn', 'sa']\n\n # cross attention\n # self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)\n if use_deformable_box_attn:\n self.cross_attn = MSDeformableBoxAttention(d_model, n_levels, n_heads, n_boxes=n_points, used_func=box_attn_type)\n else:\n self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)\n self.dropout1 = nn.Dropout(dropout)\n self.norm1 = nn.LayerNorm(d_model)\n\n # self attention\n self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)\n self.dropout2 = nn.Dropout(dropout)\n self.norm2 = nn.LayerNorm(d_model)\n\n # ffn\n self.linear1 = nn.Linear(d_model, d_ffn)\n self.activation = _get_activation_fn(activation, d_model=d_ffn, batch_dim=1)\n self.dropout3 = nn.Dropout(dropout)\n self.linear2 = nn.Linear(d_ffn, d_model)\n self.dropout4 = nn.Dropout(dropout)\n self.norm3 = nn.LayerNorm(d_model)\n\n self.key_aware_type = key_aware_type\n self.key_aware_proj = None\n self.decoder_sa_type = decoder_sa_type\n assert decoder_sa_type in ['sa', 'ca_label', 'ca_content']\n\n if decoder_sa_type == 'ca_content':\n self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)\n\n\n\n\n def rm_self_attn_modules(self):\n self.self_attn = None\n self.dropout2 = None\n self.norm2 = None\n\n\n @staticmethod\n def with_pos_embed(tensor, pos):\n return tensor if pos is None else tensor + pos\n\n def forward_ffn(self, tgt):\n tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))\n tgt = tgt + self.dropout4(tgt2)\n tgt = self.norm3(tgt)\n return tgt\n\n def forward_sa(self,\n # for tgt\n tgt: Optional[Tensor], # nq, bs, d_model\n tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos))\n tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. 
Sine(pos)\n tgt_key_padding_mask: Optional[Tensor] = None,\n tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4\n\n # for memory\n memory: Optional[Tensor] = None, # hw, bs, d_model\n memory_key_padding_mask: Optional[Tensor] = None,\n memory_level_start_index: Optional[Tensor] = None, # num_levels\n memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2\n memory_pos: Optional[Tensor] = None, # pos for memory\n\n # sa\n self_attn_mask: Optional[Tensor] = None, # mask used for self-attention\n cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention\n ):\n # self attention\n if self.self_attn is not None:\n if self.decoder_sa_type == 'sa':\n q = k = self.with_pos_embed(tgt, tgt_query_pos)\n tgt2 = self.self_attn(q, k, tgt, attn_mask=self_attn_mask)[0]\n tgt = tgt + self.dropout2(tgt2)\n tgt = self.norm2(tgt)\n elif self.decoder_sa_type == 'ca_label':\n bs = tgt.shape[1]\n k = v = self.label_embedding.weight[:, None, :].repeat(1, bs, 1)\n tgt2 = self.self_attn(tgt, k, v, attn_mask=self_attn_mask)[0]\n tgt = tgt + self.dropout2(tgt2)\n tgt = self.norm2(tgt)\n elif self.decoder_sa_type == 'ca_content':\n tgt2 = self.self_attn(self.with_pos_embed(tgt, tgt_query_pos).transpose(0, 1),\n tgt_reference_points.transpose(0, 1).contiguous(),\n memory.transpose(0, 1), memory_spatial_shapes, memory_level_start_index, memory_key_padding_mask).transpose(0, 1)\n tgt = tgt + self.dropout2(tgt2)\n tgt = self.norm2(tgt)\n else:\n raise NotImplementedError(\"Unknown decoder_sa_type {}\".format(self.decoder_sa_type))\n\n return tgt\n\n def forward_ca(self,\n # for tgt\n tgt: Optional[Tensor], # nq, bs, d_model\n tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos))\n tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. Sine(pos)\n tgt_key_padding_mask: Optional[Tensor] = None,\n tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4\n\n # for memory\n memory: Optional[Tensor] = None, # hw, bs, d_model\n memory_key_padding_mask: Optional[Tensor] = None,\n memory_level_start_index: Optional[Tensor] = None, # num_levels\n memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2\n memory_pos: Optional[Tensor] = None, # pos for memory\n\n # sa\n self_attn_mask: Optional[Tensor] = None, # mask used for self-attention\n cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention\n ):\n # cross attention\n if self.key_aware_type is not None:\n\n if self.key_aware_type == 'mean':\n tgt = tgt + memory.mean(0, keepdim=True)\n elif self.key_aware_type == 'proj_mean':\n tgt = tgt + self.key_aware_proj(memory).mean(0, keepdim=True)\n else:\n raise NotImplementedError(\"Unknown key_aware_type: {}\".format(self.key_aware_type))\n tgt2 = self.cross_attn(self.with_pos_embed(tgt, tgt_query_pos).transpose(0, 1),\n tgt_reference_points.transpose(0, 1).contiguous(),\n memory.transpose(0, 1), memory_spatial_shapes, memory_level_start_index, memory_key_padding_mask).transpose(0, 1)\n tgt = tgt + self.dropout1(tgt2)\n tgt = self.norm1(tgt)\n\n return tgt\n\n def forward(self,\n # for tgt\n tgt: Optional[Tensor], # nq, bs, d_model\n tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos))\n tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. 
Sine(pos)\n tgt_key_padding_mask: Optional[Tensor] = None,\n tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4\n\n # for memory\n memory: Optional[Tensor] = None, # hw, bs, d_model\n memory_key_padding_mask: Optional[Tensor] = None,\n memory_level_start_index: Optional[Tensor] = None, # num_levels\n memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2\n memory_pos: Optional[Tensor] = None, # pos for memory\n\n # sa\n self_attn_mask: Optional[Tensor] = None, # mask used for self-attention\n cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention\n other_input = None\n ):\n\n for funcname in self.module_seq:\n if funcname == 'ffn':\n tgt = self.forward_ffn(tgt)\n elif funcname == 'ca':\n tgt = self.forward_ca(tgt, tgt_query_pos, tgt_query_sine_embed, \\\n tgt_key_padding_mask, tgt_reference_points, \\\n memory, memory_key_padding_mask, memory_level_start_index, \\\n memory_spatial_shapes, memory_pos, self_attn_mask, cross_attn_mask)\n elif funcname == 'sa':\n tgt = self.forward_sa(tgt, tgt_query_pos, tgt_query_sine_embed, \\\n tgt_key_padding_mask, tgt_reference_points, \\\n memory, memory_key_padding_mask, memory_level_start_index, \\\n memory_spatial_shapes, memory_pos, self_attn_mask, cross_attn_mask)\n else:\n raise ValueError('unknown funcname {}'.format(funcname))\n\n return tgt"
},
{
"identifier": "TransformerDecoder",
"path": "models/dino/deformable_transformer.py",
"snippet": "class TransformerDecoder(nn.Module):\n\n def __init__(self, decoder_layer, num_layers, norm=None, \n return_intermediate=False, \n d_model=256, query_dim=4, \n modulate_hw_attn=False,\n num_feature_levels=1,\n deformable_decoder=False,\n decoder_query_perturber=None,\n dec_layer_number=None, # number of queries each layer in decoder\n rm_dec_query_scale=False,\n dec_layer_share=False,\n dec_layer_dropout_prob=None,\n use_detached_boxes_dec_out=False\n ):\n super().__init__()\n if num_layers > 0:\n self.layers = _get_clones(decoder_layer, num_layers, layer_share=dec_layer_share)\n else:\n self.layers = []\n self.num_layers = num_layers\n self.norm = norm\n self.return_intermediate = return_intermediate\n assert return_intermediate, \"support return_intermediate only\"\n self.query_dim = query_dim\n assert query_dim in [2, 4], \"query_dim should be 2/4 but {}\".format(query_dim)\n self.num_feature_levels = num_feature_levels\n self.use_detached_boxes_dec_out = use_detached_boxes_dec_out\n\n \n self.ref_point_head = MLP(query_dim // 2 * d_model, d_model, d_model, 2)\n if not deformable_decoder:\n self.query_pos_sine_scale = MLP(d_model, d_model, d_model, 2)\n else:\n self.query_pos_sine_scale = None\n\n if rm_dec_query_scale:\n self.query_scale = None\n else:\n raise NotImplementedError\n self.query_scale = MLP(d_model, d_model, d_model, 2)\n self.bbox_embed = None\n self.class_embed = None\n\n self.d_model = d_model\n self.modulate_hw_attn = modulate_hw_attn\n self.deformable_decoder = deformable_decoder\n\n if not deformable_decoder and modulate_hw_attn:\n self.ref_anchor_head = MLP(d_model, d_model, 2, 2)\n else:\n self.ref_anchor_head = None\n\n self.decoder_query_perturber = decoder_query_perturber\n self.box_pred_damping = None\n\n self.dec_layer_number = dec_layer_number\n if dec_layer_number is not None:\n assert isinstance(dec_layer_number, list)\n assert len(dec_layer_number) == num_layers\n # assert dec_layer_number[0] == \n \n self.dec_layer_dropout_prob = dec_layer_dropout_prob\n if dec_layer_dropout_prob is not None:\n assert isinstance(dec_layer_dropout_prob, list)\n assert len(dec_layer_dropout_prob) == num_layers\n for i in dec_layer_dropout_prob:\n assert 0.0 <= i <= 1.0\n\n self.rm_detach = None\n\n def forward(self, tgt, memory,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n refpoints_unsigmoid: Optional[Tensor] = None, # num_queries, bs, 2\n # for memory\n level_start_index: Optional[Tensor] = None, # num_levels\n spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2\n valid_ratios: Optional[Tensor] = None,\n \n ):\n \"\"\"\n Input:\n - tgt: nq, bs, d_model\n - memory: hw, bs, d_model\n - pos: hw, bs, d_model\n - refpoints_unsigmoid: nq, bs, 2/4\n - valid_ratios/spatial_shapes: bs, nlevel, 2\n \"\"\"\n output = tgt\n\n intermediate = []\n reference_points = scale_sigmoid(refpoints_unsigmoid.sigmoid())\n ref_points = [reference_points] \n\n for layer_id, layer in enumerate(self.layers):\n # preprocess ref points\n if self.training and self.decoder_query_perturber is not None and layer_id != 0:\n reference_points = self.decoder_query_perturber(reference_points)\n\n\n\n if self.deformable_decoder:\n if reference_points.shape[-1] == 4:\n reference_points_input = reference_points[:, :, None] \\\n * torch.cat([valid_ratios, valid_ratios], -1)[None, :] # nq, bs, nlevel, 4\n else:\n assert 
reference_points.shape[-1] == 2\n reference_points_input = reference_points[:, :, None] * valid_ratios[None, :]\n query_sine_embed = gen_sineembed_for_position(reference_points_input[:, :, 0, :]) # nq, bs, 256*2 \n else:\n query_sine_embed = gen_sineembed_for_position(reference_points) # nq, bs, 256*2\n reference_points_input = None\n\n # conditional query\n # import ipdb; ipdb.set_trace()\n raw_query_pos = self.ref_point_head(query_sine_embed) # nq, bs, 256\n pos_scale = self.query_scale(output) if self.query_scale is not None else 1\n query_pos = pos_scale * raw_query_pos\n if not self.deformable_decoder:\n query_sine_embed = query_sine_embed[..., :self.d_model] * self.query_pos_sine_scale(output)\n\n # modulated HW attentions\n if not self.deformable_decoder and self.modulate_hw_attn:\n refHW_cond = scale_sigmoid(self.ref_anchor_head(output).sigmoid()) # nq, bs, 2\n query_sine_embed[..., self.d_model // 2:] *= (refHW_cond[..., 0] / reference_points[..., 2]).unsqueeze(-1)\n query_sine_embed[..., :self.d_model // 2] *= (refHW_cond[..., 1] / reference_points[..., 3]).unsqueeze(-1)\n\n # main process\n # import ipdb; ipdb.set_trace()\n dropflag = False\n if self.dec_layer_dropout_prob is not None:\n prob = random.random()\n if prob < self.dec_layer_dropout_prob[layer_id]:\n dropflag = True\n if not dropflag:\n output = layer(\n tgt = output,\n tgt_query_pos = query_pos,\n tgt_query_sine_embed = query_sine_embed,\n tgt_key_padding_mask = tgt_key_padding_mask,\n tgt_reference_points = reference_points_input,\n\n memory = memory,\n memory_key_padding_mask = memory_key_padding_mask,\n memory_level_start_index = level_start_index,\n memory_spatial_shapes = spatial_shapes,\n memory_pos = pos,\n\n self_attn_mask = tgt_mask,\n cross_attn_mask = memory_mask\n )\n\n # iter update\n if self.bbox_embed is not None:\n\n reference_before_sigmoid = inverse_sigmoid(reference_points)\n delta_unsig = self.bbox_embed[layer_id](output)\n outputs_unsig = delta_unsig + reference_before_sigmoid\n new_reference_points = scale_sigmoid(outputs_unsig.sigmoid())\n\n # select # ref points\n if self.dec_layer_number is not None and layer_id != self.num_layers - 1:\n # import ipdb; ipdb.set_trace()\n nq_now = new_reference_points.shape[0]\n select_number = self.dec_layer_number[layer_id + 1]\n if nq_now != select_number:\n class_unselected = self.class_embed[layer_id](output) # nq, bs, 91\n topk_proposals = torch.topk(class_unselected.max(-1)[0], select_number, dim=0)[1] # new_nq, bs\n new_reference_points = torch.gather(new_reference_points, 0, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) # unsigmoid\n\n if self.rm_detach and 'dec' in self.rm_detach:\n reference_points = new_reference_points\n else:\n reference_points = new_reference_points.detach()\n if self.use_detached_boxes_dec_out:\n ref_points.append(reference_points)\n else:\n ref_points.append(new_reference_points)\n\n\n intermediate.append(self.norm(output))\n if self.dec_layer_number is not None and layer_id != self.num_layers - 1:\n if nq_now != select_number:\n output = torch.gather(output, 0, topk_proposals.unsqueeze(-1).repeat(1, 1, self.d_model)) # unsigmoid\n\n\n return [\n [itm_out.transpose(0, 1) for itm_out in intermediate],\n [itm_refpoint.transpose(0, 1) for itm_refpoint in ref_points]\n ]"
},
{
"identifier": "gen_encoder_output_proposals",
"path": "models/dino/utils.py",
"snippet": "def gen_encoder_output_proposals(memory:Tensor, memory_padding_mask:Tensor, spatial_shapes:Tensor, learnedwh=None):\n \"\"\"\n Input:\n - memory: bs, \\sum{hw}, d_model\n - memory_padding_mask: bs, \\sum{hw}\n - spatial_shapes: nlevel, 2\n - learnedwh: 2\n Output:\n - output_memory: bs, \\sum{hw}, d_model\n - output_proposals: bs, \\sum{hw}, 4\n \"\"\"\n N_, S_, C_ = memory.shape\n base_scale = 4.0\n proposals = []\n _cur = 0\n for lvl, (H_, W_) in enumerate(spatial_shapes):\n mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H_ * W_)].view(N_, H_, W_, 1)\n valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)\n valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)\n\n # import ipdb; ipdb.set_trace()\n\n grid_y, grid_x = torch.meshgrid(torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),\n torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device))\n grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) # H_, W_, 2\n\n scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)\n grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale\n\n if learnedwh is not None:\n # import ipdb; ipdb.set_trace()\n wh = torch.ones_like(grid) * learnedwh.sigmoid() * (2.0 ** lvl)\n raise NotImplementedError\n else:\n wh = torch.ones_like(grid) * 0.05 * (2.0 ** lvl)\n\n # scale = torch.cat([W_[None].unsqueeze(-1), H_[None].unsqueeze(-1)], 1).view(1, 1, 1, 2).repeat(N_, 1, 1, 1)\n # grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale\n # wh = torch.ones_like(grid) / scale\n proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)\n proposals.append(proposal)\n _cur += (H_ * W_)\n # import ipdb; ipdb.set_trace()\n output_proposals = torch.cat(proposals, 1)\n output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)\n output_proposals = torch.cat(((output_proposals[..., :2] + 1) / 3, output_proposals[..., 2:] / 2), dim=-1)\n output_proposals = torch.log(output_proposals / (1 - output_proposals)) # unsigmoid\n output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))\n output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))\n\n output_memory = memory\n output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))\n output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))\n\n # output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))\n # output_memory = output_memory.masked_fill(~output_proposals_valid, float('inf'))\n\n return output_memory, output_proposals"
},
{
"identifier": "MLP",
"path": "models/dino/utils.py",
"snippet": "class MLP(nn.Module):\n \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x"
},
{
"identifier": "_get_activation_fn",
"path": "models/dino/utils.py",
"snippet": "def _get_activation_fn(activation, d_model=256, batch_dim=0):\n \"\"\"Return an activation function given a string\"\"\"\n if activation == \"relu\":\n return F.relu\n if activation == \"gelu\":\n return F.gelu\n if activation == \"glu\":\n return F.glu\n if activation == \"prelu\":\n return nn.PReLU()\n if activation == \"selu\":\n return F.selu\n\n raise RuntimeError(F\"activation should be relu/gelu, not {activation}.\")"
},
{
"identifier": "gen_sineembed_for_position",
"path": "models/dino/utils.py",
"snippet": "def gen_sineembed_for_position(pos_tensor):\n # n_query, bs, _ = pos_tensor.size()\n # sineembed_tensor = torch.zeros(n_query, bs, 256)\n scale = 2 * math.pi\n dim_t = torch.arange(128, dtype=torch.float32, device=pos_tensor.device)\n dim_t = 10000 ** (2 * (dim_t // 2) / 128)\n x_embed = pos_tensor[:, :, 0] * scale\n y_embed = pos_tensor[:, :, 1] * scale\n pos_x = x_embed[:, :, None] / dim_t\n pos_y = y_embed[:, :, None] / dim_t\n pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2)\n pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2)\n if pos_tensor.size(-1) == 2:\n pos = torch.cat((pos_y, pos_x), dim=2)\n elif pos_tensor.size(-1) == 4:\n w_embed = pos_tensor[:, :, 2] * scale\n pos_w = w_embed[:, :, None] / dim_t\n pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3).flatten(2)\n\n h_embed = pos_tensor[:, :, 3] * scale\n pos_h = h_embed[:, :, None] / dim_t\n pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2)\n\n pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2)\n else:\n raise ValueError(\"Unknown pos_tensor shape(-1):{}\".format(pos_tensor.size(-1)))\n return pos"
},
{
"identifier": "MSDeformAttn",
"path": "models/dino/ops/modules/ms_deform_attn.py",
"snippet": "class MSDeformAttn(nn.Module):\n def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):\n \"\"\"\n Multi-Scale Deformable Attention Module\n :param d_model hidden dimension\n :param n_levels number of feature levels\n :param n_heads number of attention heads\n :param n_points number of sampling points per attention head per feature level\n \"\"\"\n super().__init__()\n if d_model % n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads, but got {} and {}'.format(d_model, n_heads))\n _d_per_head = d_model // n_heads\n # you'd better set _d_per_head to a power of 2 which is more efficient in our CUDA implementation\n if not _is_power_of_2(_d_per_head):\n warnings.warn(\"You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 \"\n \"which is more efficient in our CUDA implementation.\")\n\n self.im2col_step = 64\n\n self.d_model = d_model\n self.n_levels = n_levels\n self.n_heads = n_heads\n self.n_points = n_points\n\n self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)\n self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)\n self.value_proj = nn.Linear(d_model, d_model)\n self.output_proj = nn.Linear(d_model, d_model)\n\n self._reset_parameters()\n\n def _reset_parameters(self):\n constant_(self.sampling_offsets.weight.data, 0.)\n thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)\n grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)\n grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2).repeat(1, self.n_levels, self.n_points, 1)\n for i in range(self.n_points):\n grid_init[:, :, i, :] *= i + 1\n with torch.no_grad():\n self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))\n constant_(self.attention_weights.weight.data, 0.)\n constant_(self.attention_weights.bias.data, 0.)\n xavier_uniform_(self.value_proj.weight.data)\n constant_(self.value_proj.bias.data, 0.)\n xavier_uniform_(self.output_proj.weight.data)\n constant_(self.output_proj.bias.data, 0.)\n\n def forward(self, query, reference_points, input_flatten, input_spatial_shapes, input_level_start_index, input_padding_mask=None):\n \"\"\"\n :param query (N, Length_{query}, C)\n :param reference_points (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area\n or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes\n :param input_flatten (N, \\sum_{l=0}^{L-1} H_l \\cdot W_l, C)\n :param input_spatial_shapes (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]\n :param input_level_start_index (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}]\n :param input_padding_mask (N, \\sum_{l=0}^{L-1} H_l \\cdot W_l), True for padding elements, False for non-padding elements\n\n :return output (N, Length_{query}, C)\n \"\"\"\n N, Len_q, _ = query.shape\n N, Len_in, _ = input_flatten.shape\n assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1]).sum() == Len_in\n\n value = self.value_proj(input_flatten)\n if input_padding_mask is not None:\n value = value.masked_fill(input_padding_mask[..., None], float(0))\n value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads)\n sampling_offsets = self.sampling_offsets(query).view(N, Len_q, self.n_heads, self.n_levels, self.n_points, 2)\n attention_weights = self.attention_weights(query).view(N, Len_q, 
self.n_heads, self.n_levels * self.n_points)\n attention_weights = F.softmax(attention_weights, -1).view(N, Len_q, self.n_heads, self.n_levels, self.n_points)\n # N, Len_q, n_heads, n_levels, n_points, 2\n if reference_points.shape[-1] == 2:\n offset_normalizer = torch.stack([input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1)\n sampling_locations = reference_points[:, :, None, :, None, :] \\\n + sampling_offsets / offset_normalizer[None, None, None, :, None, :]\n elif reference_points.shape[-1] == 4:\n sampling_locations = reference_points[:, :, None, :, None, :2] \\\n + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5\n else:\n raise ValueError(\n 'Last dim of reference_points must be 2 or 4, but get {} instead.'.format(reference_points.shape[-1]))\n\n # for amp\n if value.dtype == torch.float16:\n # for mixed precision\n output = MSDeformAttnFunction.apply(\n value.to(torch.float32), input_spatial_shapes, input_level_start_index, sampling_locations.to(torch.float32), attention_weights, self.im2col_step)\n output = output.to(torch.float16)\n output = self.output_proj(output)\n return output\n\n\n output = MSDeformAttnFunction.apply(\n value, input_spatial_shapes, input_level_start_index, sampling_locations, attention_weights, self.im2col_step)\n output = self.output_proj(output)\n return output"
},
{
"identifier": "inverse_sigmoid",
"path": "util/misc.py",
"snippet": "def inverse_sigmoid(x, eps=1e-3):\n if x.shape[-1] == 4:\n x = torch.cat(((x[..., :2] + 1) / 3, x[..., 2:] / 2), dim=-1)\n elif x.shape[-1] == 2:\n x = (x + 1) / 3\n else:\n raise ValueError\n x = x.clamp(min=0, max=1)\n x1 = x.clamp(min=eps)\n x2 = (1 - x).clamp(min=eps)\n return torch.log(x1/x2)"
},
{
"identifier": "scale_sigmoid",
"path": "util/misc.py",
"snippet": "def scale_sigmoid(x, eps=1e-3):\n if x.shape[-1] == 4:\n x = torch.cat((3 * (x[..., :2]) - 1, x[..., 2:] * 2), dim=-1)\n elif x.shape[-1] == 2:\n x = 3 * x - 1\n else:\n raise ValueError\n return x"
}
] | from turtle import forward
from models.dino.deformable_transformer import DeformableTransformerDecoderLayer, TransformerDecoder
from models.dino.utils import gen_encoder_output_proposals, MLP,_get_activation_fn, gen_sineembed_for_position
from models.dino.ops.modules import MSDeformAttn
from torch import nn, Tensor
from typing import Optional
from util.misc import inverse_sigmoid, scale_sigmoid
import torch
import math, random | 10,427 | memory = memory,
memory_key_padding_mask = memory_key_padding_mask,
memory_level_start_index = level_start_index,
memory_spatial_shapes = spatial_shapes,
memory_pos = pos,
self_attn_mask = tgt_mask,
cross_attn_mask = memory_mask,
other_input=other_input
)
if isinstance(output, (list, tuple)):
output, track_res_layer = output
else:
track_res_layer = None
# iter update
if self.bbox_embed is not None:
reference_before_sigmoid = inverse_sigmoid(reference_points)
delta_unsig = self.bbox_embed[layer_id](output)
outputs_unsig = delta_unsig + reference_before_sigmoid
new_reference_points = scale_sigmoid(outputs_unsig.sigmoid())
# select # ref points
if self.dec_layer_number is not None and layer_id != self.num_layers - 1:
# import ipdb; ipdb.set_trace()
nq_now = new_reference_points.shape[0]
select_number = self.dec_layer_number[layer_id + 1]
if nq_now != select_number:
class_unselected = self.class_embed[layer_id](output) # nq, bs, 91
topk_proposals = torch.topk(class_unselected.max(-1)[0], select_number, dim=0)[1] # new_nq, bs
new_reference_points = torch.gather(new_reference_points, 0, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) # unsigmoid
if self.rm_detach and 'dec' in self.rm_detach:
reference_points = new_reference_points
else:
reference_points = new_reference_points.detach()
if self.use_detached_boxes_dec_out:
ref_points.append(reference_points)
else:
ref_points.append(new_reference_points)
intermediate.append(self.norm(output))
if self.dec_layer_number is not None and layer_id != self.num_layers - 1:
if nq_now != select_number:
output = torch.gather(output, 0, topk_proposals.unsqueeze(-1).repeat(1, 1, self.d_model)) # unsigmoid
return output, reference_points, track_res_layer
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
refpoints_unsigmoid: Optional[Tensor] = None, # num_queries, bs, 2
# for memory
level_start_index: Optional[Tensor] = None, # num_levels
spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2
valid_ratios: Optional[Tensor] = None,
track_instances = None,
track_info = None
):
"""
Input:
- tgt: nq, bs, d_model
- memory: hw, bs, d_model
- pos: hw, bs, d_model
- refpoints_unsigmoid: nq, bs, 2/4
- valid_ratios/spatial_shapes: bs, nlevel, 2
"""
output = tgt
intermediate = []
reference_points = scale_sigmoid(refpoints_unsigmoid.sigmoid())
ref_points = [reference_points]
track_res = {}
for layer_id, layer in enumerate(self.layers):
# preprocess ref points
output, reference_points, track_res_layer = self.forward_one_layer(layer_id=layer_id, layer=layer, output=output, memory=memory,reference_points=reference_points, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask, level_start_index=level_start_index,spatial_shapes=spatial_shapes,valid_ratios=valid_ratios,pos=pos, tgt_mask=tgt_mask, memory_mask=memory_mask, ref_points=ref_points, intermediate=intermediate)
track_res[layer_id] = track_res_layer
return [
[itm_out.transpose(0, 1) for itm_out in intermediate],
[itm_refpoint.transpose(0, 1) for itm_refpoint in ref_points],
track_res
]
class MotDeformableTransformerDecoderLayer(DeformableTransformerDecoderLayer):
def __init__(self, args, layer_id, d_model=256, d_ffn=1024, dropout=0.1, activation="relu", n_levels=4, n_heads=8, n_points=4, use_deformable_box_attn=False, box_attn_type='roi_align', key_aware_type=None, decoder_sa_type='ca', module_seq=...):
self.args = args
self.layer_id = layer_id
self.dropout_p = dropout
self.n_heads = n_heads
super(DeformableTransformerDecoderLayer, self).__init__()
self.module_seq = module_seq
assert sorted(module_seq) == ['ca', 'ffn', 'sa']
# cross attention
# self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
if use_deformable_box_attn:
self.cross_attn = MSDeformableBoxAttention(d_model, n_levels, n_heads, n_boxes=n_points, used_func=box_attn_type)
else:
self.cross_attn = self.init_cross_attn(d_model, n_levels, n_heads, n_points)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# self attention
self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.dropout2 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn)
| # Copyright (2023) Bytedance Ltd. and/or its affiliates
class MotTransformerDecoder(TransformerDecoder):
def __init__(self, args, decoder_layer, num_layers, norm=None, return_intermediate=False, d_model=256, query_dim=4, modulate_hw_attn=False, num_feature_levels=1, deformable_decoder=False, decoder_query_perturber=None, dec_layer_number=None, rm_dec_query_scale=False, dec_layer_share=False, dec_layer_dropout_prob=None, use_detached_boxes_dec_out=False):
super(TransformerDecoder, self).__init__()
self.args = args
self.layers = decoder_layer
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
assert return_intermediate, "support return_intermediate only"
self.query_dim = query_dim
assert query_dim in [2, 4], "query_dim should be 2/4 but {}".format(query_dim)
self.num_feature_levels = num_feature_levels
self.use_detached_boxes_dec_out = use_detached_boxes_dec_out
self.ref_point_head = MLP(query_dim // 2 * d_model, d_model, d_model, 2)
if not deformable_decoder:
self.query_pos_sine_scale = MLP(d_model, d_model, d_model, 2)
else:
self.query_pos_sine_scale = None
if rm_dec_query_scale:
self.query_scale = None
else:
raise NotImplementedError
self.query_scale = MLP(d_model, d_model, d_model, 2)
self.bbox_embed = None
self.class_embed = None
self.d_model = d_model
self.modulate_hw_attn = modulate_hw_attn
self.deformable_decoder = deformable_decoder
if not deformable_decoder and modulate_hw_attn:
self.ref_anchor_head = MLP(d_model, d_model, 2, 2)
else:
self.ref_anchor_head = None
self.decoder_query_perturber = decoder_query_perturber
self.box_pred_damping = None
self.dec_layer_number = dec_layer_number
if dec_layer_number is not None:
assert isinstance(dec_layer_number, list)
assert len(dec_layer_number) == num_layers
# assert dec_layer_number[0] ==
self.dec_layer_dropout_prob = dec_layer_dropout_prob
if dec_layer_dropout_prob is not None:
assert isinstance(dec_layer_dropout_prob, list)
assert len(dec_layer_dropout_prob) == num_layers
for i in dec_layer_dropout_prob:
assert 0.0 <= i <= 1.0
self.rm_detach = None
self.init()
def init(self):
pass
def forward_one_layer(self, layer_id, layer, output, memory, reference_points, tgt_key_padding_mask, memory_key_padding_mask, level_start_index, spatial_shapes, valid_ratios, pos, tgt_mask, memory_mask, ref_points, intermediate, other_input=None):
if self.training and self.decoder_query_perturber is not None and layer_id != 0:
reference_points = self.decoder_query_perturber(reference_points)
if self.deformable_decoder:
if reference_points.shape[-1] == 4:
reference_points_input = reference_points[:, :, None] \
* torch.cat([valid_ratios, valid_ratios], -1)[None, :] # nq, bs, nlevel, 4
else:
assert reference_points.shape[-1] == 2
reference_points_input = reference_points[:, :, None] * valid_ratios[None, :]
query_sine_embed = gen_sineembed_for_position(reference_points_input[:, :, 0, :]) # nq, bs, 256*2
else:
query_sine_embed = gen_sineembed_for_position(reference_points) # nq, bs, 256*2
reference_points_input = None
# conditional query
# import ipdb; ipdb.set_trace()
raw_query_pos = self.ref_point_head(query_sine_embed) # nq, bs, 256
pos_scale = self.query_scale(output) if self.query_scale is not None else 1
query_pos = pos_scale * raw_query_pos
if not self.deformable_decoder:
query_sine_embed = query_sine_embed[..., :self.d_model] * self.query_pos_sine_scale(output)
# modulated HW attentions
if not self.deformable_decoder and self.modulate_hw_attn:
refHW_cond = scale_sigmoid(self.ref_anchor_head(output).sigmoid()) # nq, bs, 2
query_sine_embed[..., self.d_model // 2:] *= (refHW_cond[..., 0] / reference_points[..., 2]).unsqueeze(-1)
query_sine_embed[..., :self.d_model // 2] *= (refHW_cond[..., 1] / reference_points[..., 3]).unsqueeze(-1)
# main process
# import ipdb; ipdb.set_trace()
dropflag = False
if self.dec_layer_dropout_prob is not None:
prob = random.random()
if prob < self.dec_layer_dropout_prob[layer_id]:
dropflag = True
if not dropflag:
output = layer(
tgt = output,
tgt_query_pos = query_pos,
tgt_query_sine_embed = query_sine_embed,
tgt_key_padding_mask = tgt_key_padding_mask,
tgt_reference_points = reference_points_input,
memory = memory,
memory_key_padding_mask = memory_key_padding_mask,
memory_level_start_index = level_start_index,
memory_spatial_shapes = spatial_shapes,
memory_pos = pos,
self_attn_mask = tgt_mask,
cross_attn_mask = memory_mask,
other_input=other_input
)
if isinstance(output, (list, tuple)):
output, track_res_layer = output
else:
track_res_layer = None
# iter update
if self.bbox_embed is not None:
reference_before_sigmoid = inverse_sigmoid(reference_points)
delta_unsig = self.bbox_embed[layer_id](output)
outputs_unsig = delta_unsig + reference_before_sigmoid
new_reference_points = scale_sigmoid(outputs_unsig.sigmoid())
# select # ref points
if self.dec_layer_number is not None and layer_id != self.num_layers - 1:
# import ipdb; ipdb.set_trace()
nq_now = new_reference_points.shape[0]
select_number = self.dec_layer_number[layer_id + 1]
if nq_now != select_number:
class_unselected = self.class_embed[layer_id](output) # nq, bs, 91
topk_proposals = torch.topk(class_unselected.max(-1)[0], select_number, dim=0)[1] # new_nq, bs
new_reference_points = torch.gather(new_reference_points, 0, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) # unsigmoid
if self.rm_detach and 'dec' in self.rm_detach:
reference_points = new_reference_points
else:
reference_points = new_reference_points.detach()
if self.use_detached_boxes_dec_out:
ref_points.append(reference_points)
else:
ref_points.append(new_reference_points)
intermediate.append(self.norm(output))
if self.dec_layer_number is not None and layer_id != self.num_layers - 1:
if nq_now != select_number:
output = torch.gather(output, 0, topk_proposals.unsqueeze(-1).repeat(1, 1, self.d_model)) # unsigmoid
return output, reference_points, track_res_layer
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
refpoints_unsigmoid: Optional[Tensor] = None, # num_queries, bs, 2
# for memory
level_start_index: Optional[Tensor] = None, # num_levels
spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2
valid_ratios: Optional[Tensor] = None,
track_instances = None,
track_info = None
):
"""
Input:
- tgt: nq, bs, d_model
- memory: hw, bs, d_model
- pos: hw, bs, d_model
- refpoints_unsigmoid: nq, bs, 2/4
- valid_ratios/spatial_shapes: bs, nlevel, 2
"""
output = tgt
intermediate = []
reference_points = scale_sigmoid(refpoints_unsigmoid.sigmoid())
ref_points = [reference_points]
track_res = {}
for layer_id, layer in enumerate(self.layers):
# preprocess ref points
output, reference_points, track_res_layer = self.forward_one_layer(layer_id=layer_id, layer=layer, output=output, memory=memory,reference_points=reference_points, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask, level_start_index=level_start_index,spatial_shapes=spatial_shapes,valid_ratios=valid_ratios,pos=pos, tgt_mask=tgt_mask, memory_mask=memory_mask, ref_points=ref_points, intermediate=intermediate)
track_res[layer_id] = track_res_layer
return [
[itm_out.transpose(0, 1) for itm_out in intermediate],
[itm_refpoint.transpose(0, 1) for itm_refpoint in ref_points],
track_res
]
class MotDeformableTransformerDecoderLayer(DeformableTransformerDecoderLayer):
def __init__(self, args, layer_id, d_model=256, d_ffn=1024, dropout=0.1, activation="relu", n_levels=4, n_heads=8, n_points=4, use_deformable_box_attn=False, box_attn_type='roi_align', key_aware_type=None, decoder_sa_type='ca', module_seq=...):
self.args = args
self.layer_id = layer_id
self.dropout_p = dropout
self.n_heads = n_heads
super(DeformableTransformerDecoderLayer, self).__init__()
self.module_seq = module_seq
assert sorted(module_seq) == ['ca', 'ffn', 'sa']
# cross attention
# self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
if use_deformable_box_attn:
self.cross_attn = MSDeformableBoxAttention(d_model, n_levels, n_heads, n_boxes=n_points, used_func=box_attn_type)
else:
self.cross_attn = self.init_cross_attn(d_model, n_levels, n_heads, n_points)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# self attention
self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.dropout2 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn) | self.activation = _get_activation_fn(activation, d_model=d_ffn, batch_dim=1) | 4 | 2023-10-16 02:18:33+00:00 | 12k |
CuriseJia/FreeStyleRet | test.py | [
{
"identifier": "ShallowStyleRetrieval",
"path": "src/models/style_retrieval.py",
"snippet": "class ShallowStyleRetrieval(nn.Module):\n def __init__(self, model_args):\n super(ShallowStyleRetrieval, self).__init__()\n self.args = model_args\n self.openclip, self.pre_process_train, self.pre_process_val = open_clip.create_model_and_transforms(\n model_name='ViT-L-14', pretrained=self.args.origin_resume)\n self.tokenizer = open_clip.get_tokenizer('ViT-L-14')\n self.openclip.apply(freeze_all_but_bn)\n self.visual = self.openclip.visual\n self.transformer = self.visual.transformer\n # Prompt Token\n self.gram_prompt = nn.Parameter(torch.randn(\n self.args.gram_prompts, self.args.gram_prompt_dim))\n self.gram_encoder = VGG\n self.gram_encoder.load_state_dict(torch.load(self.args.gram_encoder_path))\n self.gram_encoder.apply(freeze_model)\n self.gram_patch = nn.Conv2d(128, 256, 16, 16)\n self.gram_pool = nn.Linear(256, 4)\n self.gram_linear = nn.Sequential(\n nn.Linear(256, 512),\n nn.Linear(512, 1024),\n nn.Linear(1024, self.args.gram_prompt_dim))\n self.style_prompt = nn.Parameter(torch.randn(\n self.args.style_prompts, self.args.style_prompt_dim))\n self.style_patch = nn.Conv2d(256, 256, 16, 16)\n self.style_linear = nn.Sequential(\n nn.Linear(256, 512),\n nn.Linear(512, 1024),\n nn.Linear(1024, self.args.gram_prompt_dim))\n # loss\n self.i2t_loss = nn.TripletMarginWithDistanceLoss(\n distance_function=lambda x, y: 1.0-F.cosine_similarity(x, y), \n margin=1)\n self.t2i_loss = nn.TripletMarginWithDistanceLoss(\n distance_function=lambda x, y: 1.0-F.cosine_similarity(x, y), \n margin=1)\n \n\n def get_loss(self, image_feature, pair_feature, negative_feature, optimizer):\n loss_1 = self.i2t_loss(image_feature, pair_feature, negative_feature)\n loss_2 = self.t2i_loss(pair_feature, image_feature, negative_feature)\n loss = (loss_1 + loss_2) / 2\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return loss.detach().cpu().numpy()\n \n\n def _get_features(self, image, model, layers=None):\n if layers is None:\n layers = {'0': 'conv1_1', \n '5': 'conv2_1', \n '10': 'conv3_1', \n '19': 'conv4_1', \n '21': 'conv4_2', \n '28': 'conv5_1',\n '31': 'conv5_2'} \n features = {}\n x = image\n for name, layer in model._modules.items():\n x = layer(x) \n if name in layers:\n features[layers[name]] = x\n \n return features\n \n\n def _get_gram_prompt(self, input):\n latent_feature = self._get_features(input, self.gram_encoder)\n embed = self.gram_patch(latent_feature['conv3_1'])\n n, c, h, w = embed.shape # (b, 256, 7, 7)\n\n features = embed.view(n, c, -1) # (b*256, 49)\n features = torch.bmm(features, features.transpose(1, 2))\n features = self.gram_pool(features)\n prompt_feature = self.gram_linear(features.permute(0, 2, 1))\n\n return prompt_feature\n \n\n def _get_style_prompt(self, input):\n # style_feature = torch.tensor(torch.randn(4, 4096))\n feature = torch.from_numpy(np.load(self.args.style_cluster_path)).view(4, 4096).float().to(self.args.device)\n \n gram = self._get_features(input, self.gram_encoder)\n embed = self.gram_patch(gram['conv3_1'])\n n, c, h, w = embed.shape\n gram = embed.view(n, c, -1) # (b*256, 49)\n gram = torch.bmm(gram, gram.transpose(1, 2))\n\n gram = self.gram_pool(gram)\n gram = self.gram_linear(gram.permute(0, 2, 1))\n\n feature = select_style_prompt(gram, feature)\n\n return feature\n \n\n def _visual_forward(self, x):\n gram_prompt = self._get_gram_prompt(x)\n style_prompt = self._get_style_prompt(x)\n\n x = self.visual.conv1(x) # shape = [*, width, grid, grid]\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = 
x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n\n # class embeddings and positional embeddings\n x = torch.cat(\n [self.visual.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n x = x + self.visual.positional_embedding.to(x.dtype)\n\n # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in\n x = self.visual.patch_dropout(x)\n x = self.visual.ln_pre(x)\n\n if self.args.prompt_location == 'Shallow':\n\n x = torch.cat([x[:, 0, :].unsqueeze(1), style_prompt, x[:, 1:, :]], dim=1)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.visual.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n \n elif self.args.prompt_location == 'Bottom':\n\n x = x.permute(1, 0, 2) # NLD -> LND\n for r in range(len(self.transformer.resblocks)):\n if r == len(self.transformer.resblocks)-1:\n x = torch.cat([x[0, :, :].unsqueeze(0), \n gram_prompt.permute(1, 0, 2), \n x[1:, :, :]], dim=0)\n x = self.transformer.resblocks[r](x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n # if self.visual.attn_pool is not None:\n # x = self.visual.attn_pool(x)\n # x = self.visual.ln_post(x)\n # pooled, tokens = self.visual._global_pool(x)\n # else:\n pooled, tokens = self.visual._global_pool(x)\n pooled = self.visual.ln_post(pooled)\n\n if self.visual.proj is not None:\n pooled = pooled @ self.visual.proj\n\n # if self.visual.output_tokens:\n # return pooled, tokens\n \n return pooled\n \n\n def forward(self, data, dtype='image'):\n if dtype == 'image': \n feat = self._visual_forward(data)\n\n elif dtype == 'text':\n feat = self.openclip.encode_text(data)\n\n return feat"
},
{
"identifier": "DeepStyleRetrieval",
"path": "src/models/style_retrieval.py",
"snippet": "class DeepStyleRetrieval(nn.Module):\n def __init__(self, model_args):\n super(DeepStyleRetrieval, self).__init__()\n self.args = model_args\n self.openclip, self.pre_process_train, self.pre_process_val = open_clip.create_model_and_transforms(\n model_name='ViT-L-14', pretrained=self.args.origin_resume)\n self.tokenizer = open_clip.get_tokenizer('ViT-L-14')\n self.openclip.apply(freeze_all_but_bn)\n self.visual = self.openclip.visual\n self.transformer = self.visual.transformer\n # Prompt Token\n self.gram_prompt = nn.Parameter(torch.randn(\n self.args.gram_prompts, self.args.gram_prompt_dim))\n self.gram_encoder = VGG\n self.gram_encoder.load_state_dict(torch.load(self.args.gram_encoder_path))\n self.gram_encoder.apply(freeze_model)\n self.gram_patch = nn.Conv2d(128, 256, 16, 16)\n self.gram_pool = nn.Linear(256, 4)\n self.gram_linear = nn.Sequential(\n nn.Linear(256, 512),\n nn.Linear(512, 1024),\n nn.Linear(1024, self.args.gram_prompt_dim))\n self.style_prompt = nn.Parameter(torch.randn(\n self.args.style_prompts, self.args.style_prompt_dim))\n self.style_patch = nn.Conv2d(256, 256, 16, 16)\n self.style_linear = nn.Sequential(\n nn.Linear(256, 512),\n nn.Linear(512, 1024),\n nn.Linear(1024, self.args.gram_prompt_dim))\n # loss\n self.i2t_loss = nn.TripletMarginWithDistanceLoss(\n distance_function=lambda x, y: 1.0-F.cosine_similarity(x, y), \n margin=1)\n self.t2i_loss = nn.TripletMarginWithDistanceLoss(\n distance_function=lambda x, y: 1.0-F.cosine_similarity(x, y), \n margin=1)\n \n\n def get_loss(self, image_feature, pair_feature, negative_feature, optimizer):\n loss_1 = self.i2t_loss(image_feature, pair_feature, negative_feature)\n loss_2 = self.t2i_loss(pair_feature, image_feature, negative_feature)\n loss = (loss_1 + loss_2) / 2\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return loss.detach().cpu().numpy()\n \n\n def _get_features(self, image, model, layers=None):\n if layers is None:\n layers = {'0': 'conv1_1', \n '5': 'conv2_1', \n '10': 'conv3_1', \n '19': 'conv4_1', \n '21': 'conv4_2', \n '28': 'conv5_1',\n '31': 'conv5_2'} \n features = {}\n x = image\n for name, layer in model._modules.items():\n x = layer(x) \n if name in layers:\n features[layers[name]] = x\n \n return features\n \n\n def _get_gram_prompt(self, input):\n latent_feature = self._get_features(input, self.gram_encoder)\n embed = self.gram_patch(latent_feature['conv3_1'])\n n, c, h, w = embed.shape # (b, 256, 7, 7)\n\n features = embed.view(n, c, -1) # (b*256, 49)\n features = torch.bmm(features, features.transpose(1, 2))\n features = self.gram_pool(features)\n prompt_feature = self.gram_linear(features.permute(0, 2, 1))\n\n return prompt_feature\n \n\n # def _get_style_prompt(self, input):\n # feature = torch.from_numpy(np.load(self.args.style_cluster_path)).view(self.args.style_prompts, 128, 112, 112).float().to(self.args.device) # (4, 1605632)\n # # style_feature = torch.tensor(torch.randn(4, 256, 256))\n # style_feature = self.gram_patch(feature)\n # n, c, h, w = style_feature.shape # (b, 256, 7, 7)\n # style_feature = style_feature.view(n, c, -1) # (b*256, 49)\n # style_feature = torch.bmm(style_feature, style_feature.transpose(1, 2))\n \n # gram = self._get_features(input, self.gram_encoder)\n # embed = self.gram_patch(gram['conv3_1'])\n # n, c, h, w = embed.shape\n # gram = embed.view(n, c, -1) # (b*256, 49)\n # gram = torch.bmm(gram, gram.transpose(1, 2))\n # feature = select_style_prompt(gram, style_feature.view(self.args.style_prompts, -1)) # (b, 65536)\n # feature = 
self.style_patch(feature.view(self.args.batch_size, 256, 16, 16)).view(self.args.batch_size, 256)\n # feature = self.style_linear(feature).unsqueeze(1).repeat(1, self.args.style_prompts, 1)\n\n # return feature\n\n def _get_style_prompt(self, input):\n # style_feature = torch.tensor(torch.randn(4, 4096))\n feature = torch.from_numpy(np.load(self.args.style_cluster_path)).view(4, 4096).float().to(self.args.device)\n \n gram = self._get_features(input, self.gram_encoder)\n embed = self.gram_patch(gram['conv3_1'])\n n, c, h, w = embed.shape\n gram = embed.view(n, c, -1) # (b*256, 49)\n gram = torch.bmm(gram, gram.transpose(1, 2))\n\n gram = self.gram_pool(gram)\n gram = self.gram_linear(gram.permute(0, 2, 1))\n\n feature = select_style_prompt(gram, feature)\n\n return feature\n\n\n def _visual_forward(self, x):\n input = x\n self.gram_prompt.parameter = self._get_gram_prompt(input)\n self.style_prompt.parameter = self._get_style_prompt(input)\n\n x = self.visual.conv1(x) # shape = [*, width, grid, grid]\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n\n # class embeddings and positional embeddings\n x = torch.cat(\n [self.visual.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n x = x + self.visual.positional_embedding.to(x.dtype)\n\n # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in\n x = self.visual.patch_dropout(x)\n x = self.visual.ln_pre(x)\n\n # add style_prompt\n x = torch.cat([x[:, 0, :].unsqueeze(1), self.style_prompt.expand(x.shape[0],-1,-1), x[:, 1:, :]], dim=1)\n\n # add gram_prompt before the last block of transformer\n x = x.permute(1, 0, 2) # NLD -> LND\n for r in range(len(self.transformer.resblocks)):\n if r == len(self.transformer.resblocks)-1:\n x = torch.cat([x[0, :, :].unsqueeze(0), \n self.gram_prompt.expand(self.args.batch_size,-1,-1).permute(1, 0, 2), \n x[1:, :, :]], dim=0)\n x = self.transformer.resblocks[r](x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n # if self.visual.attn_pool is not None:\n # x = self.visual.attn_pool(x)\n # x = self.visual.ln_post(x)\n # pooled, tokens = self.visual._global_pool(x)\n # else:\n pooled, tokens = self.visual._global_pool(x)\n pooled = self.visual.ln_post(pooled)\n\n if self.visual.proj is not None:\n pooled = pooled @ self.visual.proj\n\n # if self.visual.output_tokens:\n # return pooled, tokens\n \n return pooled\n \n\n def forward(self, data, dtype='image'):\n if dtype == 'image': \n feat = self._visual_forward(data)\n\n elif dtype == 'text':\n feat = self.openclip.encode_text(data)\n\n return feat"
},
{
"identifier": "BLIP_Retrieval",
"path": "src/models/blip_retrieval.py",
"snippet": "class BLIP_Retrieval(nn.Module):\n def __init__(self, model_args):\n super(BLIP_Retrieval, self).__init__()\n self.args = model_args\n self.blip = blip_retrieval(pretrained=self.args.origin_resume, image_size=224, vit='large', vit_grad_ckpt=True, vit_ckpt_layer=10)\n self.blip.apply(freeze_all_but_bn)\n self.visual = self.blip.visual_encoder.blocks\n # Prompt Token\n self.gram_prompt = nn.Parameter(torch.randn(\n self.args.gram_prompts, self.args.gram_prompt_dim))\n self.gram_encoder = VGG\n self.gram_encoder.load_state_dict(torch.load(self.args.gram_encoder_path))\n self.gram_encoder.apply(freeze_model)\n self.gram_patch = nn.Conv2d(128, 256, 16, 16)\n self.gram_pool = nn.Linear(256, 4)\n self.gram_linear = nn.Sequential(\n nn.Linear(256, 512),\n nn.Linear(512, 1024),\n nn.Linear(1024, self.args.gram_prompt_dim))\n self.style_prompt = nn.Parameter(torch.randn(\n self.args.style_prompts, self.args.style_prompt_dim))\n self.style_patch = nn.Conv2d(256, 256, 16, 16)\n self.style_linear = nn.Sequential(\n nn.Linear(256, 512),\n nn.Linear(512, 1024),\n nn.Linear(1024, self.args.gram_prompt_dim))\n # loss and process\n self.triplet_loss = nn.TripletMarginWithDistanceLoss(\n distance_function=lambda x, y: 1.0-F.cosine_similarity(x, y), \n margin=1)\n self.pre_process_train = image_transform(224, True, image_mean, image_std)\n self.pre_process_val = image_transform(224, False, image_mean, image_std)\n \n\n def _get_features(self, image, model, layers=None):\n if layers is None:\n layers = {'0': 'conv1_1', \n '5': 'conv2_1', \n '10': 'conv3_1', \n '19': 'conv4_1', \n '21': 'conv4_2', \n '28': 'conv5_1',\n '31': 'conv5_2'} \n features = {}\n x = image\n for name, layer in model._modules.items():\n x = layer(x) \n if name in layers:\n features[layers[name]] = x\n \n return features\n\n\n def _get_gram_prompt(self, input):\n latent_feature = self._get_features(input, self.gram_encoder)\n embed = self.gram_patch(latent_feature['conv3_1'])\n n, c, h, w = embed.shape # (b, 256, 7, 7)\n\n features = embed.view(n, c, -1) # (b*256, 49)\n features = torch.bmm(features, features.transpose(1, 2))\n features = self.gram_pool(features)\n prompt_feature = self.gram_linear(features.permute(0, 2, 1))\n\n return prompt_feature\n \n\n def _get_style_prompt(self, input):\n # style_feature = torch.tensor(torch.randn(4, 4096))\n feature = torch.from_numpy(np.load(self.args.style_cluster_path)).view(4, 4096).float().to(self.args.device)\n \n gram = self._get_features(input, self.gram_encoder)\n embed = self.gram_patch(gram['conv3_1'])\n n, c, h, w = embed.shape\n gram = embed.view(n, c, -1) # (b*256, 49)\n gram = torch.bmm(gram, gram.transpose(1, 2))\n\n gram = self.gram_pool(gram)\n gram = self.gram_linear(gram.permute(0, 2, 1))\n\n feature = select_style_prompt(gram, feature)\n\n return feature\n\n\n def forward(self, data, dtype='image'):\n if dtype == 'image':\n gram_prompt = self._get_gram_prompt(data)\n style_prompt = self._get_style_prompt(data)\n\n feat = self.blip.visual_encoder.patch_embed(data)\n cls_tokens = self.blip.visual_encoder.cls_token.expand(data.shape[0], -1, -1)\n feat = torch.cat((cls_tokens, feat), dim=1)\n feat = feat + self.blip.visual_encoder.pos_embed[:,:feat.size(1),:]\n feat = self.blip.visual_encoder.pos_drop(feat)\n\n feat = torch.cat([feat[:, 0, :].unsqueeze(1), style_prompt, feat[:, 1:, :]], dim=1)\n for r in range(len(self.blip.visual_encoder.blocks)):\n if r == len(self.blip.visual_encoder.blocks)-1:\n feat = torch.cat([feat[:, 0, :].unsqueeze(1), \n gram_prompt,\n feat[:, 
1:, :]], dim=1)\n feat = self.blip.visual_encoder.blocks[r](feat)\n \n feat = self.blip.visual_encoder.norm(feat)\n \n ori_embed = F.normalize(self.blip.vision_proj(feat[:,0,:]),dim=-1) \n\n return ori_embed\n \n else:\n text = self.blip.tokenizer(data, padding='max_length', truncation=True, max_length=35, \n return_tensors=\"pt\").to(self.args.device)\n text_output = self.blip.text_encoder(text.input_ids, attention_mask = text.attention_mask, \n return_dict = True, mode = 'text')\n text_feat = F.normalize(self.blip.text_proj(text_output.last_hidden_state[:,0,:]),dim=-1)\n\n return text_feat\n \n\n def get_loss(self, image_feature, pair_feature, negative_feature, optimizer):\n loss = self.triplet_loss(image_feature, pair_feature, negative_feature)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return loss.detach().cpu().numpy()"
},
{
"identifier": "T2ITestDataset",
"path": "src/dataset/data.py",
"snippet": "class T2ITestDataset(Dataset):\n def __init__(self, root_path, json_path, image_transform):\n self.root_path = root_path\n self.dataset = json.load(open(json_path,'r'))\n self.image_transform = image_transform\n \n\n def __len__(self):\n return len(self.dataset)\n \n \n def __getitem__(self, index):\n caption_path = os.path.join(self.root_path, 'text/'+self.dataset[index]['caption'])\n image_path = os.path.join(self.root_path, 'images/'+self.dataset[index]['image'])\n \n f = open(caption_path, 'r')\n caption = f.readline().replace('\\n', '')\n pair_image = self.image_transform(Image.open(image_path))\n\n return [caption, pair_image, index]"
},
{
"identifier": "I2ITestDataset",
"path": "src/dataset/data.py",
"snippet": "class I2ITestDataset(Dataset):\n def __init__(self, style, root_path, json_path, image_transform):\n self.style = style\n self.root_path = root_path\n self.dataset = json.load(open(json_path,'r'))\n self.image_transform = image_transform\n \n\n def __len__(self):\n return len(self.dataset)\n \n \n def __getitem__(self, index):\n ori_path = os.path.join(self.root_path, 'images/'+self.dataset[index]['image'])\n pair_path = os.path.join(self.root_path, '{}/'.format(self.style)+self.dataset[index]['image'])\n \n ori_image = self.image_transform(Image.open(ori_path))\n pair_image = self.image_transform(Image.open(pair_path))\n\n return [ori_image, pair_image, index]"
},
{
"identifier": "X2ITestDataset",
"path": "src/dataset/data.py",
"snippet": "class X2ITestDataset(Dataset):\n def __init__(self, style, root_path, json_path, image_transform):\n self.style = style\n self.root_path = root_path\n self.dataset = json.load(open(json_path,'r'))\n self.image_transform = image_transform\n \n\n def __len__(self):\n return len(self.dataset)\n \n \n def __getitem__(self, index):\n caption_path = os.path.join(self.root_path, 'text/'+self.dataset[index]['caption'])\n ori_path = os.path.join(self.root_path, 'images/'+self.dataset[index]['image'])\n pair_path = os.path.join(self.root_path, '{}/'.format(self.style)+self.dataset[index]['image'])\n \n f = open(caption_path, 'r')\n caption = f.readline().replace('\\n', '')\n ori_image = self.image_transform(Image.open(ori_path))\n pair_image = self.image_transform(Image.open(pair_path))\n\n return [caption, ori_image, pair_image, index]"
},
{
"identifier": "setup_seed",
"path": "src/utils/utils.py",
"snippet": "def setup_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n cudnn.benchmark = True"
},
{
"identifier": "getR1Accuary",
"path": "src/utils/utils.py",
"snippet": "def getR1Accuary(prob):\n temp = prob.detach().cpu().numpy()\n temp = np.argsort(temp, axis=1)\n count = 0\n for i in range(prob.shape[0]):\n if temp[i][prob.shape[1]-1] == i:\n count+=1\n acc = count/prob.shape[0]\n return acc"
},
{
"identifier": "getR5Accuary",
"path": "src/utils/utils.py",
"snippet": "def getR5Accuary(prob):\n temp = prob.detach().cpu().numpy()\n temp = np.argsort(temp, axis=1)\n count = 0\n for i in range(prob.shape[0]):\n for j in range(prob.shape[1]-4,prob.shape[1]):\n if temp[i][j] == i:\n count+=1\n acc = count/prob.shape[0]\n return acc"
}
] | import argparse
import torch
import torch.nn.functional as F
from tqdm import tqdm
from torch.utils.data import DataLoader
from src.models import ShallowStyleRetrieval, DeepStyleRetrieval, BLIP_Retrieval
from src.dataset.data import T2ITestDataset, I2ITestDataset, X2ITestDataset
from src.utils.utils import setup_seed, getR1Accuary, getR5Accuary | 7,566 |
def parse_args():
parser = argparse.ArgumentParser(description='Parse args for FreeStyleRet Training.')
# project settings
parser.add_argument('--resume', default='', type=str, help='load checkpoints from given path')
parser.add_argument('--origin_resume', default='model_large_retrieval_coco.pth', type=str, help='load checkpoints from given path')
parser.add_argument('--gram_encoder_path', default='pretrained/vgg_normalised.pth', type=str, help='load vgg from given path')
parser.add_argument('--style_cluster_path', default='pretrained/style_cluster.npy', type=str, help='load style prompt from given npy')
parser.add_argument('--device', default='cuda:0')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--num_workers', default=6, type=int)
# data settings
parser.add_argument("--type", type=str, default='style2image', help='choose train text2image or style2image.')
parser.add_argument("--style", type=str, default='sketch', help='choose sketch, art or mosaic.')
parser.add_argument("--test_dataset_path", type=str, default='DSR/')
parser.add_argument("--test_json_path", type=str, default='DSR/test.json')
parser.add_argument("--batch_size", type=int, default=24)
# model settings
parser.add_argument('--prompt', type=str, default='DeepPrompt', help='ShallowPrompt or DeepPrompt')
parser.add_argument('--gram_prompts', type=int, default=4)
parser.add_argument('--gram_prompt_dim', type=int, default=1024)
parser.add_argument('--style_prompts', type=int, default=4)
parser.add_argument('--style_prompt_dim', type=int, default=1024)
args = parser.parse_args()
return args
def eval(args, model, dataloader):
model.eval()
r1 = []
r5 = []
if args.type == 'text2image':
for data in enumerate(tqdm(dataloader)):
if args.prompt == 'BLIP_Retrieval':
caption = data[1][0]
else:
caption = model.tokenizer(data[1][0]).to(args.device, non_blocking=True)
image = data[1][1].to(args.device, non_blocking=True)
image_feature = model(image, dtype='image')
text_feature = model(caption, dtype='text')
image_feature = F.normalize(image_feature, dim=-1)
text_feature = F.normalize(text_feature, dim=-1)
prob = torch.softmax((100.0 * text_feature @ image_feature.T), dim=-1)
r1.append(getR1Accuary(prob))
r5.append(getR5Accuary(prob))
elif args.type == 'style2image':
for data in enumerate(tqdm(dataloader)):
origin_image = data[1][0].to(args.device, non_blocking=True)
retrival_image = data[1][1].to(args.device, non_blocking=True)
original_feature = model(origin_image, dtype='image')
retrival_feature = model(retrival_image, dtype='image')
original_feature = F.normalize(original_feature, dim=-1)
retrival_feature = F.normalize(retrival_feature, dim=-1)
prob = torch.softmax((100.0 * retrival_feature @ original_feature.T), dim=-1)
r1.append(getR1Accuary(prob))
r5.append(getR5Accuary(prob))
else:
for data in enumerate(tqdm(dataloader)):
if args.prompt == 'BLIP_Retrieval':
caption = data[1][0]
else:
caption = model.tokenizer(data[1][0]).to(args.device, non_blocking=True)
origin_image = data[1][1].to(args.device, non_blocking=True)
retrival_image = data[1][2].to(args.device, non_blocking=True)
text_feature = model(caption, dtype='text')
original_feature = model(origin_image, dtype='image')
retrival_feature = model(retrival_image, dtype='image')
text_feature = F.normalize(text_feature, dim=-1)
original_feature = F.normalize(original_feature, dim=-1)
retrival_feature = F.normalize(retrival_feature, dim=-1)
prob1 = torch.softmax((100.0 * text_feature @ original_feature.T), dim=-1)
prob2 = torch.softmax((100.0 * retrival_feature @ original_feature.T), dim=-1)
prob = prob1.max(prob2)
r1.append(getR1Accuary(prob))
r5.append(getR5Accuary(prob))
resr1 = sum(r1)/len(r1)
resr5 = sum(r5)/len(r5)
print('R@1 Acc is {}'.format(resr1))
print('R@5 Acc is {}'.format(resr5))
if __name__ == "__main__":
args = parse_args()
|
def parse_args():
parser = argparse.ArgumentParser(description='Parse args for FreeStyleRet Training.')
# project settings
parser.add_argument('--resume', default='', type=str, help='load checkpoints from given path')
parser.add_argument('--origin_resume', default='model_large_retrieval_coco.pth', type=str, help='load checkpoints from given path')
parser.add_argument('--gram_encoder_path', default='pretrained/vgg_normalised.pth', type=str, help='load vgg from given path')
parser.add_argument('--style_cluster_path', default='pretrained/style_cluster.npy', type=str, help='load style prompt from given npy')
parser.add_argument('--device', default='cuda:0')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--num_workers', default=6, type=int)
# data settings
parser.add_argument("--type", type=str, default='style2image', help='choose train text2image or style2image.')
parser.add_argument("--style", type=str, default='sketch', help='choose sketch, art or mosaic.')
parser.add_argument("--test_dataset_path", type=str, default='DSR/')
parser.add_argument("--test_json_path", type=str, default='DSR/test.json')
parser.add_argument("--batch_size", type=int, default=24)
# model settings
parser.add_argument('--prompt', type=str, default='DeepPrompt', help='ShallowPrompt or DeepPrompt')
parser.add_argument('--gram_prompts', type=int, default=4)
parser.add_argument('--gram_prompt_dim', type=int, default=1024)
parser.add_argument('--style_prompts', type=int, default=4)
parser.add_argument('--style_prompt_dim', type=int, default=1024)
args = parser.parse_args()
return args
def eval(args, model, dataloader):
model.eval()
r1 = []
r5 = []
if args.type == 'text2image':
for data in enumerate(tqdm(dataloader)):
if args.prompt == 'BLIP_Retrieval':
caption = data[1][0]
else:
caption = model.tokenizer(data[1][0]).to(args.device, non_blocking=True)
image = data[1][1].to(args.device, non_blocking=True)
image_feature = model(image, dtype='image')
text_feature = model(caption, dtype='text')
image_feature = F.normalize(image_feature, dim=-1)
text_feature = F.normalize(text_feature, dim=-1)
prob = torch.softmax((100.0 * text_feature @ image_feature.T), dim=-1)
r1.append(getR1Accuary(prob))
r5.append(getR5Accuary(prob))
elif args.type == 'style2image':
for data in enumerate(tqdm(dataloader)):
origin_image = data[1][0].to(args.device, non_blocking=True)
retrival_image = data[1][1].to(args.device, non_blocking=True)
original_feature = model(origin_image, dtype='image')
retrival_feature = model(retrival_image, dtype='image')
original_feature = F.normalize(original_feature, dim=-1)
retrival_feature = F.normalize(retrival_feature, dim=-1)
prob = torch.softmax((100.0 * retrival_feature @ original_feature.T), dim=-1)
r1.append(getR1Accuary(prob))
r5.append(getR5Accuary(prob))
else:
for data in enumerate(tqdm(dataloader)):
if args.prompt == 'BLIP_Retrieval':
caption = data[1][0]
else:
caption = model.tokenizer(data[1][0]).to(args.device, non_blocking=True)
origin_image = data[1][1].to(args.device, non_blocking=True)
retrival_image = data[1][2].to(args.device, non_blocking=True)
text_feature = model(caption, dtype='text')
original_feature = model(origin_image, dtype='image')
retrival_feature = model(retrival_image, dtype='image')
text_feature = F.normalize(text_feature, dim=-1)
original_feature = F.normalize(original_feature, dim=-1)
retrival_feature = F.normalize(retrival_feature, dim=-1)
prob1 = torch.softmax((100.0 * text_feature @ original_feature.T), dim=-1)
prob2 = torch.softmax((100.0 * retrival_feature @ original_feature.T), dim=-1)
prob = prob1.max(prob2)
r1.append(getR1Accuary(prob))
r5.append(getR5Accuary(prob))
resr1 = sum(r1)/len(r1)
resr5 = sum(r5)/len(r5)
print('R@1 Acc is {}'.format(resr1))
print('R@5 Acc is {}'.format(resr5))
if __name__ == "__main__":
args = parse_args() | setup_seed(args.seed) | 6 | 2023-10-17 09:32:57+00:00 | 12k |
liuqidong07/MOELoRA-peft | src/MLoRA/peft/tuners/adalora.py | [
{
"identifier": "PeftType",
"path": "src/MLoRA/peft/utils/config.py",
"snippet": "class PeftType(str, enum.Enum):\n PROMPT_TUNING = \"PROMPT_TUNING\"\n P_TUNING = \"P_TUNING\"\n PREFIX_TUNING = \"PREFIX_TUNING\"\n LORA = \"LORA\"\n ADALORA = \"ADALORA\"\n ADAPTION_PROMPT = \"ADAPTION_PROMPT\"\n MMOELORAS = \"MMOELORAS\""
},
{
"identifier": "TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING",
"path": "src/MLoRA/peft/utils/other.py",
"snippet": "TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING = {\n \"t5\": [\"q\", \"k\", \"v\", \"o\", \"wi\", \"wo\"],\n \"mt5\": [\"q\", \"k\", \"v\", \"o\", \"wi_0\", \"wi_1\", \"wo\"],\n \"bart\": [\"q_proj\", \"k_proj\", \"v_proj\", \"out_proj\", \"fc1\", \"fc2\"],\n # \"gpt2\": [\"c_attn\"],\n # \"bloom\": [\"query_key_value\"],\n \"opt\": [\"q_proj\", \"k_proj\", \"v_proj\", \"out_proj\", \"fc1\", \"fc2\"],\n # \"gptj\": [\"q_proj\", \"v_proj\"],\n # \"gpt_neox\": [\"query_key_value\"],\n # \"gpt_neo\": [\"q_proj\", \"v_proj\"],\n # \"bert\": [\"query\", \"value\"],\n \"roberta\": [\"query\", \"key\", \"value\", \"dense\"],\n # \"xlm-roberta\": [\"query\", \"value\"],\n # \"electra\": [\"query\", \"value\"],\n \"deberta-v2\": [\"query_proj\", \"key_proj\", \"value_proj\", \"dense\"],\n # \"deberta\": [\"in_proj\"],\n # \"layoutlm\": [\"query\", \"value\"],\n}"
},
{
"identifier": "transpose",
"path": "src/MLoRA/peft/utils/other.py",
"snippet": "def transpose(weight, fan_in_fan_out):\n return weight.T if fan_in_fan_out else weight"
},
{
"identifier": "_get_submodules",
"path": "src/MLoRA/peft/utils/other.py",
"snippet": "def _get_submodules(model, key):\n parent = model.get_submodule(\".\".join(key.split(\".\")[:-1]))\n target_name = key.split(\".\")[-1]\n target = model.get_submodule(key)\n return parent, target, target_name"
},
{
"identifier": "_freeze_adapter",
"path": "src/MLoRA/peft/utils/other.py",
"snippet": "def _freeze_adapter(model, adapter_name):\n for n, p in model.named_parameters():\n if adapter_name in n:\n p.requires_grad = False"
},
{
"identifier": "LoraConfig",
"path": "src/MLoRA/peft/tuners/lora.py",
"snippet": "class LoraConfig(PeftConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`LoraModel`].\n\n Args:\n r (`int`): Lora attention dimension.\n target_modules (`Union[List[str],str]`): The names of the modules to apply Lora to.\n lora_alpha (`float`): The alpha parameter for Lora scaling.\n lora_dropout (`float`): The dropout probability for Lora layers.\n fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out).\n For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.:\n bias (`str`): Bias type for Lora. Can be 'none', 'all' or 'lora_only'\n modules_to_save (`List[str]`):List of modules apart from LoRA layers to be set as trainable\n and saved in the final checkpoint.\n \"\"\"\n\n r: int = field(default=8, metadata={\"help\": \"Lora attention dimension\"})\n target_modules: Optional[Union[List[str], str]] = field(\n default=None,\n metadata={\n \"help\": \"List of module names or regex expression of the module names to replace with Lora.\"\n \"For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' \"\n },\n )\n lora_alpha: int = field(default=None, metadata={\"help\": \"Lora alpha\"})\n lora_dropout: float = field(default=None, metadata={\"help\": \"Lora dropout\"})\n fan_in_fan_out: bool = field(\n default=False,\n metadata={\"help\": \"Set this to True if the layer to replace stores weight like (fan_in, fan_out)\"},\n )\n bias: str = field(default=\"none\", metadata={\"help\": \"Bias type for Lora. Can be 'none', 'all' or 'lora_only'\"})\n modules_to_save: Optional[List[str]] = field(\n default=None,\n metadata={\n \"help\": \"List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. \"\n \"For example, in Sequence Classification or Token Classification tasks, \"\n \"the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.\"\n },\n )\n init_lora_weights: bool = field(\n default=True,\n metadata={\"help\": \"Whether to initialize the weights of the Lora layers.\"},\n )\n\n def __post_init__(self):\n self.peft_type = PeftType.LORA"
},
{
"identifier": "LoraLayer",
"path": "src/MLoRA/peft/tuners/lora.py",
"snippet": "class LoraLayer:\n def __init__(\n self,\n in_features: int,\n out_features: int,\n ):\n self.r = {}\n self.lora_alpha = {}\n self.scaling = {}\n self.lora_dropout = nn.ModuleDict({})\n self.lora_A = nn.ModuleDict({})\n self.lora_B = nn.ModuleDict({})\n # Mark the weight as unmerged\n self.merged = False\n self.disable_adapters = False\n self.in_features = in_features\n self.out_features = out_features\n\n def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights):\n self.r[adapter_name] = r\n self.lora_alpha[adapter_name] = lora_alpha\n if lora_dropout > 0.0:\n lora_dropout_layer = nn.Dropout(p=lora_dropout)\n else:\n lora_dropout_layer = nn.Identity()\n\n self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))\n # Actual trainable parameters\n if r > 0:\n self.lora_A.update(nn.ModuleDict({adapter_name: nn.Linear(self.in_features, r, bias=False)}))\n self.lora_B.update(nn.ModuleDict({adapter_name: nn.Linear(r, self.out_features, bias=False)}))\n self.scaling[adapter_name] = lora_alpha / r\n if init_lora_weights:\n self.reset_lora_parameters(adapter_name)\n self.to(self.weight.device)\n\n def reset_lora_parameters(self, adapter_name):\n if adapter_name in self.lora_A.keys():\n # initialize A the same way as the default for nn.Linear and B to zero\n nn.init.kaiming_uniform_(self.lora_A[adapter_name].weight, a=math.sqrt(5))\n nn.init.zeros_(self.lora_B[adapter_name].weight)"
},
{
"identifier": "LoraModel",
"path": "src/MLoRA/peft/tuners/lora.py",
"snippet": "class LoraModel(torch.nn.Module):\n \"\"\"\n Creates Low Rank Adapter (Lora) model from a pretrained transformers model.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): The model to be adapted.\n config ([`LoraConfig`]): The configuration of the Lora model.\n\n Returns:\n `torch.nn.Module`: The Lora model.\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForSeq2SeqLM, LoraConfig\n >>> from peft import LoraModel, LoraConfig\n\n >>> config = LoraConfig(\n ... peft_type=\"LORA\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... r=8,\n ... lora_alpha=32,\n ... target_modules=[\"q\", \"v\"],\n ... lora_dropout=0.01,\n ... )\n\n >>> model = AutoModelForSeq2SeqLM.from_pretrained(\"t5-base\")\n >>> lora_model = LoraModel(config, model)\n ```\n\n **Attributes**:\n - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.\n - **peft_config** ([`LoraConfig`]): The configuration of the Lora model.\n \"\"\"\n\n def __init__(self, model, config, adapter_name):\n super().__init__()\n self.model = model\n self.forward = self.model.forward\n self.peft_config = config\n self.add_adapter(adapter_name, self.peft_config[adapter_name])\n\n def add_adapter(self, adapter_name, config=None):\n if config is not None:\n model_config = self.model.config.to_dict() if hasattr(self.model.config, \"to_dict\") else self.model.config\n config = self._prepare_lora_config(config, model_config)\n self.peft_config[adapter_name] = config\n self._find_and_replace(adapter_name)\n if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != \"none\":\n raise ValueError(\n \"LoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters.\"\n )\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias) # freeze all layers except for lora layer\n if self.peft_config[adapter_name].inference_mode: # if inference, also freeze lora layer\n _freeze_adapter(self.model, adapter_name)\n\n def _find_and_replace(self, adapter_name):\n \"\"\"Replace the target `Linear` module with LoRA layer (Linear+LoRA)\"\"\"\n lora_config = self.peft_config[adapter_name]\n loaded_in_8bit = getattr(self.model, \"is_loaded_in_8bit\", False)\n if loaded_in_8bit and not is_bnb_available():\n raise ImportError(\n \"To use Lora with 8-bit quantization, please install the `bitsandbytes` package. 
\"\n \"You can install it with `pip install bitsandbytes`.\"\n )\n is_target_modules_in_base_model = False\n kwargs = {\n \"r\": lora_config.r,\n \"lora_alpha\": lora_config.lora_alpha,\n \"lora_dropout\": lora_config.lora_dropout,\n \"fan_in_fan_out\": lora_config.fan_in_fan_out,\n \"init_lora_weights\": lora_config.init_lora_weights,\n }\n key_list = [key for key, _ in self.model.named_modules()]\n for key in key_list:\n if isinstance(lora_config.target_modules, str):\n target_module_found = re.fullmatch(lora_config.target_modules, key)\n else:\n target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)\n if target_module_found:\n if not is_target_modules_in_base_model:\n is_target_modules_in_base_model = True\n parent, target, target_name = _get_submodules(self.model, key) # parent: the parent mudle of target (e.g., SelfAttention), target: target module (e.g., nn.Linear()), target name: the name of target module (e.g., query_key_value)\n bias = target.bias is not None\n if isinstance(target, LoraLayer): # if the target is LoraLayer, only need to update the parameters\n target.update_layer(\n adapter_name,\n lora_config.r,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n else: # if not, get the lora parameter for create.\n if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):\n eightbit_kwargs = kwargs.copy()\n eightbit_kwargs.update(\n {\n \"has_fp16_weights\": target.state.has_fp16_weights,\n \"memory_efficient_backward\": target.state.memory_efficient_backward,\n \"threshold\": target.state.threshold,\n \"index\": target.index,\n }\n )\n new_module = Linear8bitLt(\n adapter_name, target.in_features, target.out_features, bias=bias, **eightbit_kwargs\n )\n else: # create based on the original module type\n if isinstance(target, torch.nn.Linear):\n in_features, out_features = target.in_features, target.out_features\n if kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. \"\n \"Setting fan_in_fan_out to False.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = False\n elif isinstance(target, Conv1D):\n in_features, out_features = (\n target.weight.ds_shape if hasattr(target.weight, \"ds_shape\") else target.weight.shape\n )\n if not kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to False but the target module is `Conv1D`. \"\n \"Setting fan_in_fan_out to True.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = True\n else:\n raise ValueError(\n f\"Target module {target} is not supported. \"\n f\"Currently, only `torch.nn.Linear` and `Conv1D` are supported.\"\n )\n new_module = Linear(adapter_name, in_features, out_features, bias=bias, **kwargs) # create the lora module, here is not the raw nn.Linear, but the lora layer\n\n self._replace_module(parent, target_name, new_module, target)\n if not is_target_modules_in_base_model:\n raise ValueError(\n f\"Target modules {lora_config.target_modules} not found in the base model. 
\"\n f\"Please check the target modules and try again.\"\n )\n\n def _replace_module(self, parent_module, child_name, new_module, old_module):\n \"\"\"substitute the original nn.Linear to new Linear (nn.Linear+LoRA block)\"\"\"\n setattr(parent_module, child_name, new_module)\n new_module.weight = old_module.weight\n if old_module.bias is not None:\n new_module.bias = old_module.bias\n if getattr(old_module, \"state\", None) is not None: # synchronize the state and device\n new_module.state = old_module.state\n new_module.to(old_module.weight.device)\n\n # dispatch to correct device\n for name, module in new_module.named_modules():\n if \"lora_\" in name:\n module.to(old_module.weight.device)\n\n def __getattr__(self, name: str):\n \"\"\"Forward missing attributes to the wrapped module.\"\"\"\n try:\n return super().__getattr__(name) # defer to nn.Module's logic\n except AttributeError:\n return getattr(self.model, name)\n\n def get_peft_config_as_dict(self, inference: bool = False):\n config_dict = {}\n for key, value in self.peft_config.items():\n config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()}\n if inference:\n config[\"inference_mode\"] = True\n config_dict[key] = config\n return config\n\n def _set_adapter_layers(self, enabled=True):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.disable_adapters = False if enabled else True\n\n def enable_adapter_layers(self):\n self._set_adapter_layers(enabled=True)\n\n def disable_adapter_layers(self):\n self._set_adapter_layers(enabled=False)\n\n def set_adapter(self, adapter_name):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n if module.merged:\n warnings.warn(\"Adapter cannot be set when the model is merged. Unmerging the model first.\")\n module.unmerge()\n module.active_adapter = adapter_name\n\n def merge_adapter(self):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.merge()\n\n def unmerge_adapter(self):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.unmerge()\n\n @staticmethod\n def _prepare_lora_config(peft_config, model_config):\n if peft_config.target_modules is None:\n if model_config[\"model_type\"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:\n raise ValueError(\"Please specify `target_modules` in `peft_config`\")\n peft_config.target_modules = TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config[\"model_type\"]]\n if peft_config.inference_mode:\n peft_config.merge_weights = True\n return peft_config\n\n def merge_and_unload(self):\n r\"\"\"\n This method merges the LoRa layers into the base model. 
This is needed if someone wants to use the base model\n as a standalone model.\n \"\"\"\n if getattr(self.config, \"model_type\", None) == \"gpt2\":\n raise ValueError(\"GPT2 models are not supported for merging LORA layers\")\n\n if getattr(self.model, \"is_loaded_in_8bit\", False):\n raise ValueError(\"Cannot merge LORA layers when the model is loaded in 8-bit mode\")\n\n key_list = [key for key, _ in self.model.named_modules() if \"lora\" not in key]\n for key in key_list:\n try:\n parent, target, target_name = _get_submodules(self.model, key)\n except AttributeError:\n continue\n if isinstance(target, LoraLayer):\n bias = target.bias is not None\n new_module = torch.nn.Linear(target.in_features, target.out_features, bias=bias)\n target.merge()\n self._replace_module(parent, target_name, new_module, target)\n\n # save any additional trainable modules part of `modules_to_save`\n if isinstance(target, ModulesToSaveWrapper):\n setattr(parent, target_name, target.modules_to_save[target.active_adapter])\n\n return self.model\n\n def add_weighted_adapter(self, adapters, weights, adapter_name):\n if len({self.peft_config[adapter].r for adapter in adapters}) != 1:\n raise ValueError(\"All adapters must have the same r value\")\n self.peft_config[adapter_name] = self.peft_config[adapters[0]]\n self.peft_config[adapter_name].lora_alpha = self.peft_config[adapters[0]].r\n self._find_and_replace(adapter_name)\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)\n _freeze_adapter(self.model, adapter_name)\n key_list = [key for key, _ in self.model.named_modules() if \"lora\" not in key]\n for key in key_list:\n _, target, _ = _get_submodules(self.model, key)\n if isinstance(target, LoraLayer):\n target.lora_A[adapter_name].weight.data = target.lora_A[adapter_name].weight.data * 0.0\n target.lora_B[adapter_name].weight.data = target.lora_B[adapter_name].weight.data * 0.0\n for adapter, weight in zip(adapters, weights):\n if adapter not in target.lora_A:\n continue\n target.lora_A[adapter_name].weight.data += (\n target.lora_A[adapter].weight.data * weight * target.scaling[adapter]\n )\n target.lora_B[adapter_name].weight.data += target.lora_B[adapter].weight.data * weight"
},
{
"identifier": "mark_only_lora_as_trainable",
"path": "src/MLoRA/peft/tuners/lora.py",
"snippet": "def mark_only_lora_as_trainable(model: nn.Module, bias: str = \"none\") -> None:\n \"\"\"Only activate the LoRA layer as trainable\"\"\"\n for n, p in model.named_parameters():\n if \"lora_\" not in n:\n p.requires_grad = False\n if bias == \"none\":\n return\n elif bias == \"all\":\n for n, p in model.named_parameters():\n if \"bias\" in n:\n p.requires_grad = True\n elif bias == \"lora_only\":\n for m in model.modules():\n if isinstance(m, LoraLayer) and hasattr(m, \"bias\") and m.bias is not None:\n m.bias.requires_grad = True\n else:\n raise NotImplementedError"
}
] | import importlib
import re
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
import bitsandbytes as bnb
from dataclasses import dataclass, field
from typing import Optional
from transformers.pytorch_utils import Conv1D
from ..utils import (
TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING,
PeftType,
_freeze_adapter,
_get_submodules,
transpose,
)
from .lora import (
LoraConfig,
LoraLayer,
LoraModel,
mark_only_lora_as_trainable,
) | 8,057 | if not kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to False but the target module is `Conv1D`. "
"Setting fan_in_fan_out to True."
)
kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True
else:
raise ValueError(
f"Target module {target} is not supported. "
f"Currently, only `torch.nn.Linear` and `Conv1D` are supported."
)
new_module = SVDLinear(adapter_name, in_features, out_features, bias=bias, **kwargs)
self._replace_module(parent, target_name, new_module, target)
if not is_target_modules_in_base_model:
raise ValueError(
f"Target modules {lora_config.target_modules} not found in the base model. "
f"Please check the target modules and try again."
)
def __getattr__(self, name: str):
"""Forward missing attributes to the wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
return getattr(self.model, name)
def forward(self, *args, **kwargs):
outputs = self.model.forward(*args, **kwargs)
# Calculate the orthogonal regularization
orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight
assert orth_reg_weight > 0
if hasattr(outputs, "loss"):
regu_loss = 0
num_param = 0
for n, p in self.model.named_parameters():
if ("lora_A" in n or "lora_B" in n) and self.trainable_adapter_name in n:
para_cov = p @ p.T if "lora_A" in n else p.T @ p
I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov))
I.requires_grad = False
num_param += 1
regu_loss += torch.norm(para_cov - I, p="fro")
regu_loss = regu_loss / num_param
outputs.loss += orth_reg_weight * regu_loss
return outputs
def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name):
lora_config = self.peft_config[adapter_name]
for name, rank_idx in rank_pattern.items():
if isinstance(rank_idx, list):
rank = sum(rank_idx)
elif isinstance(rank_idx, torch.Tensor):
rank_idx = rank_idx.view(-1)
rank = rank_idx.sum().item()
else:
raise ValueError("Unexcepted type of rank_idx")
key = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
_, target, _ = _get_submodules(self.model, key)
lora_E_weights = target.lora_E[adapter_name][rank_idx]
lora_A_weights = target.lora_A[adapter_name][rank_idx]
lora_B_weights = target.lora_B[adapter_name][:, rank_idx]
ranknum = target.ranknum[adapter_name]
target.update_layer(
adapter_name,
rank,
lora_config.lora_alpha,
lora_config.lora_dropout,
lora_config.init_lora_weights,
)
with torch.no_grad():
if rank > 0:
target.lora_E[adapter_name].copy_(lora_E_weights)
target.lora_A[adapter_name].copy_(lora_A_weights)
target.lora_B[adapter_name].copy_(lora_B_weights)
# The scaling is exactly as the previous
target.ranknum[adapter_name].copy_(ranknum)
def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name):
for name, rank_idx in rank_pattern.items():
rank = sum(rank_idx)
prefix = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
for layer in ["lora_E", "lora_A", "lora_B"]:
key = f"base_model.model.{prefix}.{layer}.{adapter_name}"
if layer != "lora_B":
state_dict[key] = (
state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key]
)
else:
state_dict[key] = (
state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key]
)
return state_dict
def update_and_allocate(self, global_step):
lora_config = self.peft_config[self.trainable_adapter_name]
# Update the importance score and allocate the budget
if global_step < lora_config.total_step - lora_config.tfinal:
_, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step)
if rank_pattern:
lora_config.rank_pattern = rank_pattern
# Finalize the budget allocation
elif global_step == lora_config.total_step - lora_config.tfinal:
_, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True)
            # for some reason, this freezes the trainable parameters and nothing gets updated
# self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name)
lora_config.rank_pattern = rank_pattern
self.rankallocator.reset_ipt()
# Currently using inefficient way to mask the unimportant weights using the rank pattern
# due to problem mentioned above
elif global_step > lora_config.total_step - lora_config.tfinal:
self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern)
# Pass the function and do forward propagation
else:
return None
@staticmethod
def _prepare_adalora_config(peft_config, model_config):
if peft_config.target_modules is None:
|
def is_bnb_available():
return importlib.util.find_spec("bitsandbytes") is not None
if is_bnb_available():
@dataclass
class AdaLoraConfig(LoraConfig):
"""
This is the configuration class to store the configuration of a [`~peft.AdaLora`].
Args:
target_r (`int`): The target average rank of incremental matrix.
init_r (`int`): The initial rank for each incremental matrix.
tinit (`int`): The steps of initial fine-tuning warmup.
tfinal (`int`): The step of final fine-tuning.
        deltaT (`int`): The time interval between two budget allocations.
beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing.
        beta2 (`float`): The hyperparameter of EMA for uncertainty quantification.
orth_reg_weight (`float`): The coefficient of orthogonal regularization.
total_step (`int`): The total training steps that should be specified before training.
rank_pattern (`list`): The allocated rank for each weight matrix by RankAllocator.
"""
target_r: int = field(default=8, metadata={"help": "Target Lora matrix dimension."})
    init_r: int = field(default=12, metadata={"help": "Initial Lora matrix dimension."})
tinit: int = field(default=0, metadata={"help": "The steps of initial warmup."})
tfinal: int = field(default=0, metadata={"help": "The steps of final warmup."})
deltaT: int = field(default=1, metadata={"help": "Step interval of rank allocation."})
beta1: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."})
beta2: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."})
orth_reg_weight: float = field(default=0.5, metadata={"help": "The orthogonal regularization coefficient."})
total_step: Optional[int] = field(default=None, metadata={"help": "The total training steps."})
rank_pattern: Optional[dict] = field(default=None, metadata={"help": "The saved rank pattern."})
def __post_init__(self):
self.peft_type = PeftType.ADALORA
class AdaLoraModel(LoraModel):
"""
Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. Paper:
https://openreview.net/pdf?id=lq62uWRJjiY
Args:
model ([`transformers.PreTrainedModel`]): The model to be adapted.
config ([`AdaLoraConfig`]): The configuration of the AdaLora model.
Returns:
`torch.nn.Module`: The AdaLora model.
Example::
>>> from transformers import AutoModelForSeq2SeqLM, LoraConfig >>> from peft import AdaLoraModel, AdaLoraConfig
>>> config = AdaLoraConfig(
peft_type="ADALORA", task_type="SEQ_2_SEQ_LM", r=8, lora_alpha=32, target_modules=["q", "v"],
lora_dropout=0.01,
)
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> model = AdaLoraModel(config, model)
**Attributes**:
- **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted.
- **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model.
"""
def __init__(self, model, config, adapter_name):
nn.Module.__init__(self)
self.model = model
self.peft_config = config
self.add_adapter(adapter_name, self.peft_config[adapter_name])
def add_adapter(self, adapter_name, config=None):
if config is not None:
model_config = self.model.config.to_dict() if hasattr(self.model.config, "to_dict") else self.model.config
config = self._prepare_adalora_config(config, model_config)
self.peft_config[adapter_name] = config
self._find_and_replace(adapter_name)
if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != "none":
raise ValueError(
"AdaLoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters."
)
        trainable_mode_counter = 0
        for config in self.peft_config.values():
            if not config.inference_mode:
                trainable_mode_counter += 1
        if trainable_mode_counter > 1:
raise ValueError(
"AdaLoraModel supports only 1 trainable adapter. "
"When using multiple adapters, set inference_mode to True for all adapters except the one you want to train."
)
mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)
if self.peft_config[adapter_name].inference_mode:
_freeze_adapter(self.model, adapter_name)
else:
self.trainable_adapter_name = adapter_name
self.rankallocator = RankAllocator(self.model, self.peft_config[adapter_name], self.trainable_adapter_name)
def _find_and_replace(self, adapter_name):
lora_config = self.peft_config[adapter_name]
loaded_in_8bit = getattr(self.model, "is_loaded_in_8bit", False)
if loaded_in_8bit and not is_bnb_available():
raise ImportError(
"To use Lora with 8-bit quantization, please install the `bitsandbytes` package. "
"You can install it with `pip install bitsandbytes`."
)
is_target_modules_in_base_model = False
kwargs = {
"r": lora_config.init_r,
"lora_alpha": lora_config.lora_alpha,
"lora_dropout": lora_config.lora_dropout,
"fan_in_fan_out": lora_config.fan_in_fan_out,
"init_lora_weights": lora_config.init_lora_weights,
}
key_list = [key for key, _ in self.model.named_modules()]
for key in key_list:
if isinstance(lora_config.target_modules, str):
target_module_found = re.fullmatch(lora_config.target_modules, key)
else:
target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)
if target_module_found:
if not is_target_modules_in_base_model:
is_target_modules_in_base_model = True
parent, target, target_name = _get_submodules(self.model, key)
bias = target.bias is not None
if isinstance(target, LoraLayer):
target.update_layer(
adapter_name,
lora_config.init_r,
lora_config.lora_alpha,
lora_config.lora_dropout,
lora_config.init_lora_weights,
)
else:
if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):
kwargs.update(
{
"has_fp16_weights": target.state.has_fp16_weights,
"memory_efficient_backward": target.state.memory_efficient_backward,
"threshold": target.state.threshold,
"index": target.index,
}
)
new_module = SVDLinear8bitLt(
adapter_name, target.in_features, target.out_features, bias=bias, **kwargs
)
else:
if isinstance(target, torch.nn.Linear):
in_features, out_features = target.in_features, target.out_features
if kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
"Setting fan_in_fan_out to False."
)
kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False
elif isinstance(target, Conv1D):
in_features, out_features = (
target.weight.ds_shape if hasattr(target.weight, "ds_shape") else target.weight.shape
)
if not kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to False but the target module is `Conv1D`. "
"Setting fan_in_fan_out to True."
)
kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True
else:
raise ValueError(
f"Target module {target} is not supported. "
f"Currently, only `torch.nn.Linear` and `Conv1D` are supported."
)
new_module = SVDLinear(adapter_name, in_features, out_features, bias=bias, **kwargs)
self._replace_module(parent, target_name, new_module, target)
if not is_target_modules_in_base_model:
raise ValueError(
f"Target modules {lora_config.target_modules} not found in the base model. "
f"Please check the target modules and try again."
)
def __getattr__(self, name: str):
"""Forward missing attributes to the wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
return getattr(self.model, name)
def forward(self, *args, **kwargs):
outputs = self.model.forward(*args, **kwargs)
# Calculate the orthogonal regularization
orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight
assert orth_reg_weight > 0
if hasattr(outputs, "loss"):
regu_loss = 0
num_param = 0
for n, p in self.model.named_parameters():
if ("lora_A" in n or "lora_B" in n) and self.trainable_adapter_name in n:
para_cov = p @ p.T if "lora_A" in n else p.T @ p
I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov))
I.requires_grad = False
num_param += 1
regu_loss += torch.norm(para_cov - I, p="fro")
regu_loss = regu_loss / num_param
outputs.loss += orth_reg_weight * regu_loss
return outputs
def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name):
lora_config = self.peft_config[adapter_name]
for name, rank_idx in rank_pattern.items():
if isinstance(rank_idx, list):
rank = sum(rank_idx)
elif isinstance(rank_idx, torch.Tensor):
rank_idx = rank_idx.view(-1)
rank = rank_idx.sum().item()
else:
raise ValueError("Unexcepted type of rank_idx")
key = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
_, target, _ = _get_submodules(self.model, key)
lora_E_weights = target.lora_E[adapter_name][rank_idx]
lora_A_weights = target.lora_A[adapter_name][rank_idx]
lora_B_weights = target.lora_B[adapter_name][:, rank_idx]
ranknum = target.ranknum[adapter_name]
target.update_layer(
adapter_name,
rank,
lora_config.lora_alpha,
lora_config.lora_dropout,
lora_config.init_lora_weights,
)
with torch.no_grad():
if rank > 0:
target.lora_E[adapter_name].copy_(lora_E_weights)
target.lora_A[adapter_name].copy_(lora_A_weights)
target.lora_B[adapter_name].copy_(lora_B_weights)
# The scaling is exactly as the previous
target.ranknum[adapter_name].copy_(ranknum)
def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name):
for name, rank_idx in rank_pattern.items():
rank = sum(rank_idx)
prefix = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
for layer in ["lora_E", "lora_A", "lora_B"]:
key = f"base_model.model.{prefix}.{layer}.{adapter_name}"
if layer != "lora_B":
state_dict[key] = (
state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key]
)
else:
state_dict[key] = (
state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key]
)
return state_dict
def update_and_allocate(self, global_step):
lora_config = self.peft_config[self.trainable_adapter_name]
# Update the importance score and allocate the budget
if global_step < lora_config.total_step - lora_config.tfinal:
_, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step)
if rank_pattern:
lora_config.rank_pattern = rank_pattern
# Finalize the budget allocation
elif global_step == lora_config.total_step - lora_config.tfinal:
_, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True)
            # for some reason, this freezes the trainable parameters and nothing gets updated
# self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name)
lora_config.rank_pattern = rank_pattern
self.rankallocator.reset_ipt()
# Currently using inefficient way to mask the unimportant weights using the rank pattern
# due to problem mentioned above
elif global_step > lora_config.total_step - lora_config.tfinal:
self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern)
# Pass the function and do forward propagation
else:
return None
@staticmethod
def _prepare_adalora_config(peft_config, model_config):
if peft_config.target_modules is None: | if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING: | 1 | 2023-10-19 10:55:50+00:00 | 12k |
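
Editor's note: the `AdaLoraModel.forward` override in the row above wraps the base model's forward pass and adds an orthogonal regularization term over every active `lora_A`/`lora_B` matrix before returning the outputs. As a standalone illustration only (not part of the dataset row; the parameter names, tensor shapes, and the `orth_reg_weight` value below are assumptions), the penalty can be sketched as:

import torch

def orthogonal_regularization(named_params, adapter_name="default", orth_reg_weight=0.5):
    # Frobenius-norm penalty pushing P @ P.T (for lora_A) or P.T @ P (for lora_B)
    # towards the identity, averaged over all matching parameters,
    # mirroring the loop inside AdaLoraModel.forward above.
    regu_loss, num_param = 0.0, 0
    for name, p in named_params:
        if ("lora_A" in name or "lora_B" in name) and adapter_name in name:
            para_cov = p @ p.T if "lora_A" in name else p.T @ p
            identity = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov))
            num_param += 1
            regu_loss = regu_loss + torch.norm(para_cov - identity, p="fro")
    return orth_reg_weight * regu_loss / max(num_param, 1)

# Hypothetical adapter parameters: rank r=4, hidden size 16.
params = [
    ("encoder.q.lora_A.default", torch.randn(4, 16)),
    ("encoder.q.lora_B.default", torch.randn(16, 4)),
]
print(orthogonal_regularization(params))  # scalar tensor added to the task loss

The returned scalar corresponds to the `orth_reg_weight * regu_loss` term that the model adds to `outputs.loss` during training.
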
YuroFR/freqtrade-modded-crypto-trading-bot | tests/data/test_btanalysis.py | [
{
"identifier": "TimeRange",
"path": "freqtrade/configuration/timerange.py",
"snippet": "class TimeRange:\n \"\"\"\n object defining timerange inputs.\n [start/stop]type defines if [start/stop]ts shall be used.\n if *type is None, don't use corresponding startvalue.\n \"\"\"\n\n def __init__(self, starttype: Optional[str] = None, stoptype: Optional[str] = None,\n startts: int = 0, stopts: int = 0):\n\n self.starttype: Optional[str] = starttype\n self.stoptype: Optional[str] = stoptype\n self.startts: int = startts\n self.stopts: int = stopts\n\n @property\n def startdt(self) -> Optional[datetime]:\n if self.startts:\n return datetime.fromtimestamp(self.startts, tz=timezone.utc)\n return None\n\n @property\n def stopdt(self) -> Optional[datetime]:\n if self.stopts:\n return datetime.fromtimestamp(self.stopts, tz=timezone.utc)\n return None\n\n @property\n def timerange_str(self) -> str:\n \"\"\"\n Returns a string representation of the timerange as used by parse_timerange.\n Follows the format yyyymmdd-yyyymmdd - leaving out the parts that are not set.\n \"\"\"\n start = ''\n stop = ''\n if startdt := self.startdt:\n start = startdt.strftime('%Y%m%d')\n if stopdt := self.stopdt:\n stop = stopdt.strftime('%Y%m%d')\n return f\"{start}-{stop}\"\n\n @property\n def start_fmt(self) -> str:\n \"\"\"\n Returns a string representation of the start date\n \"\"\"\n val = 'unbounded'\n if (startdt := self.startdt) is not None:\n val = startdt.strftime(DATETIME_PRINT_FORMAT)\n return val\n\n @property\n def stop_fmt(self) -> str:\n \"\"\"\n Returns a string representation of the stop date\n \"\"\"\n val = 'unbounded'\n if (stopdt := self.stopdt) is not None:\n val = stopdt.strftime(DATETIME_PRINT_FORMAT)\n return val\n\n def __eq__(self, other):\n \"\"\"Override the default Equals behavior\"\"\"\n return (self.starttype == other.starttype and self.stoptype == other.stoptype\n and self.startts == other.startts and self.stopts == other.stopts)\n\n def subtract_start(self, seconds: int) -> None:\n \"\"\"\n Subtracts <seconds> from startts if startts is set.\n :param seconds: Seconds to subtract from starttime\n :return: None (Modifies the object in place)\n \"\"\"\n if self.startts:\n self.startts = self.startts - seconds\n\n def adjust_start_if_necessary(self, timeframe_secs: int, startup_candles: int,\n min_date: datetime) -> None:\n \"\"\"\n Adjust startts by <startup_candles> candles.\n Applies only if no startup-candles have been available.\n :param timeframe_secs: Timeframe in seconds e.g. `timeframe_to_seconds('5m')`\n :param startup_candles: Number of candles to move start-date forward\n :param min_date: Minimum data date loaded. 
Key kriterium to decide if start-time\n has to be moved\n :return: None (Modifies the object in place)\n \"\"\"\n if (not self.starttype or (startup_candles\n and min_date.timestamp() >= self.startts)):\n # If no startts was defined, or backtest-data starts at the defined backtest-date\n logger.warning(\"Moving start-date by %s candles to account for startup time.\",\n startup_candles)\n self.startts = int(min_date.timestamp() + timeframe_secs * startup_candles)\n self.starttype = 'date'\n\n @classmethod\n def parse_timerange(cls, text: Optional[str]) -> Self:\n \"\"\"\n Parse the value of the argument --timerange to determine what is the range desired\n :param text: value from --timerange\n :return: Start and End range period\n \"\"\"\n if not text:\n return cls(None, None, 0, 0)\n syntax = [(r'^-(\\d{8})$', (None, 'date')),\n (r'^(\\d{8})-$', ('date', None)),\n (r'^(\\d{8})-(\\d{8})$', ('date', 'date')),\n (r'^-(\\d{10})$', (None, 'date')),\n (r'^(\\d{10})-$', ('date', None)),\n (r'^(\\d{10})-(\\d{10})$', ('date', 'date')),\n (r'^-(\\d{13})$', (None, 'date')),\n (r'^(\\d{13})-$', ('date', None)),\n (r'^(\\d{13})-(\\d{13})$', ('date', 'date')),\n ]\n for rex, stype in syntax:\n # Apply the regular expression to text\n match = re.match(rex, text)\n if match: # Regex has matched\n rvals = match.groups()\n index = 0\n start: int = 0\n stop: int = 0\n if stype[0]:\n starts = rvals[index]\n if stype[0] == 'date' and len(starts) == 8:\n start = int(datetime.strptime(starts, '%Y%m%d').replace(\n tzinfo=timezone.utc).timestamp())\n elif len(starts) == 13:\n start = int(starts) // 1000\n else:\n start = int(starts)\n index += 1\n if stype[1]:\n stops = rvals[index]\n if stype[1] == 'date' and len(stops) == 8:\n stop = int(datetime.strptime(stops, '%Y%m%d').replace(\n tzinfo=timezone.utc).timestamp())\n elif len(stops) == 13:\n stop = int(stops) // 1000\n else:\n stop = int(stops)\n if start > stop > 0:\n raise OperationalException(\n f'Start date is after stop date for timerange \"{text}\"')\n return cls(stype[0], stype[1], start, stop)\n raise OperationalException(f'Incorrect syntax for timerange \"{text}\"')"
},
{
"identifier": "LAST_BT_RESULT_FN",
"path": "freqtrade/constants.py",
"snippet": "LAST_BT_RESULT_FN = '.last_result.json'"
},
{
"identifier": "BT_DATA_COLUMNS",
"path": "freqtrade/data/btanalysis.py",
"snippet": "BT_DATA_COLUMNS = ['pair', 'stake_amount', 'max_stake_amount', 'amount',\n 'open_date', 'close_date', 'open_rate', 'close_rate',\n 'fee_open', 'fee_close', 'trade_duration',\n 'profit_ratio', 'profit_abs', 'exit_reason',\n 'initial_stop_loss_abs', 'initial_stop_loss_ratio', 'stop_loss_abs',\n 'stop_loss_ratio', 'min_rate', 'max_rate', 'is_open', 'enter_tag',\n 'leverage', 'is_short', 'open_timestamp', 'close_timestamp', 'orders'\n ]"
},
{
"identifier": "analyze_trade_parallelism",
"path": "freqtrade/data/btanalysis.py",
"snippet": "def analyze_trade_parallelism(results: pd.DataFrame, timeframe: str) -> pd.DataFrame:\n \"\"\"\n Find overlapping trades by expanding each trade once per period it was open\n and then counting overlaps.\n :param results: Results Dataframe - can be loaded\n :param timeframe: Timeframe used for backtest\n :return: dataframe with open-counts per time-period in timeframe\n \"\"\"\n from freqtrade.exchange import timeframe_to_minutes\n timeframe_min = timeframe_to_minutes(timeframe)\n dates = [pd.Series(pd.date_range(row[1]['open_date'], row[1]['close_date'],\n freq=f\"{timeframe_min}min\"))\n for row in results[['open_date', 'close_date']].iterrows()]\n deltas = [len(x) for x in dates]\n dates = pd.Series(pd.concat(dates).values, name='date')\n df2 = pd.DataFrame(np.repeat(results.values, deltas, axis=0), columns=results.columns)\n\n df2 = pd.concat([dates, df2], axis=1)\n df2 = df2.set_index('date')\n df_final = df2.resample(f\"{timeframe_min}min\")[['pair']].count()\n df_final = df_final.rename({'pair': 'open_trades'}, axis=1)\n return df_final"
},
{
"identifier": "extract_trades_of_period",
"path": "freqtrade/data/btanalysis.py",
"snippet": "def extract_trades_of_period(dataframe: pd.DataFrame, trades: pd.DataFrame,\n date_index=False) -> pd.DataFrame:\n \"\"\"\n Compare trades and backtested pair DataFrames to get trades performed on backtested period\n :return: the DataFrame of a trades of period\n \"\"\"\n if date_index:\n trades_start = dataframe.index[0]\n trades_stop = dataframe.index[-1]\n else:\n trades_start = dataframe.iloc[0]['date']\n trades_stop = dataframe.iloc[-1]['date']\n trades = trades.loc[(trades['open_date'] >= trades_start) &\n (trades['close_date'] <= trades_stop)]\n return trades"
},
{
"identifier": "get_latest_backtest_filename",
"path": "freqtrade/data/btanalysis.py",
"snippet": "def get_latest_backtest_filename(directory: Union[Path, str]) -> str:\n \"\"\"\n Get latest backtest export based on '.last_result.json'.\n :param directory: Directory to search for last result\n :return: string containing the filename of the latest backtest result\n :raises: ValueError in the following cases:\n * Directory does not exist\n * `directory/.last_result.json` does not exist\n * `directory/.last_result.json` has the wrong content\n \"\"\"\n return get_latest_optimize_filename(directory, 'backtest')"
},
{
"identifier": "get_latest_hyperopt_file",
"path": "freqtrade/data/btanalysis.py",
"snippet": "def get_latest_hyperopt_file(\n directory: Union[Path, str], predef_filename: Optional[str] = None) -> Path:\n \"\"\"\n Get latest hyperopt export based on '.last_result.json'.\n :param directory: Directory to search for last result\n :return: string containing the filename of the latest hyperopt result\n :raises: ValueError in the following cases:\n * Directory does not exist\n * `directory/.last_result.json` does not exist\n * `directory/.last_result.json` has the wrong content\n \"\"\"\n if isinstance(directory, str):\n directory = Path(directory)\n if predef_filename:\n if Path(predef_filename).is_absolute():\n raise OperationalException(\n \"--hyperopt-filename expects only the filename, not an absolute path.\")\n return directory / predef_filename\n return directory / get_latest_hyperopt_filename(directory)"
},
{
"identifier": "load_backtest_data",
"path": "freqtrade/data/btanalysis.py",
"snippet": "def load_backtest_data(filename: Union[Path, str], strategy: Optional[str] = None) -> pd.DataFrame:\n \"\"\"\n Load backtest data file.\n :param filename: pathlib.Path object, or string pointing to a file or directory\n :param strategy: Strategy to load - mainly relevant for multi-strategy backtests\n Can also serve as protection to load the correct result.\n :return: a dataframe with the analysis results\n :raise: ValueError if loading goes wrong.\n \"\"\"\n data = load_backtest_stats(filename)\n if not isinstance(data, list):\n # new, nested format\n if 'strategy' not in data:\n raise ValueError(\"Unknown dataformat.\")\n\n if not strategy:\n if len(data['strategy']) == 1:\n strategy = list(data['strategy'].keys())[0]\n else:\n raise ValueError(\"Detected backtest result with more than one strategy. \"\n \"Please specify a strategy.\")\n\n if strategy not in data['strategy']:\n raise ValueError(f\"Strategy {strategy} not available in the backtest result.\")\n\n data = data['strategy'][strategy]['trades']\n df = pd.DataFrame(data)\n if not df.empty:\n df = _load_backtest_data_df_compatibility(df)\n\n else:\n # old format - only with lists.\n raise OperationalException(\n \"Backtest-results with only trades data are no longer supported.\")\n if not df.empty:\n df = df.sort_values(\"open_date\").reset_index(drop=True)\n return df"
},
{
"identifier": "load_backtest_metadata",
"path": "freqtrade/data/btanalysis.py",
"snippet": "def load_backtest_metadata(filename: Union[Path, str]) -> Dict[str, Any]:\n \"\"\"\n Read metadata dictionary from backtest results file without reading and deserializing entire\n file.\n :param filename: path to backtest results file.\n :return: metadata dict or None if metadata is not present.\n \"\"\"\n filename = get_backtest_metadata_filename(filename)\n try:\n with filename.open() as fp:\n return json_load(fp)\n except FileNotFoundError:\n return {}\n except Exception as e:\n raise OperationalException('Unexpected error while loading backtest metadata.') from e"
},
{
"identifier": "load_trades",
"path": "freqtrade/data/btanalysis.py",
"snippet": "def load_trades(source: str, db_url: str, exportfilename: Path,\n no_trades: bool = False, strategy: Optional[str] = None) -> pd.DataFrame:\n \"\"\"\n Based on configuration option 'trade_source':\n * loads data from DB (using `db_url`)\n * loads data from backtestfile (using `exportfilename`)\n :param source: \"DB\" or \"file\" - specify source to load from\n :param db_url: sqlalchemy formatted url to a database\n :param exportfilename: Json file generated by backtesting\n :param no_trades: Skip using trades, only return backtesting data columns\n :return: DataFrame containing trades\n \"\"\"\n if no_trades:\n df = pd.DataFrame(columns=BT_DATA_COLUMNS)\n return df\n\n if source == \"DB\":\n return load_trades_from_db(db_url)\n elif source == \"file\":\n return load_backtest_data(exportfilename, strategy)"
},
{
"identifier": "load_trades_from_db",
"path": "freqtrade/data/btanalysis.py",
"snippet": "def load_trades_from_db(db_url: str, strategy: Optional[str] = None) -> pd.DataFrame:\n \"\"\"\n Load trades from a DB (using dburl)\n :param db_url: Sqlite url (default format sqlite:///tradesv3.dry-run.sqlite)\n :param strategy: Strategy to load - mainly relevant for multi-strategy backtests\n Can also serve as protection to load the correct result.\n :return: Dataframe containing Trades\n \"\"\"\n init_db(db_url)\n\n filters = []\n if strategy:\n filters.append(Trade.strategy == strategy)\n trades = trade_list_to_dataframe(list(Trade.get_trades(filters).all()))\n\n return trades"
},
{
"identifier": "load_data",
"path": "freqtrade/data/history/history_utils.py",
"snippet": "def load_data(datadir: Path,\n timeframe: str,\n pairs: List[str], *,\n timerange: Optional[TimeRange] = None,\n fill_up_missing: bool = True,\n startup_candles: int = 0,\n fail_without_data: bool = False,\n data_format: str = 'feather',\n candle_type: CandleType = CandleType.SPOT,\n user_futures_funding_rate: Optional[int] = None,\n ) -> Dict[str, DataFrame]:\n \"\"\"\n Load ohlcv history data for a list of pairs.\n\n :param datadir: Path to the data storage location.\n :param timeframe: Timeframe (e.g. \"5m\")\n :param pairs: List of pairs to load\n :param timerange: Limit data to be loaded to this timerange\n :param fill_up_missing: Fill missing values with \"No action\"-candles\n :param startup_candles: Additional candles to load at the start of the period\n :param fail_without_data: Raise OperationalException if no data is found.\n :param data_format: Data format which should be used. Defaults to json\n :param candle_type: Any of the enum CandleType (must match trading mode!)\n :return: dict(<pair>:<Dataframe>)\n \"\"\"\n result: Dict[str, DataFrame] = {}\n if startup_candles > 0 and timerange:\n logger.info(f'Using indicator startup period: {startup_candles} ...')\n\n data_handler = get_datahandler(datadir, data_format)\n\n for pair in pairs:\n hist = load_pair_history(pair=pair, timeframe=timeframe,\n datadir=datadir, timerange=timerange,\n fill_up_missing=fill_up_missing,\n startup_candles=startup_candles,\n data_handler=data_handler,\n candle_type=candle_type,\n )\n if not hist.empty:\n result[pair] = hist\n else:\n if candle_type is CandleType.FUNDING_RATE and user_futures_funding_rate is not None:\n logger.warn(f\"{pair} using user specified [{user_futures_funding_rate}]\")\n elif candle_type not in (CandleType.SPOT, CandleType.FUTURES):\n result[pair] = DataFrame(columns=[\"date\", \"open\", \"close\", \"high\", \"low\", \"volume\"])\n\n if fail_without_data and not result:\n raise OperationalException(\"No data found. Terminating.\")\n return result"
},
{
"identifier": "load_pair_history",
"path": "freqtrade/data/history/history_utils.py",
"snippet": "def load_pair_history(pair: str,\n timeframe: str,\n datadir: Path, *,\n timerange: Optional[TimeRange] = None,\n fill_up_missing: bool = True,\n drop_incomplete: bool = False,\n startup_candles: int = 0,\n data_format: Optional[str] = None,\n data_handler: Optional[IDataHandler] = None,\n candle_type: CandleType = CandleType.SPOT\n ) -> DataFrame:\n \"\"\"\n Load cached ohlcv history for the given pair.\n\n :param pair: Pair to load data for\n :param timeframe: Timeframe (e.g. \"5m\")\n :param datadir: Path to the data storage location.\n :param data_format: Format of the data. Ignored if data_handler is set.\n :param timerange: Limit data to be loaded to this timerange\n :param fill_up_missing: Fill missing values with \"No action\"-candles\n :param drop_incomplete: Drop last candle assuming it may be incomplete.\n :param startup_candles: Additional candles to load at the start of the period\n :param data_handler: Initialized data-handler to use.\n Will be initialized from data_format if not set\n :param candle_type: Any of the enum CandleType (must match trading mode!)\n :return: DataFrame with ohlcv data, or empty DataFrame\n \"\"\"\n data_handler = get_datahandler(datadir, data_format, data_handler)\n\n return data_handler.ohlcv_load(pair=pair,\n timeframe=timeframe,\n timerange=timerange,\n fill_missing=fill_up_missing,\n drop_incomplete=drop_incomplete,\n startup_candles=startup_candles,\n candle_type=candle_type,\n )"
},
{
"identifier": "calculate_cagr",
"path": "freqtrade/data/metrics.py",
"snippet": "def calculate_cagr(days_passed: int, starting_balance: float, final_balance: float) -> float:\n \"\"\"\n Calculate CAGR\n :param days_passed: Days passed between start and ending balance\n :param starting_balance: Starting balance\n :param final_balance: Final balance to calculate CAGR against\n :return: CAGR\n \"\"\"\n return (final_balance / starting_balance) ** (1 / (days_passed / 365)) - 1"
},
{
"identifier": "calculate_calmar",
"path": "freqtrade/data/metrics.py",
"snippet": "def calculate_calmar(trades: pd.DataFrame, min_date: datetime, max_date: datetime,\n starting_balance: float) -> float:\n \"\"\"\n Calculate calmar\n :param trades: DataFrame containing trades (requires columns close_date and profit_abs)\n :return: calmar\n \"\"\"\n if (len(trades) == 0) or (min_date is None) or (max_date is None) or (min_date == max_date):\n return 0\n\n total_profit = trades['profit_abs'].sum() / starting_balance\n days_period = max(1, (max_date - min_date).days)\n\n # adding slippage of 0.1% per trade\n # total_profit = total_profit - 0.0005\n expected_returns_mean = total_profit / days_period * 100\n\n # calculate max drawdown\n try:\n _, _, _, _, _, max_drawdown = calculate_max_drawdown(\n trades, value_col=\"profit_abs\", starting_balance=starting_balance\n )\n except ValueError:\n max_drawdown = 0\n\n if max_drawdown != 0:\n calmar_ratio = expected_returns_mean / max_drawdown * math.sqrt(365)\n else:\n # Define high (negative) calmar ratio to be clear that this is NOT optimal.\n calmar_ratio = -100\n\n # print(expected_returns_mean, max_drawdown, calmar_ratio)\n return calmar_ratio"
},
{
"identifier": "calculate_csum",
"path": "freqtrade/data/metrics.py",
"snippet": "def calculate_csum(trades: pd.DataFrame, starting_balance: float = 0) -> Tuple[float, float]:\n \"\"\"\n Calculate min/max cumsum of trades, to show if the wallet/stake amount ratio is sane\n :param trades: DataFrame containing trades (requires columns close_date and profit_percent)\n :param starting_balance: Add starting balance to results, to show the wallets high / low points\n :return: Tuple (float, float) with cumsum of profit_abs\n :raise: ValueError if trade-dataframe was found empty.\n \"\"\"\n if len(trades) == 0:\n raise ValueError(\"Trade dataframe empty.\")\n\n csum_df = pd.DataFrame()\n csum_df['sum'] = trades['profit_abs'].cumsum()\n csum_min = csum_df['sum'].min() + starting_balance\n csum_max = csum_df['sum'].max() + starting_balance\n\n return csum_min, csum_max"
},
{
"identifier": "calculate_expectancy",
"path": "freqtrade/data/metrics.py",
"snippet": "def calculate_expectancy(trades: pd.DataFrame) -> Tuple[float, float]:\n \"\"\"\n Calculate expectancy\n :param trades: DataFrame containing trades (requires columns close_date and profit_abs)\n :return: expectancy, expectancy_ratio\n \"\"\"\n\n expectancy = 0\n expectancy_ratio = 100\n\n if len(trades) > 0:\n winning_trades = trades.loc[trades['profit_abs'] > 0]\n losing_trades = trades.loc[trades['profit_abs'] < 0]\n profit_sum = winning_trades['profit_abs'].sum()\n loss_sum = abs(losing_trades['profit_abs'].sum())\n nb_win_trades = len(winning_trades)\n nb_loss_trades = len(losing_trades)\n\n average_win = (profit_sum / nb_win_trades) if nb_win_trades > 0 else 0\n average_loss = (loss_sum / nb_loss_trades) if nb_loss_trades > 0 else 0\n winrate = (nb_win_trades / len(trades))\n loserate = (nb_loss_trades / len(trades))\n\n expectancy = (winrate * average_win) - (loserate * average_loss)\n if (average_loss > 0):\n risk_reward_ratio = average_win / average_loss\n expectancy_ratio = ((1 + risk_reward_ratio) * winrate) - 1\n\n return expectancy, expectancy_ratio"
},
{
"identifier": "calculate_market_change",
"path": "freqtrade/data/metrics.py",
"snippet": "def calculate_market_change(data: Dict[str, pd.DataFrame], column: str = \"close\") -> float:\n \"\"\"\n Calculate market change based on \"column\".\n Calculation is done by taking the first non-null and the last non-null element of each column\n and calculating the pctchange as \"(last - first) / first\".\n Then the results per pair are combined as mean.\n\n :param data: Dict of Dataframes, dict key should be pair.\n :param column: Column in the original dataframes to use\n :return:\n \"\"\"\n tmp_means = []\n for pair, df in data.items():\n start = df[column].dropna().iloc[0]\n end = df[column].dropna().iloc[-1]\n tmp_means.append((end - start) / start)\n\n return float(np.mean(tmp_means))"
},
{
"identifier": "calculate_max_drawdown",
"path": "freqtrade/data/metrics.py",
"snippet": "def calculate_max_drawdown(trades: pd.DataFrame, *, date_col: str = 'close_date',\n value_col: str = 'profit_abs', starting_balance: float = 0,\n relative: bool = False\n ) -> Tuple[float, pd.Timestamp, pd.Timestamp, float, float, float]:\n \"\"\"\n Calculate max drawdown and the corresponding close dates\n :param trades: DataFrame containing trades (requires columns close_date and profit_ratio)\n :param date_col: Column in DataFrame to use for dates (defaults to 'close_date')\n :param value_col: Column in DataFrame to use for values (defaults to 'profit_abs')\n :param starting_balance: Portfolio starting balance - properly calculate relative drawdown.\n :return: Tuple (float, highdate, lowdate, highvalue, lowvalue, relative_drawdown)\n with absolute max drawdown, high and low time and high and low value,\n and the relative account drawdown\n :raise: ValueError if trade-dataframe was found empty.\n \"\"\"\n if len(trades) == 0:\n raise ValueError(\"Trade dataframe empty.\")\n profit_results = trades.sort_values(date_col).reset_index(drop=True)\n max_drawdown_df = _calc_drawdown_series(\n profit_results,\n date_col=date_col,\n value_col=value_col,\n starting_balance=starting_balance\n )\n\n idxmin = max_drawdown_df['drawdown_relative'].idxmax() if relative \\\n else max_drawdown_df['drawdown'].idxmin()\n if idxmin == 0:\n raise ValueError(\"No losing trade, therefore no drawdown.\")\n high_date = profit_results.loc[max_drawdown_df.iloc[:idxmin]['high_value'].idxmax(), date_col]\n low_date = profit_results.loc[idxmin, date_col]\n high_val = max_drawdown_df.loc[max_drawdown_df.iloc[:idxmin]\n ['high_value'].idxmax(), 'cumulative']\n low_val = max_drawdown_df.loc[idxmin, 'cumulative']\n max_drawdown_rel = max_drawdown_df.loc[idxmin, 'drawdown_relative']\n\n return (\n abs(max_drawdown_df.loc[idxmin, 'drawdown']),\n high_date,\n low_date,\n high_val,\n low_val,\n max_drawdown_rel\n )"
},
{
"identifier": "calculate_sharpe",
"path": "freqtrade/data/metrics.py",
"snippet": "def calculate_sharpe(trades: pd.DataFrame, min_date: datetime, max_date: datetime,\n starting_balance: float) -> float:\n \"\"\"\n Calculate sharpe\n :param trades: DataFrame containing trades (requires column profit_abs)\n :return: sharpe\n \"\"\"\n if (len(trades) == 0) or (min_date is None) or (max_date is None) or (min_date == max_date):\n return 0\n\n total_profit = trades['profit_abs'] / starting_balance\n days_period = max(1, (max_date - min_date).days)\n\n expected_returns_mean = total_profit.sum() / days_period\n up_stdev = np.std(total_profit)\n\n if up_stdev != 0:\n sharp_ratio = expected_returns_mean / up_stdev * np.sqrt(365)\n else:\n # Define high (negative) sharpe ratio to be clear that this is NOT optimal.\n sharp_ratio = -100\n\n # print(expected_returns_mean, up_stdev, sharp_ratio)\n return sharp_ratio"
},
{
"identifier": "calculate_sortino",
"path": "freqtrade/data/metrics.py",
"snippet": "def calculate_sortino(trades: pd.DataFrame, min_date: datetime, max_date: datetime,\n starting_balance: float) -> float:\n \"\"\"\n Calculate sortino\n :param trades: DataFrame containing trades (requires columns profit_abs)\n :return: sortino\n \"\"\"\n if (len(trades) == 0) or (min_date is None) or (max_date is None) or (min_date == max_date):\n return 0\n\n total_profit = trades['profit_abs'] / starting_balance\n days_period = max(1, (max_date - min_date).days)\n\n expected_returns_mean = total_profit.sum() / days_period\n\n down_stdev = np.std(trades.loc[trades['profit_abs'] < 0, 'profit_abs'] / starting_balance)\n\n if down_stdev != 0 and not np.isnan(down_stdev):\n sortino_ratio = expected_returns_mean / down_stdev * np.sqrt(365)\n else:\n # Define high (negative) sortino ratio to be clear that this is NOT optimal.\n sortino_ratio = -100\n\n # print(expected_returns_mean, down_stdev, sortino_ratio)\n return sortino_ratio"
},
{
"identifier": "calculate_underwater",
"path": "freqtrade/data/metrics.py",
"snippet": "def calculate_underwater(trades: pd.DataFrame, *, date_col: str = 'close_date',\n value_col: str = 'profit_ratio', starting_balance: float = 0.0\n ):\n \"\"\"\n Calculate max drawdown and the corresponding close dates\n :param trades: DataFrame containing trades (requires columns close_date and profit_ratio)\n :param date_col: Column in DataFrame to use for dates (defaults to 'close_date')\n :param value_col: Column in DataFrame to use for values (defaults to 'profit_ratio')\n :return: Tuple (float, highdate, lowdate, highvalue, lowvalue) with absolute max drawdown,\n high and low time and high and low value.\n :raise: ValueError if trade-dataframe was found empty.\n \"\"\"\n if len(trades) == 0:\n raise ValueError(\"Trade dataframe empty.\")\n profit_results = trades.sort_values(date_col).reset_index(drop=True)\n max_drawdown_df = _calc_drawdown_series(\n profit_results,\n date_col=date_col,\n value_col=value_col,\n starting_balance=starting_balance)\n\n return max_drawdown_df"
},
{
"identifier": "combine_dataframes_with_mean",
"path": "freqtrade/data/metrics.py",
"snippet": "def combine_dataframes_with_mean(data: Dict[str, pd.DataFrame],\n column: str = \"close\") -> pd.DataFrame:\n \"\"\"\n Combine multiple dataframes \"column\"\n :param data: Dict of Dataframes, dict key should be pair.\n :param column: Column in the original dataframes to use\n :return: DataFrame with the column renamed to the dict key, and a column\n named mean, containing the mean of all pairs.\n :raise: ValueError if no data is provided.\n \"\"\"\n df_comb = pd.concat([data[pair].set_index('date').rename(\n {column: pair}, axis=1)[pair] for pair in data], axis=1)\n\n df_comb['mean'] = df_comb.mean(axis=1)\n\n return df_comb"
},
{
"identifier": "create_cum_profit",
"path": "freqtrade/data/metrics.py",
"snippet": "def create_cum_profit(df: pd.DataFrame, trades: pd.DataFrame, col_name: str,\n timeframe: str) -> pd.DataFrame:\n \"\"\"\n Adds a column `col_name` with the cumulative profit for the given trades array.\n :param df: DataFrame with date index\n :param trades: DataFrame containing trades (requires columns close_date and profit_abs)\n :param col_name: Column name that will be assigned the results\n :param timeframe: Timeframe used during the operations\n :return: Returns df with one additional column, col_name, containing the cumulative profit.\n :raise: ValueError if trade-dataframe was found empty.\n \"\"\"\n if len(trades) == 0:\n raise ValueError(\"Trade dataframe empty.\")\n from freqtrade.exchange import timeframe_to_minutes\n timeframe_minutes = timeframe_to_minutes(timeframe)\n # Resample to timeframe to make sure trades match candles\n _trades_sum = trades.resample(f'{timeframe_minutes}min', on='close_date'\n )[['profit_abs']].sum()\n df.loc[:, col_name] = _trades_sum['profit_abs'].cumsum()\n # Set first value to 0\n df.loc[df.iloc[0].name, col_name] = 0\n # FFill to get continuous\n df[col_name] = df[col_name].ffill()\n return df"
},
{
"identifier": "OperationalException",
"path": "freqtrade/exceptions.py",
"snippet": "class OperationalException(FreqtradeException):\n \"\"\"\n Requires manual intervention and will stop the bot.\n Most of the time, this is caused by an invalid Configuration.\n \"\"\""
},
{
"identifier": "dt_utc",
"path": "freqtrade/util/datetime_helpers.py",
"snippet": "def dt_utc(year: int, month: int, day: int, hour: int = 0, minute: int = 0, second: int = 0,\n microsecond: int = 0) -> datetime:\n \"\"\"Return a datetime in UTC.\"\"\"\n return datetime(year, month, day, hour, minute, second, microsecond, tzinfo=timezone.utc)"
},
{
"identifier": "CURRENT_TEST_STRATEGY",
"path": "tests/conftest.py",
"snippet": "CURRENT_TEST_STRATEGY = 'StrategyTestV3'"
},
{
"identifier": "create_mock_trades",
"path": "tests/conftest.py",
"snippet": "def create_mock_trades(fee, is_short: Optional[bool] = False, use_db: bool = True):\n \"\"\"\n Create some fake trades ...\n :param is_short: Optional bool, None creates a mix of long and short trades.\n \"\"\"\n def add_trade(trade):\n if use_db:\n Trade.session.add(trade)\n else:\n LocalTrade.add_bt_trade(trade)\n is_short1 = is_short if is_short is not None else True\n is_short2 = is_short if is_short is not None else False\n # Simulate dry_run entries\n trade = mock_trade_1(fee, is_short1)\n add_trade(trade)\n\n trade = mock_trade_2(fee, is_short1)\n add_trade(trade)\n\n trade = mock_trade_3(fee, is_short2)\n add_trade(trade)\n\n trade = mock_trade_4(fee, is_short2)\n add_trade(trade)\n\n trade = mock_trade_5(fee, is_short2)\n add_trade(trade)\n\n trade = mock_trade_6(fee, is_short1)\n add_trade(trade)\n\n if use_db:\n Trade.commit()"
},
{
"identifier": "MOCK_TRADE_COUNT",
"path": "tests/conftest_trades.py",
"snippet": "MOCK_TRADE_COUNT = 6"
}
] | from datetime import datetime, timedelta, timezone
from pathlib import Path
from unittest.mock import MagicMock
from pandas import DataFrame, DateOffset, Timestamp, to_datetime
from freqtrade.configuration import TimeRange
from freqtrade.constants import LAST_BT_RESULT_FN
from freqtrade.data.btanalysis import (BT_DATA_COLUMNS, analyze_trade_parallelism,
extract_trades_of_period, get_latest_backtest_filename,
get_latest_hyperopt_file, load_backtest_data,
load_backtest_metadata, load_trades, load_trades_from_db)
from freqtrade.data.history import load_data, load_pair_history
from freqtrade.data.metrics import (calculate_cagr, calculate_calmar, calculate_csum,
calculate_expectancy, calculate_market_change,
calculate_max_drawdown, calculate_sharpe, calculate_sortino,
calculate_underwater, combine_dataframes_with_mean,
create_cum_profit)
from freqtrade.exceptions import OperationalException
from freqtrade.util import dt_utc
from tests.conftest import CURRENT_TEST_STRATEGY, create_mock_trades
from tests.conftest_trades import MOCK_TRADE_COUNT
import pytest | 10,229 | load_backtest_data(filename)
def test_load_backtest_data_new_format(testdatadir):
filename = testdatadir / "backtest_results/backtest-result.json"
bt_data = load_backtest_data(filename)
assert isinstance(bt_data, DataFrame)
assert set(bt_data.columns) == set(BT_DATA_COLUMNS)
assert len(bt_data) == 179
# Test loading from string (must yield same result)
bt_data2 = load_backtest_data(str(filename))
assert bt_data.equals(bt_data2)
# Test loading from folder (must yield same result)
bt_data3 = load_backtest_data(testdatadir / "backtest_results")
assert bt_data.equals(bt_data3)
with pytest.raises(ValueError, match=r"File .* does not exist\."):
load_backtest_data("filename" + "nofile")
with pytest.raises(ValueError, match=r"Unknown dataformat."):
load_backtest_data(testdatadir / "backtest_results" / LAST_BT_RESULT_FN)
def test_load_backtest_data_multi(testdatadir):
filename = testdatadir / "backtest_results/backtest-result_multistrat.json"
for strategy in ('StrategyTestV2', 'TestStrategy'):
bt_data = load_backtest_data(filename, strategy=strategy)
assert isinstance(bt_data, DataFrame)
assert set(bt_data.columns) == set(
BT_DATA_COLUMNS)
assert len(bt_data) == 179
# Test loading from string (must yield same result)
bt_data2 = load_backtest_data(str(filename), strategy=strategy)
assert bt_data.equals(bt_data2)
with pytest.raises(ValueError, match=r"Strategy XYZ not available in the backtest result\."):
load_backtest_data(filename, strategy='XYZ')
with pytest.raises(ValueError, match=r"Detected backtest result with more than one strategy.*"):
load_backtest_data(filename)
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize('is_short', [False, True])
def test_load_trades_from_db(default_conf, fee, is_short, mocker):
create_mock_trades(fee, is_short)
# patch init_db so the database is not initialized again
init_mock = mocker.patch('freqtrade.data.btanalysis.init_db', MagicMock())
trades = load_trades_from_db(db_url=default_conf['db_url'])
assert init_mock.call_count == 1
assert len(trades) == MOCK_TRADE_COUNT
assert isinstance(trades, DataFrame)
assert "pair" in trades.columns
assert "open_date" in trades.columns
assert "profit_ratio" in trades.columns
for col in BT_DATA_COLUMNS:
if col not in ['index', 'open_at_end']:
assert col in trades.columns
trades = load_trades_from_db(db_url=default_conf['db_url'], strategy=CURRENT_TEST_STRATEGY)
assert len(trades) == 4
trades = load_trades_from_db(db_url=default_conf['db_url'], strategy='NoneStrategy')
assert len(trades) == 0
def test_extract_trades_of_period(testdatadir):
pair = "UNITTEST/BTC"
# 2017-11-14 06:07:00
timerange = TimeRange('date', None, 1510639620, 0)
data = load_pair_history(pair=pair, timeframe='1m',
datadir=testdatadir, timerange=timerange)
trades = DataFrame(
{'pair': [pair, pair, pair, pair],
'profit_ratio': [0.0, 0.1, -0.2, -0.5],
'profit_abs': [0.0, 1, -2, -5],
'open_date': to_datetime([datetime(2017, 11, 13, 15, 40, 0, tzinfo=timezone.utc),
datetime(2017, 11, 14, 9, 41, 0, tzinfo=timezone.utc),
datetime(2017, 11, 14, 14, 20, 0, tzinfo=timezone.utc),
datetime(2017, 11, 15, 3, 40, 0, tzinfo=timezone.utc),
], utc=True
),
'close_date': to_datetime([datetime(2017, 11, 13, 16, 40, 0, tzinfo=timezone.utc),
datetime(2017, 11, 14, 10, 41, 0, tzinfo=timezone.utc),
datetime(2017, 11, 14, 15, 25, 0, tzinfo=timezone.utc),
datetime(2017, 11, 15, 3, 55, 0, tzinfo=timezone.utc),
], utc=True)
})
trades1 = extract_trades_of_period(data, trades)
# First and last trade are dropped as they are out of range
assert len(trades1) == 2
assert trades1.iloc[0].open_date == datetime(2017, 11, 14, 9, 41, 0, tzinfo=timezone.utc)
assert trades1.iloc[0].close_date == datetime(2017, 11, 14, 10, 41, 0, tzinfo=timezone.utc)
assert trades1.iloc[-1].open_date == datetime(2017, 11, 14, 14, 20, 0, tzinfo=timezone.utc)
assert trades1.iloc[-1].close_date == datetime(2017, 11, 14, 15, 25, 0, tzinfo=timezone.utc)
def test_analyze_trade_parallelism(testdatadir):
filename = testdatadir / "backtest_results/backtest-result.json"
bt_data = load_backtest_data(filename)
res = analyze_trade_parallelism(bt_data, "5m")
assert isinstance(res, DataFrame)
assert 'open_trades' in res.columns
assert res['open_trades'].max() == 3
assert res['open_trades'].min() == 0
def test_load_trades(default_conf, mocker):
db_mock = mocker.patch("freqtrade.data.btanalysis.load_trades_from_db", MagicMock())
bt_mock = mocker.patch("freqtrade.data.btanalysis.load_backtest_data", MagicMock())
|
def test_get_latest_backtest_filename(testdatadir, mocker):
with pytest.raises(ValueError, match=r"Directory .* does not exist\."):
get_latest_backtest_filename(testdatadir / 'does_not_exist')
with pytest.raises(ValueError,
match=r"Directory .* does not seem to contain .*"):
get_latest_backtest_filename(testdatadir)
testdir_bt = testdatadir / "backtest_results"
res = get_latest_backtest_filename(testdir_bt)
assert res == 'backtest-result.json'
res = get_latest_backtest_filename(str(testdir_bt))
assert res == 'backtest-result.json'
mocker.patch("freqtrade.data.btanalysis.json_load", return_value={})
with pytest.raises(ValueError, match=r"Invalid '.last_result.json' format."):
get_latest_backtest_filename(testdir_bt)
def test_get_latest_hyperopt_file(testdatadir):
res = get_latest_hyperopt_file(testdatadir / 'does_not_exist', 'testfile.pickle')
assert res == testdatadir / 'does_not_exist/testfile.pickle'
res = get_latest_hyperopt_file(testdatadir.parent)
assert res == testdatadir.parent / "hyperopt_results.pickle"
res = get_latest_hyperopt_file(str(testdatadir.parent))
assert res == testdatadir.parent / "hyperopt_results.pickle"
# Test with absolute path
with pytest.raises(
OperationalException,
match="--hyperopt-filename expects only the filename, not an absolute path."):
get_latest_hyperopt_file(str(testdatadir.parent), str(testdatadir.parent))
def test_load_backtest_metadata(mocker, testdatadir):
res = load_backtest_metadata(testdatadir / 'nonexistant.file.json')
assert res == {}
mocker.patch('freqtrade.data.btanalysis.get_backtest_metadata_filename')
mocker.patch('freqtrade.data.btanalysis.json_load', side_effect=Exception())
with pytest.raises(OperationalException,
match=r"Unexpected error.*loading backtest metadata\."):
load_backtest_metadata(testdatadir / 'nonexistant.file.json')
def test_load_backtest_data_old_format(testdatadir, mocker):
filename = testdatadir / "backtest-result_test222.json"
mocker.patch('freqtrade.data.btanalysis.load_backtest_stats', return_value=[])
with pytest.raises(OperationalException,
match=r"Backtest-results with only trades data are no longer supported."):
load_backtest_data(filename)
def test_load_backtest_data_new_format(testdatadir):
filename = testdatadir / "backtest_results/backtest-result.json"
bt_data = load_backtest_data(filename)
assert isinstance(bt_data, DataFrame)
assert set(bt_data.columns) == set(BT_DATA_COLUMNS)
assert len(bt_data) == 179
# Test loading from string (must yield same result)
bt_data2 = load_backtest_data(str(filename))
assert bt_data.equals(bt_data2)
# Test loading from folder (must yield same result)
bt_data3 = load_backtest_data(testdatadir / "backtest_results")
assert bt_data.equals(bt_data3)
with pytest.raises(ValueError, match=r"File .* does not exist\."):
load_backtest_data("filename" + "nofile")
with pytest.raises(ValueError, match=r"Unknown dataformat."):
load_backtest_data(testdatadir / "backtest_results" / LAST_BT_RESULT_FN)
def test_load_backtest_data_multi(testdatadir):
filename = testdatadir / "backtest_results/backtest-result_multistrat.json"
for strategy in ('StrategyTestV2', 'TestStrategy'):
bt_data = load_backtest_data(filename, strategy=strategy)
assert isinstance(bt_data, DataFrame)
assert set(bt_data.columns) == set(
BT_DATA_COLUMNS)
assert len(bt_data) == 179
# Test loading from string (must yield same result)
bt_data2 = load_backtest_data(str(filename), strategy=strategy)
assert bt_data.equals(bt_data2)
with pytest.raises(ValueError, match=r"Strategy XYZ not available in the backtest result\."):
load_backtest_data(filename, strategy='XYZ')
with pytest.raises(ValueError, match=r"Detected backtest result with more than one strategy.*"):
load_backtest_data(filename)
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize('is_short', [False, True])
def test_load_trades_from_db(default_conf, fee, is_short, mocker):
create_mock_trades(fee, is_short)
# patch init_db so the database is not initialized again
init_mock = mocker.patch('freqtrade.data.btanalysis.init_db', MagicMock())
trades = load_trades_from_db(db_url=default_conf['db_url'])
assert init_mock.call_count == 1
assert len(trades) == MOCK_TRADE_COUNT
assert isinstance(trades, DataFrame)
assert "pair" in trades.columns
assert "open_date" in trades.columns
assert "profit_ratio" in trades.columns
for col in BT_DATA_COLUMNS:
if col not in ['index', 'open_at_end']:
assert col in trades.columns
trades = load_trades_from_db(db_url=default_conf['db_url'], strategy=CURRENT_TEST_STRATEGY)
assert len(trades) == 4
trades = load_trades_from_db(db_url=default_conf['db_url'], strategy='NoneStrategy')
assert len(trades) == 0
def test_extract_trades_of_period(testdatadir):
pair = "UNITTEST/BTC"
# 2017-11-14 06:07:00
timerange = TimeRange('date', None, 1510639620, 0)
data = load_pair_history(pair=pair, timeframe='1m',
datadir=testdatadir, timerange=timerange)
trades = DataFrame(
{'pair': [pair, pair, pair, pair],
'profit_ratio': [0.0, 0.1, -0.2, -0.5],
'profit_abs': [0.0, 1, -2, -5],
'open_date': to_datetime([datetime(2017, 11, 13, 15, 40, 0, tzinfo=timezone.utc),
datetime(2017, 11, 14, 9, 41, 0, tzinfo=timezone.utc),
datetime(2017, 11, 14, 14, 20, 0, tzinfo=timezone.utc),
datetime(2017, 11, 15, 3, 40, 0, tzinfo=timezone.utc),
], utc=True
),
'close_date': to_datetime([datetime(2017, 11, 13, 16, 40, 0, tzinfo=timezone.utc),
datetime(2017, 11, 14, 10, 41, 0, tzinfo=timezone.utc),
datetime(2017, 11, 14, 15, 25, 0, tzinfo=timezone.utc),
datetime(2017, 11, 15, 3, 55, 0, tzinfo=timezone.utc),
], utc=True)
})
trades1 = extract_trades_of_period(data, trades)
# First and last trade are dropped as they are out of range
assert len(trades1) == 2
assert trades1.iloc[0].open_date == datetime(2017, 11, 14, 9, 41, 0, tzinfo=timezone.utc)
assert trades1.iloc[0].close_date == datetime(2017, 11, 14, 10, 41, 0, tzinfo=timezone.utc)
assert trades1.iloc[-1].open_date == datetime(2017, 11, 14, 14, 20, 0, tzinfo=timezone.utc)
assert trades1.iloc[-1].close_date == datetime(2017, 11, 14, 15, 25, 0, tzinfo=timezone.utc)
def test_analyze_trade_parallelism(testdatadir):
filename = testdatadir / "backtest_results/backtest-result.json"
bt_data = load_backtest_data(filename)
res = analyze_trade_parallelism(bt_data, "5m")
assert isinstance(res, DataFrame)
assert 'open_trades' in res.columns
assert res['open_trades'].max() == 3
assert res['open_trades'].min() == 0
def test_load_trades(default_conf, mocker):
db_mock = mocker.patch("freqtrade.data.btanalysis.load_trades_from_db", MagicMock())
bt_mock = mocker.patch("freqtrade.data.btanalysis.load_backtest_data", MagicMock())
| load_trades("DB", | 9 | 2023-10-21 10:02:05+00:00 | 12k |
yanzhh/HGERE | transformers/src/transformers/modeling_utils.py | [
{
"identifier": "get_activation",
"path": "transformers/src/transformers/activations.py",
"snippet": "def get_activation(activation_string):\n if activation_string in ACT2FN:\n return ACT2FN[activation_string]\n else:\n raise KeyError(\n \"function {} not found in ACT2FN mapping {} or torch.nn.functional\".format(\n activation_string, list(ACT2FN.keys())\n )\n )"
},
{
"identifier": "PretrainedConfig",
"path": "transformers/src/transformers/configuration_utils.py",
"snippet": "class PretrainedConfig(object):\n r\"\"\" Base class for all configuration classes.\n Handles a few parameters common to all models' configurations as well as methods for loading/downloading/saving configurations.\n\n Note:\n A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to initialize a model does **not** load the model weights.\n It only affects the model's configuration.\n\n Class attributes (overridden by derived classes):\n - ``pretrained_config_archive_map``: a python ``dict`` with `shortcut names` (string) as keys and `url` (string) of associated pretrained model configurations as values.\n - ``model_type``: a string that identifies the model type, that we serialize into the JSON file, and that we use to recreate the correct object in :class:`~transformers.AutoConfig`.\n\n Args:\n finetuning_task (:obj:`string` or :obj:`None`, `optional`, defaults to :obj:`None`):\n Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow or PyTorch) checkpoint.\n num_labels (:obj:`int`, `optional`, defaults to `2`):\n Number of classes to use when the model is a classification model (sequences/tokens)\n output_attentions (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Should the model returns attentions weights.\n output_hidden_states (:obj:`string`, `optional`, defaults to :obj:`False`):\n Should the model returns all hidden-states.\n torchscript (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Is the model used with Torchscript (for PyTorch models).\n \"\"\"\n pretrained_config_archive_map = {} # type: Dict[str, str]\n model_type = \"\" # type: str\n\n def __init__(self, **kwargs):\n # Attributes with defaults\n self.output_attentions = kwargs.pop(\"output_attentions\", False)\n self.output_hidden_states = kwargs.pop(\"output_hidden_states\", False)\n self.output_past = kwargs.pop(\"output_past\", True) # Not used by all models\n self.torchscript = kwargs.pop(\"torchscript\", False) # Only used by PyTorch models\n self.use_bfloat16 = kwargs.pop(\"use_bfloat16\", False)\n self.pruned_heads = kwargs.pop(\"pruned_heads\", {})\n\n # Is decoder is used in encoder-decoder models to differentiate encoder from decoder\n self.is_decoder = kwargs.pop(\"is_decoder\", False)\n\n # Parameters for sequence generation\n self.max_length = kwargs.pop(\"max_length\", 20)\n self.do_sample = kwargs.pop(\"do_sample\", False)\n self.num_beams = kwargs.pop(\"num_beams\", 1)\n self.temperature = kwargs.pop(\"temperature\", 1.0)\n self.top_k = kwargs.pop(\"top_k\", 50)\n self.top_p = kwargs.pop(\"top_p\", 1.0)\n self.repetition_penalty = kwargs.pop(\"repetition_penalty\", 1.0)\n self.bos_token_id = kwargs.pop(\"bos_token_id\", None)\n self.pad_token_id = kwargs.pop(\"pad_token_id\", None)\n self.eos_token_ids = kwargs.pop(\"eos_token_ids\", None)\n self.length_penalty = kwargs.pop(\"length_penalty\", 1.0)\n self.num_return_sequences = kwargs.pop(\"num_return_sequences\", 1)\n\n # Fine-tuning task arguments\n self.architectures = kwargs.pop(\"architectures\", None)\n self.finetuning_task = kwargs.pop(\"finetuning_task\", None)\n self.num_labels = kwargs.pop(\"num_labels\", 2)\n self.id2label = kwargs.pop(\"id2label\", {i: \"LABEL_{}\".format(i) for i in range(self.num_labels)})\n self.id2label = dict((int(key), value) for key, value in self.id2label.items())\n self.label2id = kwargs.pop(\"label2id\", dict(zip(self.id2label.values(), self.id2label.keys())))\n self.label2id = dict((key, 
int(value)) for key, value in self.label2id.items())\n\n # Additional attributes without default values\n for key, value in kwargs.items():\n try:\n setattr(self, key, value)\n except AttributeError as err:\n logger.error(\"Can't set {} with value {} for {}\".format(key, value, self))\n raise err\n\n def save_pretrained(self, save_directory):\n \"\"\"\n Save a configuration object to the directory `save_directory`, so that it\n can be re-loaded using the :func:`~transformers.PretrainedConfig.from_pretrained` class method.\n\n Args:\n save_directory (:obj:`string`):\n Directory where the configuration JSON file will be saved.\n \"\"\"\n assert os.path.isdir(\n save_directory\n ), \"Saving path should be a directory where the model and configuration can be saved\"\n\n # If we save using the predefined names, we can load using `from_pretrained`\n output_config_file = os.path.join(save_directory, CONFIG_NAME)\n\n self.to_json_file(output_config_file)\n logger.info(\"Configuration saved in {}\".format(output_config_file))\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> \"PretrainedConfig\":\n r\"\"\"\n\n Instantiate a :class:`~transformers.PretrainedConfig` (or a derived class) from a pre-trained model configuration.\n\n Args:\n pretrained_model_name_or_path (:obj:`string`):\n either:\n - a string with the `shortcut name` of a pre-trained model configuration to load from cache or\n download, e.g.: ``bert-base-uncased``.\n - a string with the `identifier name` of a pre-trained model configuration that was user-uploaded to\n our S3, e.g.: ``dbmdz/bert-base-german-cased``.\n - a path to a `directory` containing a configuration file saved using the\n :func:`~transformers.PretrainedConfig.save_pretrained` method, e.g.: ``./my_model_directory/``.\n - a path or url to a saved configuration JSON `file`, e.g.:\n ``./my_model_directory/configuration.json``.\n cache_dir (:obj:`string`, `optional`):\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n kwargs (:obj:`Dict[str, any]`, `optional`):\n The values in kwargs of any keys which are configuration attributes will be used to override the loaded\n values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is\n controlled by the `return_unused_kwargs` keyword parameter.\n force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Force to (re-)download the model weights and configuration files and override the cached versions if they exist.\n resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Do not delete incompletely recieved file. 
Attempt to resume the download if such a file exists.\n proxies (:obj:`Dict`, `optional`):\n A dictionary of proxy servers to use by protocol or endpoint, e.g.:\n :obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.`\n The proxies are used on each request.\n return_unused_kwargs: (`optional`) bool:\n If False, then this function returns just the final configuration object.\n If True, then this functions returns a :obj:`Tuple(config, unused_kwargs)` where `unused_kwargs` is a\n dictionary consisting of the key/value pairs whose keys are not configuration attributes: ie the part\n of kwargs which has not been used to update `config` and is otherwise ignored.\n\n Returns:\n :class:`PretrainedConfig`: An instance of a configuration object\n\n Examples::\n\n # We can't instantiate directly the base class `PretrainedConfig` so let's show the examples on a\n # derived class: BertConfig\n config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.\n config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`\n config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json')\n config = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False)\n assert config.output_attention == True\n config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attention=True,\n foo=False, return_unused_kwargs=True)\n assert config.output_attention == True\n assert unused_kwargs == {'foo': False}\n\n \"\"\"\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n return cls.from_dict(config_dict, **kwargs)\n\n @classmethod\n def get_config_dict(\n cls, pretrained_model_name_or_path: str, pretrained_config_archive_map: Optional[Dict] = None, **kwargs\n ) -> Tuple[Dict, Dict]:\n \"\"\"\n From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used\n for instantiating a Config using `from_dict`.\n\n Parameters:\n pretrained_model_name_or_path (:obj:`string`):\n The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.\n pretrained_config_archive_map: (:obj:`Dict[str, str]`, `optional`) Dict:\n A map of `shortcut names` to `url`. 
By default, will use the current class attribute.\n\n Returns:\n :obj:`Tuple[Dict, Dict]`: The dictionary that will be used to instantiate the configuration object.\n\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", None)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n\n if pretrained_config_archive_map is None:\n pretrained_config_archive_map = cls.pretrained_config_archive_map\n\n if pretrained_model_name_or_path in pretrained_config_archive_map:\n config_file = pretrained_config_archive_map[pretrained_model_name_or_path]\n elif os.path.isdir(pretrained_model_name_or_path):\n config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)\n elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):\n config_file = pretrained_model_name_or_path\n else:\n config_file = hf_bucket_url(pretrained_model_name_or_path, postfix=CONFIG_NAME)\n\n try:\n # Load from URL or cache if already cached\n resolved_config_file = cached_path(\n config_file,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n )\n # Load config dict\n if resolved_config_file is None:\n raise EnvironmentError\n config_dict = cls._dict_from_json_file(resolved_config_file)\n\n except EnvironmentError:\n if pretrained_model_name_or_path in pretrained_config_archive_map:\n msg = \"Couldn't reach server at '{}' to download pretrained model configuration file.\".format(\n config_file\n )\n else:\n msg = (\n \"Model name '{}' was not found in model name list. \"\n \"We assumed '{}' was a path, a model identifier, or url to a configuration file named {} or \"\n \"a directory containing such a file but couldn't find any such file at this path or url.\".format(\n pretrained_model_name_or_path, config_file, CONFIG_NAME,\n )\n )\n raise EnvironmentError(msg)\n\n except json.JSONDecodeError:\n msg = (\n \"Couldn't reach server at '{}' to download configuration file or \"\n \"configuration file is not a valid JSON file. \"\n \"Please check network or file content here: {}.\".format(config_file, resolved_config_file)\n )\n raise EnvironmentError(msg)\n\n if resolved_config_file == config_file:\n logger.info(\"loading configuration file {}\".format(config_file))\n else:\n logger.info(\"loading configuration file {} from cache at {}\".format(config_file, resolved_config_file))\n\n return config_dict, kwargs\n\n @classmethod\n def from_dict(cls, config_dict: Dict, **kwargs) -> \"PretrainedConfig\":\n \"\"\"\n Constructs a `Config` from a Python dictionary of parameters.\n\n Args:\n config_dict (:obj:`Dict[str, any]`):\n Dictionary that will be used to instantiate the configuration object. 
Such a dictionary can be retrieved\n from a pre-trained checkpoint by leveraging the :func:`~transformers.PretrainedConfig.get_config_dict`\n method.\n kwargs (:obj:`Dict[str, any]`):\n Additional parameters from which to initialize the configuration object.\n\n Returns:\n :class:`PretrainedConfig`: An instance of a configuration object\n \"\"\"\n return_unused_kwargs = kwargs.pop(\"return_unused_kwargs\", False)\n\n config = cls(**config_dict)\n\n if hasattr(config, \"pruned_heads\"):\n config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())\n\n # Update config with kwargs if needed\n to_remove = []\n for key, value in kwargs.items():\n if hasattr(config, key):\n setattr(config, key, value)\n to_remove.append(key)\n for key in to_remove:\n kwargs.pop(key, None)\n\n logger.info(\"Model config %s\", str(config))\n if return_unused_kwargs:\n return config, kwargs\n else:\n return config\n\n @classmethod\n def from_json_file(cls, json_file: str) -> \"PretrainedConfig\":\n \"\"\"\n Constructs a `Config` from the path to a json file of parameters.\n\n Args:\n json_file (:obj:`string`):\n Path to the JSON file containing the parameters.\n\n Returns:\n :class:`PretrainedConfig`: An instance of a configuration object\n\n \"\"\"\n config_dict = cls._dict_from_json_file(json_file)\n return cls(**config_dict)\n\n @classmethod\n def _dict_from_json_file(cls, json_file: str):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __eq__(self, other):\n return self.__dict__ == other.__dict__\n\n def __repr__(self):\n return \"{} {}\".format(self.__class__.__name__, self.to_json_string())\n\n def to_dict(self):\n \"\"\"\n Serializes this instance to a Python dictionary.\n\n Returns:\n :obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n if hasattr(self.__class__, \"model_type\"):\n output[\"model_type\"] = self.__class__.model_type\n return output\n\n def to_json_string(self):\n \"\"\"\n Serializes this instance to a JSON string.\n\n Returns:\n :obj:`string`: String containing all the attributes that make up this configuration instance in JSON format.\n \"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path):\n \"\"\"\n Save this instance to a json file.\n\n Args:\n json_file_path (:obj:`string`):\n Path to the JSON file in which this configuration instance's parameters will be saved.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())"
},
{
"identifier": "DUMMY_INPUTS",
"path": "transformers/src/transformers/file_utils.py",
"snippet": "DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]"
},
{
"identifier": "TF2_WEIGHTS_NAME",
"path": "transformers/src/transformers/file_utils.py",
"snippet": "TF2_WEIGHTS_NAME = \"tf_model.h5\""
},
{
"identifier": "TF_WEIGHTS_NAME",
"path": "transformers/src/transformers/file_utils.py",
"snippet": "TF_WEIGHTS_NAME = \"model.ckpt\""
},
{
"identifier": "WEIGHTS_NAME",
"path": "transformers/src/transformers/file_utils.py",
"snippet": "WEIGHTS_NAME = \"pytorch_model.bin\""
},
{
"identifier": "cached_path",
"path": "transformers/src/transformers/file_utils.py",
"snippet": "def cached_path(\n url_or_filename,\n cache_dir=None,\n force_download=False,\n proxies=None,\n resume_download=False,\n user_agent=None,\n extract_compressed_file=False,\n force_extract=False,\n local_files_only=False,\n) -> Optional[str]:\n \"\"\"\n Given something that might be a URL (or might be a local path),\n determine which. If it's a URL, download the file and cache it, and\n return the path to the cached file. If it's already a local path,\n make sure the file exists and then return the path.\n Args:\n cache_dir: specify a cache directory to save the file to (overwrite the default cache dir).\n force_download: if True, re-dowload the file even if it's already cached in the cache dir.\n resume_download: if True, resume the download if incompletly recieved file is found.\n user_agent: Optional string or dict that will be appended to the user-agent on remote requests.\n extract_compressed_file: if True and the path point to a zip or tar file, extract the compressed\n file in a folder along the archive.\n force_extract: if True when extract_compressed_file is True and the archive was already extracted,\n re-extract the archive and overide the folder where it was extracted.\n\n Return:\n None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).\n Local path (string) otherwise\n \"\"\"\n if cache_dir is None:\n cache_dir = TRANSFORMERS_CACHE\n if isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n if isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n if is_remote_url(url_or_filename):\n # URL, so get it from the cache (downloading if necessary)\n output_path = get_from_cache(\n url_or_filename,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n user_agent=user_agent,\n local_files_only=local_files_only,\n )\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n output_path = url_or_filename\n elif urlparse(url_or_filename).scheme == \"\":\n # File, but it doesn't exist.\n raise EnvironmentError(\"file {} not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))\n\n if extract_compressed_file:\n if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):\n return output_path\n\n # Path where we extract compressed archives\n # We avoid '.' in dir name and add \"-extracted\" at the end: \"./model.zip\" => \"./model-zip-extracted/\"\n output_dir, output_file = os.path.split(output_path)\n output_extract_dir_name = output_file.replace(\".\", \"-\") + \"-extracted\"\n output_path_extracted = os.path.join(output_dir, output_extract_dir_name)\n\n if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:\n return output_path_extracted\n\n # Prevent parallel extractions\n lock_path = output_path + \".lock\"\n with FileLock(lock_path):\n shutil.rmtree(output_path_extracted, ignore_errors=True)\n os.makedirs(output_path_extracted)\n if is_zipfile(output_path):\n with ZipFile(output_path, \"r\") as zip_file:\n zip_file.extractall(output_path_extracted)\n zip_file.close()\n elif tarfile.is_tarfile(output_path):\n tar_file = tarfile.open(output_path)\n tar_file.extractall(output_path_extracted)\n tar_file.close()\n else:\n raise EnvironmentError(\"Archive format of {} could not be identified\".format(output_path))\n\n return output_path_extracted\n\n return output_path"
},
{
"identifier": "hf_bucket_url",
"path": "transformers/src/transformers/file_utils.py",
"snippet": "def hf_bucket_url(identifier, postfix=None, cdn=False) -> str:\n endpoint = CLOUDFRONT_DISTRIB_PREFIX if cdn else S3_BUCKET_PREFIX\n if postfix is None:\n return \"/\".join((endpoint, identifier))\n else:\n return \"/\".join((endpoint, identifier, postfix))"
},
{
"identifier": "is_remote_url",
"path": "transformers/src/transformers/file_utils.py",
"snippet": "def is_remote_url(url_or_filename):\n parsed = urlparse(url_or_filename)\n return parsed.scheme in (\"http\", \"https\", \"s3\")"
}
] | import logging
import os
import typing
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from .activations import get_activation
from .configuration_utils import PretrainedConfig
from .file_utils import (
DUMMY_INPUTS,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
WEIGHTS_NAME,
cached_path,
hf_bucket_url,
is_remote_url,
)
from torch.nn import Identity
from transformers import load_tf2_checkpoint_in_pytorch_model | 8,543 | It is up to you to train those weights with a downstream fine-tuning task.
The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded.
Parameters:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
- None if you are both providing the configuration and state dictionary (resp. with keyword arguments ``config`` and ``state_dict``)
model_args: (`optional`) Sequence of positional arguments:
All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) one of:
- an instance of a class derived from :class:`~transformers.PretrainedConfig`, or
- a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained()`
Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
- the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionary for the model to use instead of a state dictionary loaded from the saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
resume_download: (`optional`) boolean, default False:
Do not delete an incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g. ``output_attention=True``). Behaves differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
# For example purposes. Not runnable.
model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = BertModel.from_pretrained('./test/saved_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
state_dict = kwargs.pop("state_dict", None)
cache_dir = kwargs.pop("cache_dir", None)
from_tf = kwargs.pop("from_tf", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]
elif os.path.isdir(pretrained_model_name_or_path):
if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
# Load from a TF 1.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
else:
raise EnvironmentError(
"Error no file named {} found in directory {} or `from_tf` set to False".format(
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"], pretrained_model_name_or_path
)
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
assert (
from_tf
), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format(
pretrained_model_name_or_path + ".index"
)
archive_file = pretrained_model_name_or_path + ".index"
else:
| # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
logger = logging.getLogger(__name__)
try:
except ImportError:
# Older PyTorch compatibility
class Identity(nn.Module):
r"""A placeholder identity operator that is argument-insensitive.
"""
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, input):
return input
class ModuleUtilsMixin:
"""
A few utilities for torch.nn.Modules, to be used as a mixin.
"""
def num_parameters(self, only_trainable: bool = False) -> int:
"""
Get number of (optionally, trainable) parameters in the module.
"""
params = filter(lambda x: x.requires_grad, self.parameters()) if only_trainable else self.parameters()
return sum(p.numel() for p in params)
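# Illustrative usage sketch (not from the original file): any model that mixes in
# ModuleUtilsMixin can report its parameter counts, e.g.
#   total_params = model.num_parameters()
#   trainable_params = model.num_parameters(only_trainable=True)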
class PreTrainedModel(nn.Module, ModuleUtilsMixin):
r""" Base class for all models.
:class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models
as well as a few methods common to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- ``config_class``: a class derived from :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
- ``pretrained_model_archive_map``: a python ``dict`` with `short-cut-names` (string) as keys and `url` (string) of associated pretrained weights as values.
- ``load_tf_weights``: a python ``method`` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:
- ``model``: an instance of the relevant subclass of :class:`~transformers.PreTrainedModel`,
- ``config``: an instance of the relevant subclass of :class:`~transformers.PretrainedConfig`,
- ``path``: a path (string) to the TensorFlow checkpoint.
- ``base_model_prefix``: a string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.
"""
config_class = None
pretrained_model_archive_map = {}
base_model_prefix = ""
@property
def dummy_inputs(self):
""" Dummy inputs to do a forward pass in the network.
Returns:
torch.Tensor with dummy inputs
"""
return {"input_ids": torch.tensor(DUMMY_INPUTS)}
def __init__(self, config, *inputs, **kwargs):
super().__init__()
if not isinstance(config, PretrainedConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
)
)
# Save config in model
self.config = config
@property
def base_model(self):
return getattr(self, self.base_model_prefix, self)
def get_input_embeddings(self):
"""
Returns the model's input embeddings.
Returns:
:obj:`nn.Module`:
A torch module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
return base_model.get_input_embeddings()
else:
raise NotImplementedError
def set_input_embeddings(self, value):
"""
Set model's input embeddings
Args:
value (:obj:`nn.Module`):
A module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
base_model.set_input_embeddings(value)
else:
raise NotImplementedError
def get_output_embeddings(self):
"""
Returns the model's output embeddings.
Returns:
:obj:`nn.Module`:
A torch module mapping hidden states to vocabulary.
"""
return None # Overwrite for models with output embeddings
def tie_weights(self):
"""
Tie the weights between the input embeddings and the output embeddings.
If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning
the weights instead.
"""
output_embeddings = self.get_output_embeddings()
if output_embeddings is not None:
if isinstance(output_embeddings, list):
for x in output_embeddings:
self._tie_or_clone_weights(x, self.get_input_embeddings())
else:
self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
""" Tie or clone module weights depending of weither we are using TorchScript or not
"""
if self.config.torchscript:
output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
else:
output_embeddings.weight = input_embeddings.weight
if hasattr(output_embeddings, "bias") and output_embeddings.bias is not None:
output_embeddings.bias.data = torch.nn.functional.pad(
output_embeddings.bias.data,
(0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0]),
"constant",
0,
)
if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
output_embeddings.out_features = input_embeddings.num_embeddings
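# Illustrative note (not from the original file): after tie_weights() runs with
# torchscript disabled, the output and input embeddings share the same Parameter, e.g.
#   assert model.get_output_embeddings().weight is model.get_input_embeddings().weight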
def resize_token_embeddings(self, new_num_tokens=None):
""" Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size.
Take care of tying the embedding weights afterwards if the model class has a `tie_weights()` method.
Arguments:
new_num_tokens: (`optional`) int:
New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end.
If not provided or None: does nothing and just returns a pointer to the input tokens ``torch.nn.Embeddings`` Module of the model.
Return: ``torch.nn.Embeddings``
Pointer to the input tokens Embeddings Module of the model
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
model_embeds = base_model._resize_token_embeddings(new_num_tokens)
if new_num_tokens is None:
return model_embeds
# Update base model and current model config
self.config.vocab_size = new_num_tokens
base_model.vocab_size = new_num_tokens
# Tie weights again if needed
self.tie_weights()
return model_embeds
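# Illustrative usage sketch (not from the original file); `tokenizer` is a hypothetical
# PreTrainedTokenizer instance used only for illustration:
#   tokenizer.add_tokens(["<new_token>"])
#   model.resize_token_embeddings(len(tokenizer))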
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.get_input_embeddings()
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.set_input_embeddings(new_embeddings)
return self.get_input_embeddings()
def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None):
""" Build a resized Embedding Module from a provided token Embedding Module.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
Args:
new_num_tokens: (`optional`) int
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
If not provided or None: return the provided token Embedding Module.
Return: ``torch.nn.Embeddings``
Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None
"""
if new_num_tokens is None:
return old_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
if old_num_tokens == new_num_tokens:
return old_embeddings
# Build new embeddings
new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
new_embeddings.to(old_embeddings.weight.device)
# initialize all new embeddings (in particular added tokens)
self._init_weights(new_embeddings)
# Copy word embeddings from the previous weights
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]
return new_embeddings
def init_weights(self):
""" Initialize and prunes weights if needed. """
# Initialize weights
self.apply(self._init_weights)
# Prune heads if needed
if self.config.pruned_heads:
self.prune_heads(self.config.pruned_heads)
# Tie weights if needed
self.tie_weights()
def prune_heads(self, heads_to_prune):
""" Prunes heads of the base model.
Arguments:
heads_to_prune: dict with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`).
E.g. {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
"""
# save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
for layer, heads in heads_to_prune.items():
union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON
self.base_model._prune_heads(heads_to_prune)
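# Illustrative usage sketch (not from the original file), mirroring the docstring example:
#   model.prune_heads({1: [0, 2], 2: [2, 3]})  # prune heads 0, 2 of layer 1 and heads 2, 3 of layer 2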
def save_pretrained(self, save_directory):
""" Save a model and its configuration file to a directory, so that it
can be re-loaded using the :func:`~transformers.PreTrainedModel.from_pretrained` class method.
"""
assert os.path.isdir(
save_directory
), "Saving path should be a directory where the model and configuration can be saved"
# Only save the model itself if we are using distributed training
model_to_save = self.module if hasattr(self, "module") else self
# Attach architecture to the config
model_to_save.config.architectures = [model_to_save.__class__.__name__]
# Save configuration file
model_to_save.config.save_pretrained(save_directory)
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
logger.info("Model weights saved in {}".format(output_model_file))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiate a pretrained pytorch model from a pre-trained model configuration.
The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with ``model.train()``
The warning ``Weights from XXX not initialized from pretrained model`` means that the weights of XXX do not come pre-trained with the rest of the model.
It is up to you to train those weights with a downstream fine-tuning task.
The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded.
Parameters:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
- None if you are both providing the configuration and state dictionary (resp. with keyword arguments ``config`` and ``state_dict``)
model_args: (`optional`) Sequence of positional arguments:
All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) one of:
- an instance of a class derived from :class:`~transformers.PretrainedConfig`, or
- a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained()`
Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
- the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionary for the model to use instead of a state dictionary loaded from the saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
resume_download: (`optional`) boolean, default False:
Do not delete an incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g. ``output_attention=True``). Behaves differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
# For example purposes. Not runnable.
model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = BertModel.from_pretrained('./test/saved_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
state_dict = kwargs.pop("state_dict", None)
cache_dir = kwargs.pop("cache_dir", None)
from_tf = kwargs.pop("from_tf", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]
elif os.path.isdir(pretrained_model_name_or_path):
if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
# Load from a TF 1.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
else:
raise EnvironmentError(
"Error no file named {} found in directory {} or `from_tf` set to False".format(
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"], pretrained_model_name_or_path
)
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
assert (
from_tf
), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format(
pretrained_model_name_or_path + ".index"
)
archive_file = pretrained_model_name_or_path + ".index"
else: | archive_file = hf_bucket_url( | 7 | 2023-10-15 02:31:09+00:00 | 12k |
generative-skill-chaining/gsc-code | generative_skill_chaining/envs/pybullet/table/predicates.py | [
{
"identifier": "primitive_actions",
"path": "generative_skill_chaining/envs/pybullet/table/primitive_actions.py",
"snippet": "class PrimitiveAction:\nclass PickAction(PrimitiveAction):\nclass PlaceAction(PrimitiveAction):\nclass PullAction(PrimitiveAction):\nclass PushAction(PrimitiveAction):\n RANGES: Dict[str, Tuple[float, float]]\n RANGES = {\n \"x\": (-0.2, 0.2),\n \"y\": (-0.1, 0.1),\n \"z\": (-0.05, 0.05),\n \"theta\": (-0.25 * np.pi, 0.75 * np.pi),\n }\n RANGES = {\n \"x\": (-1.0, 1.0),\n \"y\": (-1.0, 1.0),\n \"z\": (0.0, 0.1),\n \"theta\": (-0.25 * np.pi, 0.75 * np.pi),\n }\n RANGES = {\n \"r_reach\": (-0.2, 0.0),\n \"r_pull\": (-0.4, -0.1),\n \"y\": (-0.05, 0.05),\n \"theta\": (-0.5 * np.pi, 0.5 * np.pi),\n }\n RANGES = {\n \"r_reach\": (-0.4, -0.2),\n \"r_push\": (0.1, 0.4),\n \"y\": (-0.05, 0.05),\n \"theta\": (-0.5 * np.pi, 0.5 * np.pi),\n }\n def __init__(self, vector: Optional[np.ndarray] = None):\n def range(cls) -> np.ndarray:\n def random(cls):\n def __init__(\n self,\n vector: Optional[np.ndarray] = None,\n pos: Optional[np.ndarray] = None,\n theta: Optional[float] = None,\n ):\n def pos(self) -> np.ndarray:\n def pos(self, pos: np.ndarray) -> None:\n def theta(self) -> np.ndarray:\n def theta(self, theta: np.ndarray) -> None:\n def __repr__(self) -> str:\n def __init__(\n self,\n vector: Optional[np.ndarray] = None,\n pos: Optional[np.ndarray] = None,\n theta: Optional[float] = None,\n ):\n def pos(self) -> np.ndarray:\n def pos(self, pos: np.ndarray) -> None:\n def theta(self) -> np.ndarray:\n def theta(self, theta: np.ndarray) -> None:\n def __repr__(self) -> str:\n def __init__(\n self,\n vector: Optional[np.ndarray] = None,\n r_reach: Optional[float] = None,\n r_pull: Optional[float] = None,\n y: Optional[float] = None,\n theta: Optional[float] = None,\n ):\n def r_reach(self) -> np.ndarray:\n def r_reach(self, r_reach: np.ndarray) -> None:\n def r_pull(self) -> np.ndarray:\n def r_pull(self, r_pull: np.ndarray) -> None:\n def y(self) -> np.ndarray:\n def y(self, y: np.ndarray) -> None:\n def theta(self) -> np.ndarray:\n def theta(self, theta: np.ndarray) -> None:\n def __repr__(self) -> str:\n def __init__(\n self,\n vector: Optional[np.ndarray] = None,\n r_reach: Optional[float] = None,\n r_push: Optional[float] = None,\n y: Optional[float] = None,\n theta: Optional[float] = None,\n ):\n def r_reach(self) -> np.ndarray:\n def r_reach(self, r_reach: np.ndarray) -> None:\n def r_push(self) -> np.ndarray:\n def r_push(self, r_push: np.ndarray) -> None:\n def y(self) -> np.ndarray:\n def y(self, y: np.ndarray) -> None:\n def theta(self) -> np.ndarray:\n def theta(self, theta: np.ndarray) -> None:\n def __repr__(self) -> str:"
},
{
"identifier": "utils",
"path": "generative_skill_chaining/envs/pybullet/table/utils.py",
"snippet": "TABLE_CONSTRAINTS = {\n \"table_z_max\": 0.00,\n \"table_x_min\": 0.28,\n \"table_y_min\": -0.45,\n \"table_y_max\": 0.45,\n \"workspace_x_min\": 0.40,\n \"operational_x_min\": 0.50,\n \"operational_x_max\": 0.60,\n \"obstruction_x_min\": 0.575,\n \"workspace_radius\": 0.7,\n}\nEPSILONS = {\"aabb\": 0.01, \"align\": 0.99, \"twist\": 0.001, \"tipping\": 0.1}\nTWIST_HISTORY: Dict[str, Dict[Object, np.ndarray]] = collections.defaultdict(dict)\ndef compute_margins(obj: Object) -> np.ndarray:\ndef compute_object_pose(obj: Object, theta: float) -> math.Pose:\ndef is_above(obj_a: Object, obj_b: Object) -> bool:\ndef is_upright(obj: Object) -> bool:\ndef is_within_distance(\n obj_a: Object, obj_b: Object, distance: float, physics_id: int\n) -> bool:\ndef is_moving(obj: Object, use_history: Optional[str] = None) -> bool:\ndef is_below_table(obj: Object) -> bool:\ndef is_touching(\n body_a: body.Body,\n body_b: body.Body,\n link_id_a: Optional[int] = None,\n link_id_b: Optional[int] = None,\n) -> bool:\ndef is_intersecting(obj_a: Object, obj_b: Object) -> bool:\ndef is_under(obj_a: Object, obj_b: Object) -> bool:\ndef is_inworkspace(\n obj: Optional[Object] = None,\n obj_pos: Optional[np.ndarray] = None,\n distance: Optional[float] = None,\n) -> bool:\ndef is_beyondworkspace(\n obj: Optional[Object] = None,\n obj_pos: Optional[np.ndarray] = None,\n distance: Optional[float] = None,\n) -> bool:\ndef load_config(config: Union[str, Any]) -> Any:"
},
{
"identifier": "Box",
"path": "generative_skill_chaining/envs/pybullet/table/objects.py",
"snippet": "class Box(Object):\n def __init__(\n self,\n physics_id: int,\n name: str,\n size: Union[List[float], np.ndarray],\n color: Union[List[float], np.ndarray],\n mass: float = 0.1,\n ):\n box = shapes.Box(size=np.array(size), mass=mass, color=np.array(color))\n body_id = shapes.create_body(box, physics_id=physics_id)\n self._shape = box\n\n super().__init__(\n physics_id=physics_id, body_id=body_id, name=name, is_static=mass == 0.0\n )\n\n self._state.box_size = box.size\n self._bbox = np.array([-0.5 * self.size, 0.5 * self.size])\n\n @property\n def size(self) -> np.ndarray:\n return self._state.box_size\n\n @property\n def bbox(self) -> np.ndarray:\n return self._bbox\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return [self._shape]"
},
{
"identifier": "Hook",
"path": "generative_skill_chaining/envs/pybullet/table/objects.py",
"snippet": "class Hook(Object):\n @staticmethod\n def compute_link_positions(\n head_length: float, handle_length: float, handle_y: float, radius: float\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n dy = (\n 0.5\n * np.sign(handle_y)\n * max(0, (abs(handle_y) - 1.0) * head_length / 2 + radius)\n )\n pos_handle = np.array([-radius / 2, handle_y * head_length / 2 - dy, 0.0])\n pos_head = np.array([(handle_length - radius) / 2, -dy, 0.0])\n pos_joint = np.array(\n [(handle_length - radius) / 2, handle_y * head_length / 2 - dy, 0.0]\n )\n\n return pos_handle, pos_head, pos_joint\n\n def __init__(\n self,\n physics_id: int,\n name: str,\n head_length: float,\n handle_length: float,\n handle_y: float,\n color: Union[List[float], np.ndarray],\n radius: float = 0.02,\n mass: float = 0.1,\n ):\n if not isinstance(color, np.ndarray):\n color = np.array(color)\n\n pos_handle, pos_head, pos_joint = Hook.compute_link_positions(\n head_length=head_length,\n handle_length=handle_length,\n handle_y=handle_y,\n radius=radius,\n )\n handle = shapes.Cylinder(\n radius=radius,\n length=handle_length,\n mass=(handle_length / (head_length + handle_length + radius)) * mass,\n color=color,\n pose=math.Pose(\n pos=pos_handle,\n quat=eigen.Quaterniond(\n eigen.AngleAxisd(angle=np.pi / 2, axis=np.array([0.0, 1.0, 0.0]))\n ).coeffs,\n ),\n )\n head = shapes.Cylinder(\n radius=radius,\n length=head_length,\n mass=(head_length / (head_length + handle_length + radius)) * mass,\n color=color,\n pose=math.Pose(\n pos=pos_head,\n quat=eigen.Quaterniond(\n eigen.AngleAxisd(angle=np.pi / 2, axis=np.array([1.0, 0.0, 0.0]))\n ).coeffs,\n ),\n )\n joint = shapes.Sphere(\n radius=radius,\n mass=(radius / (head_length + handle_length + radius)) * mass,\n color=color,\n pose=math.Pose(pos=pos_joint),\n )\n self._shapes = [joint, handle, head]\n body_id = shapes.create_body(\n self.shapes, link_parents=[0, 0], physics_id=physics_id\n )\n\n super().__init__(\n physics_id=physics_id, body_id=body_id, name=name, is_static=mass == 0.0\n )\n\n self._state.head_length = head_length\n self._state.handle_length = handle_length\n self._state.handle_y = handle_y\n self._radius = radius\n\n self._size = np.array(\n [handle_length + radius, head_length + 2 * abs(pos_head[1]), 2 * radius]\n )\n self._bbox = np.array([-0.5 * self.size, 0.5 * self.size])\n\n @property\n def head_length(self) -> float:\n return self._state.head_length # type: ignore\n\n @property\n def handle_length(self) -> float:\n return self._state.handle_length # type: ignore\n\n @property\n def handle_y(self) -> float:\n return self._state.handle_y # type: ignore\n\n @property\n def radius(self) -> float:\n return self._radius\n\n @property\n def size(self) -> np.ndarray:\n return self._size\n\n @property\n def bbox(self) -> np.ndarray:\n return self._bbox\n\n def convex_hulls(\n self, world_frame: bool = True, project_2d: bool = False\n ) -> List[np.ndarray]:\n \"\"\"Computes the convex hulls of the handle and head links.\"\"\"\n handle_pose = self.shapes[1].pose\n head_pose = self.shapes[2].pose\n assert handle_pose is not None and head_pose is not None\n\n positions = np.array(\n [\n [0.0, handle_pose.pos[1], 0.0],\n [head_pose.pos[0], 0.0, 0.0],\n ]\n )\n sizes = np.array(\n [\n [self.size[0], 2 * self.radius, 2 * self.radius],\n [2 * self.radius, self.size[1], 2 * self.radius],\n ]\n )\n bboxes = np.array([positions - 0.5 * sizes, positions + 0.5 * sizes]).swapaxes(\n 0, 1\n )\n\n pose = self.pose() if world_frame else None\n vertices = 
[compute_bbox_vertices(bbox, pose, project_2d) for bbox in bboxes]\n\n return vertices\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return self._shapes\n\n # def aabb(self) -> np.ndarray:\n # raise NotImplementedError"
},
{
"identifier": "Null",
"path": "generative_skill_chaining/envs/pybullet/table/objects.py",
"snippet": "class Null(Object):\n def __init__(self, physics_id: int, name: str):\n sphere = shapes.Sphere(radius=0.001)\n body_id = shapes.create_body(sphere, physics_id=physics_id)\n\n super().__init__(\n physics_id=physics_id, body_id=body_id, name=name, is_static=True\n )\n\n def state(self) -> object_state.ObjectState:\n # Null object state is a zero vector.\n return self._state\n\n def enable_collisions(self) -> None:\n pass\n\n def unfreeze(self) -> bool:\n return False"
},
{
"identifier": "Object",
"path": "generative_skill_chaining/envs/pybullet/table/objects.py",
"snippet": "class Object(body.Body):\n name: str\n is_static: bool = False\n\n def __init__(\n self, physics_id: int, body_id: int, name: str, is_static: bool = False\n ):\n super().__init__(physics_id, body_id)\n\n self.name = name\n self.is_static = is_static\n\n T_pybullet_to_obj = super().pose().to_eigen()\n self._modified_axes = not T_pybullet_to_obj.is_approx(\n eigen.Isometry3d.identity()\n )\n if self._modified_axes:\n self._T_pybullet_to_obj = T_pybullet_to_obj\n self._T_obj_to_pybullet = T_pybullet_to_obj.inverse()\n\n self._state = object_state.ObjectState()\n\n def pose(self) -> math.Pose:\n if not self._modified_axes:\n return super().pose()\n\n return math.Pose.from_eigen(super().pose().to_eigen() * self._T_obj_to_pybullet)\n\n def set_pose(self, pose: math.Pose) -> None:\n if not self._modified_axes:\n return super().set_pose(pose)\n\n return super().set_pose(\n math.Pose.from_eigen(pose.to_eigen() * self._T_pybullet_to_obj)\n )\n\n def disable_collisions(self) -> None:\n for link_id in range(self.dof):\n p.setCollisionFilterGroupMask(\n self.body_id, link_id, 0, 0, physicsClientId=self.physics_id\n )\n\n def enable_collisions(self) -> None:\n for link_id in range(self.dof):\n p.setCollisionFilterGroupMask(\n self.body_id, link_id, 1, 0xFF, physicsClientId=self.physics_id\n )\n\n @property\n def inertia(self) -> dyn.SpatialInertiad:\n try:\n return self._obj_inertia # type: ignore\n except AttributeError:\n pass\n\n self._obj_inertia = super().inertia\n if self._modified_axes:\n self._obj_inertia = self._obj_inertia * self._T_pybullet_to_obj\n\n T_world_to_obj = self.pose().to_eigen().inverse()\n for link_id in range(self.dof):\n link = body.Link(self.physics_id, self.body_id, link_id)\n T_link_to_obj = T_world_to_obj * link.pose().to_eigen()\n self._obj_inertia += link.inertia * T_link_to_obj\n\n return self._obj_inertia\n\n def state(self) -> object_state.ObjectState:\n pose = self.pose()\n aa = eigen.AngleAxisd(eigen.Quaterniond(pose.quat))\n self._state.pos = pose.pos\n self._state.aa = aa.angle * aa.axis\n\n return self._state\n\n def set_state(self, state: object_state.ObjectState) -> None:\n self.set_pose(state.pose())\n\n def reset(self, action_skeleton: List) -> None:\n pass\n\n @classmethod\n def create(\n cls,\n physics_id: int,\n object_type: Optional[str],\n object_kwargs: Dict[str, Any] = {},\n object_groups: Dict[str, \"ObjectGroup\"] = {},\n **kwargs,\n ) -> \"Object\":\n object_class = Null if object_type is None else globals()[object_type]\n if issubclass(object_class, Variant):\n kwargs[\"object_groups\"] = object_groups\n object_kwargs = object_kwargs.copy()\n object_kwargs.update(kwargs)\n return object_class(physics_id=physics_id, **object_kwargs)\n\n def isinstance(self, class_or_tuple: Union[type, Tuple[type, ...]]) -> bool:\n return isinstance(self, class_or_tuple)\n\n def type(self) -> Type[\"Object\"]:\n return type(self)\n\n @property\n def size(self) -> np.ndarray:\n raise NotImplementedError\n\n @property\n def bbox(self) -> np.ndarray:\n \"\"\"Returns the bounding box in the object frame.\n\n If the origin of the object is at its geometric center, this will be\n equivalent to `(-0.5 * self.size, 0.5 * self.size)`.\n\n Returns:\n An array of shape [2, 3] (min/max, x/y/z).\n \"\"\"\n raise NotImplementedError\n\n def convex_hulls(\n self, world_frame: bool = True, project_2d: bool = False\n ) -> List[np.ndarray]:\n \"\"\"Computes the object's convex hull.\n\n These hulls will be used for rough collision checking. 
By default,\n the vertices will be the 6 corners of the object's bounding box\n (`Object.bbox`).\n\n Args:\n world_frame: Whether to transform the vertices in world frame or\n leave them in object frame.\n project_2d: Whether to return the 2d convex hull.\n\n Returns:\n List of arrays of shape [_, 3] or [_, 2], where each array is a\n convex hull.\n \"\"\"\n pose = self.pose() if world_frame else None\n vertices = compute_bbox_vertices(self.bbox, pose, project_2d)\n\n return [vertices]\n\n def aabb(self) -> np.ndarray:\n \"\"\"Computes the axis-aligned bounding box from the object pose and size.\n\n This should be more accurate than `super().aabb()`, which gets the aabb\n from Pybullet. Pybullet returns an *enlarged* aabb for the object *base*\n link, while this returns the exact aabb for the entire object.\n\n Returns:\n An array of shape [2, 3] (min/max, x/y/z).\n \"\"\"\n vertices = np.concatenate(self.convex_hulls(world_frame=True), axis=0)\n xyz_min = vertices.min(axis=0)\n xyz_max = vertices.max(axis=0)\n\n return np.array([xyz_min, xyz_max])\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return []\n\n def __str__(self) -> str:\n return self.name\n\n def __hash__(self) -> int:\n return hash(str(self))\n\n def __eq__(self, other) -> bool:\n return str(self) == str(other)"
},
{
"identifier": "Rack",
"path": "generative_skill_chaining/envs/pybullet/table/objects.py",
"snippet": "class Rack(Object):\n TOP_THICKNESS = 0.01\n LEG_THICKNESS = 0.01\n\n def __init__(\n self,\n physics_id: int,\n name: str,\n size: Union[List[float], np.ndarray],\n color: Union[List[float], np.ndarray],\n mass: float = 1.0,\n ):\n mass /= 7 # Divide mass among all 7 parts.\n top = shapes.Box(\n size=np.array([*size[:2], Rack.TOP_THICKNESS]),\n mass=mass,\n color=np.array(color),\n pose=math.Pose(\n pos=np.array([0.0, 0.0, -Rack.TOP_THICKNESS / 2]),\n quat=eigen.Quaterniond.identity().coeffs,\n ),\n )\n xy_legs = np.array([(x, y) for x in (-1, 1) for y in (-1, 1)]) * (\n (np.array(size[:2])[None, :] - Rack.LEG_THICKNESS) / 2\n )\n legs = [\n shapes.Box(\n size=np.array(\n [\n Rack.LEG_THICKNESS,\n Rack.LEG_THICKNESS,\n size[2] - Rack.TOP_THICKNESS - Rack.LEG_THICKNESS,\n ]\n ),\n mass=mass,\n color=np.array([0.0, 0.0, 0.0, 1.0]),\n pose=math.Pose(\n pos=np.array(\n [\n *xy_leg,\n -(size[2] + Rack.TOP_THICKNESS - Rack.LEG_THICKNESS) / 2,\n ]\n ),\n quat=eigen.Quaterniond.identity().coeffs,\n ),\n )\n for xy_leg in xy_legs\n ]\n stabilizers = [\n shapes.Box(\n size=np.array([size[0], Rack.LEG_THICKNESS, Rack.LEG_THICKNESS]),\n mass=mass,\n color=np.array([0.0, 0.0, 0.0, 1.0]),\n pose=math.Pose(\n pos=np.array([0.0, y_leg, -size[2] + Rack.LEG_THICKNESS / 2]),\n quat=eigen.Quaterniond.identity().coeffs,\n ),\n )\n for y_leg in xy_legs[:2, 1]\n ]\n self._shapes = [top, *legs, *stabilizers]\n body_id = shapes.create_body(\n self.shapes,\n link_parents=[0] * (len(legs) + len(stabilizers)),\n physics_id=physics_id,\n )\n\n super().__init__(\n physics_id=physics_id, body_id=body_id, name=name, is_static=mass == 0.0\n )\n\n self._state.box_size = np.array(size)\n self._bbox = np.array([-0.5 * self.size, 0.5 * self.size])\n self._bbox[0, 2] = -size[2]\n self._bbox[1, 2] = 0\n\n @property\n def size(self) -> np.ndarray:\n return self._state.box_size\n\n @property\n def bbox(self) -> np.ndarray:\n return self._bbox\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return self._shapes"
},
{
"identifier": "math",
"path": "generative_skill_chaining/envs/pybullet/sim/math.py",
"snippet": "PYBULLET_STEPS_PER_SEC = 240\nPYBULLET_TIMESTEP = 1 / PYBULLET_STEPS_PER_SEC\nclass Pose:\n def from_eigen(pose: eigen.Isometry3d) -> \"Pose\":\n def to_eigen(self) -> eigen.Isometry3d:\ndef comb(n: int, r: int) -> int:"
},
{
"identifier": "Robot",
"path": "generative_skill_chaining/envs/pybullet/sim/robot.py",
"snippet": "class Robot(body.Body):\n \"\"\"User-facing robot interface.\"\"\"\n\n def __init__(\n self,\n physics_id: int,\n step_simulation_fn: Callable[[], None],\n urdf: str,\n arm_class: Union[str, Type[arm.Arm]],\n arm_kwargs: Dict[str, Any],\n gripper_class: Union[str, Type[gripper.Gripper]],\n gripper_kwargs: Dict[str, Any],\n ):\n \"\"\"Loads the robot from a urdf file.\n\n Args:\n physics_id: Pybullet physics client id.\n step_simulation_fn: Function to step simulation.\n urdf: Path to urdf.\n arm_class: In the generative_skill_chaining.envs.pybullet namespace.\n arm_kwargs: Arm kwargs from yaml config.\n gripper_class: In the generative_skill_chaining.envs.pybullet namespace.\n gripper_kwargs: Gripper kwargs from yaml config.\n \"\"\"\n body_id = p.loadURDF(\n fileName=urdf,\n useFixedBase=True,\n flags=p.URDF_USE_INERTIA_FROM_FILE\n | p.URDF_MAINTAIN_LINK_ORDER, # | p.URDF_MERGE_FIXED_LINKS\n physicsClientId=physics_id,\n )\n super().__init__(physics_id, body_id)\n\n if isinstance(arm_class, str):\n arm_class = configs.get_class(arm_class, pybullet)\n if isinstance(gripper_class, str):\n gripper_class = configs.get_class(gripper_class, pybullet)\n\n self._arm = arm_class(self.physics_id, self.body_id, **arm_kwargs)\n T_world_to_ee = dyn.cartesian_pose(self.arm.ab).inverse()\n self._gripper = gripper_class(\n self.physics_id, self.body_id, T_world_to_ee, **gripper_kwargs\n )\n\n self.step_simulation = step_simulation_fn\n\n @property\n def arm(self) -> arm.Arm:\n \"\"\"Controllable arm.\"\"\"\n return self._arm\n\n @property\n def gripper(self) -> gripper.Gripper:\n \"\"\"Controllable gripper.\"\"\"\n return self._gripper\n\n @property\n def home_pose(self) -> math.Pose:\n return self.arm.home_pose\n\n def reset(self) -> bool:\n \"\"\"Resets the robot by setting the arm to its home configuration and the gripper to the open position.\n\n This method disables torque control and bypasses simulation.\n \"\"\"\n self.gripper.reset()\n self.clear_load()\n status = self.arm.reset()\n if isinstance(self.arm, real.arm.Arm):\n status = self.goto_configuration(self.arm.q_home)\n return status\n\n def clear_load(self) -> None:\n \"\"\"Resets the end-effector load to the gripper inertia.\"\"\"\n if self.gripper.inertia is not None:\n self.arm.ab.replace_load(self.gripper.inertia)\n else:\n self.arm.ab.clear_load()\n\n def set_load(self, inertia: dyn.SpatialInertiad) -> None:\n \"\"\"Sets the end-effector load to the sum of the given inertia and gripper inertia.\"\"\"\n if self.gripper.inertia is not None:\n inertia = inertia + self.gripper.inertia\n self.arm.ab.replace_load(inertia)\n\n def get_state(self) -> Dict[str, Any]:\n return {\n \"arm\": self.arm.get_state(),\n \"gripper\": self.gripper.get_state(),\n \"load\": copy.deepcopy(self.arm.ab.inertia_load),\n }\n\n def set_state(self, state: Dict[str, Any]) -> None:\n self.arm.set_state(state[\"arm\"])\n self.gripper.set_state(state[\"gripper\"])\n idx_link, load_inertia = next(iter(state[\"load\"].items()))\n self.arm.ab.replace_load(load_inertia, idx_link)\n\n def goto_home(self) -> bool:\n \"\"\"Uses opspace control to go to the home position.\"\"\"\n return self.goto_pose(\n self.home_pose.pos,\n self.home_pose.quat,\n pos_gains=(64, 16),\n ori_gains=(64, 16),\n )\n\n def _is_colliding(\n self, body_id_a: int, body_id_b: int, link_id_a: Optional[int] = None\n ) -> bool:\n kwargs = {}\n if link_id_a is not None:\n kwargs[\"linkIndexA\"] = link_id_a\n contacts = p.getContactPoints(\n bodyA=body_id_a, bodyB=body_id_b, 
physicsClientId=self.physics_id, **kwargs\n )\n\n if not contacts:\n return False\n\n force = contacts[0][9]\n return force > 0.0\n\n def goto_pose(\n self,\n pos: Optional[np.ndarray] = None,\n quat: Optional[Union[eigen.Quaterniond, np.ndarray]] = None,\n pos_gains: Optional[Union[Tuple[float, float], np.ndarray]] = None,\n ori_gains: Optional[Union[Tuple[float, float], np.ndarray]] = None,\n timeout: Optional[float] = None,\n check_collisions: Sequence[int] = [],\n check_collision_freq: int = 10,\n ) -> bool:\n \"\"\"Uses opspace control to go to the desired pose.\n\n This method blocks until the command finishes or times out. A\n ControlException will be raised if the grasp controller is aborted.\n\n Args:\n pos: Optional position. Maintains current position if None.\n quat: Optional quaternion. Maintains current orientation if None.\n pos_gains: (kp, kv) gains or [3 x 2] array of xyz gains.\n ori_gains: (kp, kv) gains or [3 x 2] array of xyz gains.\n timeout: Uses the timeout specified in the yaml arm config if None.\n check_collisions: Raise an exception if the gripper or grasped\n object collides with any of the body_ids in this list.\n check_collision_freq: Iteration interval with which to check\n collisions.\n Returns:\n True if the grasp controller converges to the desired position or\n zero velocity, false if the command times out.\n \"\"\"\n if check_collisions:\n body_ids_a = [self.body_id] * len(self.gripper.finger_links)\n link_ids_a: List[Optional[int]] = list(self.gripper.finger_links)\n grasp_body_id = self.gripper._gripper_state.grasp_body_id\n if grasp_body_id is not None:\n body_ids_a.append(grasp_body_id)\n link_ids_a.append(None)\n\n # Set the pose goal.\n self.arm.set_pose_goal(pos, quat, pos_gains, ori_gains, timeout)\n\n # Simulate until the pose goal is reached.\n status = self.arm.update_torques()\n self.gripper.update_torques()\n iter = 0\n while status == articulated_body.ControlStatus.IN_PROGRESS:\n self.step_simulation()\n status = self.arm.update_torques()\n self.gripper.update_torques()\n iter += 1\n\n if isinstance(self.arm, real.arm.Arm):\n continue\n\n if not check_collisions or iter % check_collision_freq != 0:\n continue\n\n # Terminate early if there are collisions with the gripper fingers\n # or grasped object.\n for body_id_a, link_id_a in zip(body_ids_a, link_ids_a):\n for body_id_b in check_collisions:\n if self._is_colliding(body_id_a, body_id_b, link_id_a):\n raise ControlException(\n f\"Robot.goto_pose({pos}, {quat}): Collision {body_id_a}:{link_id_a}, {body_id_b}\"\n )\n # print(\"Robot.goto_pose:\", pos, quat, status)\n\n if status == articulated_body.ControlStatus.ABORTED:\n raise ControlException(f\"Robot.goto_pose({pos}, {quat}): Singularity\")\n\n return status in (\n articulated_body.ControlStatus.POS_CONVERGED,\n articulated_body.ControlStatus.VEL_CONVERGED,\n )\n\n def goto_configuration(self, q: np.ndarray) -> bool:\n \"\"\"Sets the robot to the desired joint configuration.\n\n Args:\n q: Joint configuration.\n Returns:\n True if the controller converges to the desired position or zero\n velocity, false if the command times out.\n \"\"\"\n # Set the configuration goal.\n self.arm.set_configuration_goal(q)\n\n # Simulate until the pose goal is reached.\n status = self.arm.update_torques()\n self.gripper.update_torques()\n while status == articulated_body.ControlStatus.IN_PROGRESS:\n self.step_simulation()\n status = self.arm.update_torques()\n self.gripper.update_torques()\n\n return status in (\n 
articulated_body.ControlStatus.POS_CONVERGED,\n articulated_body.ControlStatus.VEL_CONVERGED,\n )\n\n def grasp(\n self,\n command: float,\n pos_gains: Optional[Union[Tuple[float, float], np.ndarray]] = None,\n timeout: Optional[float] = None,\n ) -> bool:\n \"\"\"Sets the gripper to the desired grasp (0.0 open, 1.0 closed).\n\n This method blocks until the command finishes or times out. A\n ControlException will be raised if the grasp controller is aborted.\n\n Any existing grasp constraints will be cleared and no new ones will be\n created. Use `Robot.grasp_object()` to create a grasp constraint.\n\n Args:\n command: Desired grasp (range from 0.0 open to 1.0 closed).\n pos_gains: kp gains (only used for sim).\n timeout: Uses the timeout specified in the yaml gripper config if None.\n Returns:\n True if the grasp controller converges to the desired position or\n zero velocity, false if the command times out.\n \"\"\"\n # Clear any existing grasp constraints.\n self.gripper.remove_grasp_constraint()\n self.clear_load()\n\n # Set the new grasp command.\n self.gripper.set_grasp(command, pos_gains, timeout)\n\n # Simulate until the grasp command finishes.\n status = self.gripper.update_torques()\n while status == articulated_body.ControlStatus.IN_PROGRESS:\n self.arm.update_torques()\n self.step_simulation()\n status = self.gripper.update_torques()\n # print(\"Robot.grasp:\", command, status)\n\n if status == articulated_body.ControlStatus.ABORTED:\n raise ControlException(f\"Robot.grasp({command})\")\n\n return status in (\n articulated_body.ControlStatus.POS_CONVERGED,\n articulated_body.ControlStatus.VEL_CONVERGED,\n )\n\n def grasp_object(\n self,\n obj: body.Body,\n pos_gains: Optional[Union[Tuple[float, float], np.ndarray]] = None,\n timeout: Optional[float] = None,\n realistic: bool = True,\n ) -> bool:\n \"\"\"Attempts to grasp an object and attaches the object to the gripper via a pose constraint.\n\n This method blocks until the command finishes or times out. A\n ControlException will be raised if the grasp controller is aborted.\n\n Args:\n command: Desired grasp (range from 0.0 open to 1.0 closed).\n pos_gains: kp gains (only used for sim).\n timeout: Uses the timeout specified in the yaml gripper config if None.\n realistic: If false, creates a pose constraint regardless of whether\n the object is in a secure grasp.\n Returns:\n True if the object is successfully grasped, false otherwise.\n \"\"\"\n if realistic:\n self.grasp(1, pos_gains, timeout)\n\n # Wait for grasped object to settle.\n status = self.gripper.update_torques()\n while (\n status\n in (\n articulated_body.ControlStatus.VEL_CONVERGED,\n articulated_body.ControlStatus.IN_PROGRESS,\n )\n and self.gripper._gripper_state.iter_timeout >= 0\n and (obj.twist() > 0.001).any()\n ):\n self.arm.update_torques()\n status = self.gripper.update_torques()\n self.step_simulation()\n\n # Make sure fingers aren't fully closed.\n if status == articulated_body.ControlStatus.POS_CONVERGED:\n return False\n\n # Lock the object in place with a grasp constraint.\n if not self.gripper.create_grasp_constraint(obj.body_id, realistic):\n return False\n\n # Add object load.\n T_obj_to_world = obj.pose().to_eigen()\n T_ee_to_world = dyn.cartesian_pose(self.arm.ab)\n T_obj_to_ee = T_ee_to_world.inverse() * T_obj_to_world\n self.set_load(obj.inertia * T_obj_to_ee)\n\n return True"
}
] | import dataclasses
import random
import numpy as np
import pybullet as p
import symbolic
from typing import Optional, Dict, List, Sequence, Tuple, Type
from ctrlutils import eigen
from shapely.geometry import Polygon, LineString
from generative_skill_chaining.envs.pybullet.table import primitive_actions, utils
from generative_skill_chaining.envs.pybullet.table.objects import Box, Hook, Null, Object, Rack
from generative_skill_chaining.envs.pybullet.sim import math
from generative_skill_chaining.envs.pybullet.sim.robot import Robot | 9,389 |
dbprint = lambda *args: None # noqa
# dbprint = print
@dataclasses.dataclass
class Predicate:
    args: List[str]
    @classmethod
    def create(cls, proposition: str) -> "Predicate":
        predicate, args = symbolic.parse_proposition(proposition)
        predicate_classes = {
            name.lower(): predicate_class for name, predicate_class in globals().items()
        }
        predicate_class = predicate_classes[predicate]
        return predicate_class(args)
    def sample(
        self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"]
    ) -> bool:
        """Generates a geometric grounding of a predicate."""
        return True
    def value(
        self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"]
    ) -> bool:
        """Evaluates to True if the geometrically grounded predicate is satisfied."""
        return True
    def get_arg_objects(self, objects: Dict[str, Object]) -> List[Object]:
        return [objects[arg] for arg in self.args]
    def __str__(self) -> str:
        return f"{type(self).__name__.lower()}({', '.join(self.args)})"
    def __hash__(self) -> int:
        return hash(str(self))
    def __eq__(self, other) -> bool:
        return str(self) == str(other)
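# A quick usage sketch of the factory above (the object name "hook" is illustrative, not from the repo):
# Predicate.create("free(hook)") parses the proposition with symbolic.parse_proposition, looks up the
# predicate class by its lowercased name, and returns Free(["hook"]); str() then round-trips to "free(hook)".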
class HandleGrasp(Predicate):
    """Unary predicate enforcing a handle grasp towards the tail end on a hook object."""
    pass
class UpperHandleGrasp(Predicate):
    """Unary predicate enforcing a handle grasp towards the head on a hook object."""
    pass
class Free(Predicate):
    """Unary predicate enforcing that no top-down occlusions exist on the object."""
    DISTANCE_MIN: Dict[Tuple[Type[Object], Type[Object]], float] = {
        (Box, Box): 0.05,
        (Box, Hook): 0.05,
|
dbprint = lambda *args: None # noqa
# dbprint = print
@dataclasses.dataclass
class Predicate:
    args: List[str]
    @classmethod
    def create(cls, proposition: str) -> "Predicate":
        predicate, args = symbolic.parse_proposition(proposition)
        predicate_classes = {
            name.lower(): predicate_class for name, predicate_class in globals().items()
        }
        predicate_class = predicate_classes[predicate]
        return predicate_class(args)
    def sample(
        self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"]
    ) -> bool:
        """Generates a geometric grounding of a predicate."""
        return True
    def value(
        self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"]
    ) -> bool:
        """Evaluates to True if the geometrically grounded predicate is satisfied."""
        return True
    def get_arg_objects(self, objects: Dict[str, Object]) -> List[Object]:
        return [objects[arg] for arg in self.args]
    def __str__(self) -> str:
        return f"{type(self).__name__.lower()}({', '.join(self.args)})"
    def __hash__(self) -> int:
        return hash(str(self))
    def __eq__(self, other) -> bool:
        return str(self) == str(other)
class HandleGrasp(Predicate):
    """Unary predicate enforcing a handle grasp towards the tail end on a hook object."""
    pass
class UpperHandleGrasp(Predicate):
    """Unary predicate enforcing a handle grasp towards the head on a hook object."""
    pass
class Free(Predicate):
    """Unary predicate enforcing that no top-down occlusions exist on the object."""
    DISTANCE_MIN: Dict[Tuple[Type[Object], Type[Object]], float] = {
        (Box, Box): 0.05,
(Box, Hook): 0.05, | (Box, Rack): 0.1, | 6 | 2023-10-16 00:22:40+00:00 | 12k |
akashgreninja/GreSec | backend/venv/lib/python3.10/site-packages/pydantic/json_schema.py | [
{
"identifier": "_config",
"path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_config.py",
"snippet": "DEPRECATION_MESSAGE = 'Support for class-based `config` is deprecated, use ConfigDict instead.'\nV2_REMOVED_KEYS = {\n 'allow_mutation',\n 'error_msg_templates',\n 'fields',\n 'getter_dict',\n 'smart_union',\n 'underscore_attrs_are_private',\n 'json_loads',\n 'json_dumps',\n 'copy_on_model_validation',\n 'post_init_call',\n}\nV2_RENAMED_KEYS = {\n 'allow_population_by_field_name': 'populate_by_name',\n 'anystr_lower': 'str_to_lower',\n 'anystr_strip_whitespace': 'str_strip_whitespace',\n 'anystr_upper': 'str_to_upper',\n 'keep_untouched': 'ignored_types',\n 'max_anystr_length': 'str_max_length',\n 'min_anystr_length': 'str_min_length',\n 'orm_mode': 'from_attributes',\n 'schema_extra': 'json_schema_extra',\n 'validate_all': 'validate_default',\n}\nclass ConfigWrapper:\nclass ConfigWrapperStack:\n def __init__(self, config: ConfigDict | dict[str, Any] | type[Any] | None, *, check: bool = True):\n def for_model(cls, bases: tuple[type[Any], ...], namespace: dict[str, Any], kwargs: dict[str, Any]) -> Self:\n def __getattr__(self, name: str) -> Any:\n def core_config(self, obj: Any) -> core_schema.CoreConfig:\n def dict_not_none(**kwargs: Any) -> Any:\n def __repr__(self):\n def __init__(self, config_wrapper: ConfigWrapper):\n def tail(self) -> ConfigWrapper:\n def push(self, config_wrapper: ConfigWrapper | ConfigDict | None) -> ContextManager[None]:\n def _context_manager() -> Iterator[None]:\ndef prepare_config(config: ConfigDict | dict[str, Any] | type[Any] | None) -> ConfigDict:\ndef check_deprecated(config_dict: ConfigDict) -> None:"
},
{
"identifier": "_core_metadata",
"path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_core_metadata.py",
"snippet": "class CoreMetadata(typing_extensions.TypedDict, total=False):\nclass CoreMetadataHandler:\n def __init__(self, schema: CoreSchemaOrField):\n def metadata(self) -> CoreMetadata:\ndef build_metadata_dict(\n *, # force keyword arguments to make it easier to modify this signature in a backwards-compatible way\n js_functions: list[GetJsonSchemaFunction] | None = None,\n js_annotation_functions: list[GetJsonSchemaFunction] | None = None,\n js_prefer_positional_arguments: bool | None = None,\n typed_dict_cls: type[Any] | None = None,\n initial_metadata: Any | None = None,\n) -> Any:"
},
{
"identifier": "_core_utils",
"path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_core_utils.py",
"snippet": "_CORE_SCHEMA_FIELD_TYPES = {'typed-dict-field', 'dataclass-field', 'model-field', 'computed-field'}\n_FUNCTION_WITH_INNER_SCHEMA_TYPES = {'function-before', 'function-after', 'function-wrap'}\n_LIST_LIKE_SCHEMA_WITH_ITEMS_TYPES = {'list', 'tuple-variable', 'set', 'frozenset'}\n_DEFINITIONS_CACHE_METADATA_KEY = 'pydantic.definitions_cache'\nNEEDS_APPLY_DISCRIMINATED_UNION_METADATA_KEY = 'pydantic.internal.needs_apply_discriminated_union'\nHAS_INVALID_SCHEMAS_METADATA_KEY = 'pydantic.internal.invalid'\nT = TypeVar('T')\ndef is_core_schema(\n schema: CoreSchemaOrField,\n) -> TypeGuard[CoreSchema]:\ndef is_core_schema_field(\n schema: CoreSchemaOrField,\n) -> TypeGuard[CoreSchemaField]:\ndef is_function_with_inner_schema(\n schema: CoreSchemaOrField,\n) -> TypeGuard[FunctionSchemaWithInnerSchema]:\ndef is_list_like_schema_with_items_schema(\n schema: CoreSchema,\n) -> TypeGuard[\ndef get_type_ref(type_: type[Any], args_override: tuple[type[Any], ...] | None = None) -> str:\ndef get_ref(s: core_schema.CoreSchema) -> None | str:\ndef collect_definitions(schema: core_schema.CoreSchema) -> dict[str, core_schema.CoreSchema]:\n def _record_valid_refs(s: core_schema.CoreSchema, recurse: Recurse) -> core_schema.CoreSchema:\ndef define_expected_missing_refs(\n schema: core_schema.CoreSchema, allowed_missing_refs: set[str]\n) -> core_schema.CoreSchema | None:\ndef collect_invalid_schemas(schema: core_schema.CoreSchema) -> bool:\n def _is_schema_valid(s: core_schema.CoreSchema, recurse: Recurse) -> core_schema.CoreSchema:\n def __init__(self):\n def _build_schema_type_to_method(self) -> dict[core_schema.CoreSchemaType, Recurse]:\n def walk(self, schema: core_schema.CoreSchema, f: Walk) -> core_schema.CoreSchema:\n def _walk(self, schema: core_schema.CoreSchema, f: Walk) -> core_schema.CoreSchema:\n def _handle_other_schemas(self, schema: core_schema.CoreSchema, f: Walk) -> core_schema.CoreSchema:\n def _handle_ser_schemas(self, ser_schema: core_schema.SerSchema, f: Walk) -> core_schema.SerSchema:\n def handle_definitions_schema(self, schema: core_schema.DefinitionsSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_list_schema(self, schema: core_schema.ListSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_set_schema(self, schema: core_schema.SetSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_frozenset_schema(self, schema: core_schema.FrozenSetSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_generator_schema(self, schema: core_schema.GeneratorSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_tuple_variable_schema(\n self, schema: core_schema.TupleVariableSchema | core_schema.TuplePositionalSchema, f: Walk\n ) -> core_schema.CoreSchema:\n def handle_tuple_positional_schema(\n self, schema: core_schema.TupleVariableSchema | core_schema.TuplePositionalSchema, f: Walk\n ) -> core_schema.CoreSchema:\n def handle_dict_schema(self, schema: core_schema.DictSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_function_schema(self, schema: AnyFunctionSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_union_schema(self, schema: core_schema.UnionSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_tagged_union_schema(self, schema: core_schema.TaggedUnionSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_chain_schema(self, schema: core_schema.ChainSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_lax_or_strict_schema(self, schema: core_schema.LaxOrStrictSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_json_or_python_schema(self, schema: 
core_schema.JsonOrPythonSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_model_fields_schema(self, schema: core_schema.ModelFieldsSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_typed_dict_schema(self, schema: core_schema.TypedDictSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_dataclass_args_schema(self, schema: core_schema.DataclassArgsSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_arguments_schema(self, schema: core_schema.ArgumentsSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_call_schema(self, schema: core_schema.CallSchema, f: Walk) -> core_schema.CoreSchema:\ndef walk_core_schema(schema: core_schema.CoreSchema, f: Walk) -> core_schema.CoreSchema:\ndef simplify_schema_references(schema: core_schema.CoreSchema) -> core_schema.CoreSchema: # noqa: C901\n def collect_refs(s: core_schema.CoreSchema, recurse: Recurse) -> core_schema.CoreSchema:\n def count_refs(s: core_schema.CoreSchema, recurse: Recurse) -> core_schema.CoreSchema:\n def can_be_inlined(s: core_schema.DefinitionReferenceSchema, ref: str) -> bool:\n def inline_refs(s: core_schema.CoreSchema, recurse: Recurse) -> core_schema.CoreSchema:\ndef _strip_metadata(schema: CoreSchema) -> CoreSchema:\n def strip_metadata(s: CoreSchema, recurse: Recurse) -> CoreSchema:\ndef pretty_print_core_schema(\n schema: CoreSchema,\n include_metadata: bool = False,\n) -> None:\ndef validate_core_schema(schema: CoreSchema) -> CoreSchema:\nclass _WalkCoreSchema:"
},
{
"identifier": "_decorators",
"path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_decorators.py",
"snippet": "class ValidatorDecoratorInfo:\nclass FieldValidatorDecoratorInfo:\nclass RootValidatorDecoratorInfo:\nclass FieldSerializerDecoratorInfo:\nclass ModelSerializerDecoratorInfo:\nclass ModelValidatorDecoratorInfo:\nclass PydanticDescriptorProxy(Generic[ReturnType]):\nclass Decorator(Generic[DecoratorInfoType]):\nclass DecoratorInfos:\n def __post_init__(self):\n def _call_wrapped_attr(self, func: Callable[[Any], None], *, name: str) -> PydanticDescriptorProxy[ReturnType]:\n def __get__(self, obj: object | None, obj_type: type[object] | None = None) -> PydanticDescriptorProxy[ReturnType]:\n def __set_name__(self, instance: Any, name: str) -> None:\n def __getattr__(self, __name: str) -> Any:\n def build(\n cls_: Any,\n *,\n cls_var_name: str,\n shim: Callable[[Any], Any] | None,\n info: DecoratorInfoType,\n ) -> Decorator[DecoratorInfoType]:\n def bind_to_cls(self, cls: Any) -> Decorator[DecoratorInfoType]:\ndef get_bases(tp: type[Any]) -> tuple[type[Any], ...]:\ndef mro(tp: type[Any]) -> tuple[type[Any], ...]:\ndef mro_for_bases(bases: tuple[type[Any], ...]) -> tuple[type[Any], ...]:\n def merge_seqs(seqs: list[deque[type[Any]]]) -> Iterable[type[Any]]:\ndef get_attribute_from_bases(tp: type[Any] | tuple[type[Any], ...], name: str) -> Any:\ndef get_attribute_from_base_dicts(tp: type[Any], name: str) -> Any:\n def build(model_dc: type[Any]) -> DecoratorInfos: # noqa: C901 (ignore complexity)\ndef inspect_validator(validator: Callable[..., Any], mode: FieldValidatorModes) -> bool:\ndef inspect_field_serializer(\n serializer: Callable[..., Any], mode: Literal['plain', 'wrap'], computed_field: bool = False\n) -> tuple[bool, bool]:\ndef inspect_annotated_serializer(serializer: Callable[..., Any], mode: Literal['plain', 'wrap']) -> bool:\ndef inspect_model_serializer(serializer: Callable[..., Any], mode: Literal['plain', 'wrap']) -> bool:\ndef _serializer_info_arg(mode: Literal['plain', 'wrap'], n_positional: int) -> bool | None:\ndef is_instance_method_from_sig(function: AnyDecoratorCallable) -> bool:\ndef ensure_classmethod_based_on_signature(function: AnyDecoratorCallable) -> Any:\ndef _is_classmethod_from_sig(function: AnyDecoratorCallable) -> bool:\ndef unwrap_wrapped_function(\n func: Any,\n *,\n unwrap_partial: bool = True,\n unwrap_class_static_method: bool = True,\n) -> Any:\ndef get_function_return_type(\n func: Any, explicit_return_type: Any, types_namespace: dict[str, Any] | None = None\n) -> Any:\ndef count_positional_params(sig: Signature) -> int:\ndef can_be_positional(param: Parameter) -> bool:\ndef ensure_property(f: Any) -> Any:"
},
{
"identifier": "_internal_dataclass",
"path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_internal_dataclass.py",
"snippet": ""
},
{
"identifier": "_mock_val_ser",
"path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_mock_val_ser.py",
"snippet": "class MockValSer(Generic[ValSer]):\n def __init__(\n self,\n error_message: str,\n *,\n code: PydanticErrorCodes,\n val_or_ser: Literal['validator', 'serializer'],\n attempt_rebuild: Callable[[], ValSer | None] | None = None,\n ) -> None:\n def __getattr__(self, item: str) -> None:\n def rebuild(self) -> ValSer | None:\ndef set_model_mocks(cls: type[BaseModel], cls_name: str, undefined_name: str = 'all referenced types') -> None:\n def attempt_rebuild_validator() -> SchemaValidator | None:\n def attempt_rebuild_serializer() -> SchemaSerializer | None:\ndef set_dataclass_mock_validator(cls: type[PydanticDataclass], cls_name: str, undefined_name: str) -> None:\n def attempt_rebuild() -> SchemaValidator | None:"
},
{
"identifier": "_schema_generation_shared",
"path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_schema_generation_shared.py",
"snippet": "class GenerateJsonSchemaHandler(GetJsonSchemaHandler):\nclass CallbackGetCoreSchemaHandler(GetCoreSchemaHandler):\n def __init__(self, generate_json_schema: GenerateJsonSchema, handler_override: HandlerOverride | None) -> None:\n def __call__(self, __core_schema: CoreSchemaOrField) -> JsonSchemaValue:\n def resolve_ref_schema(self, maybe_ref_json_schema: JsonSchemaValue) -> JsonSchemaValue:\n def __init__(\n self,\n handler: Callable[[Any], core_schema.CoreSchema],\n generate_schema: GenerateSchema,\n ref_mode: Literal['to-def', 'unpack'] = 'to-def',\n ) -> None:\n def __call__(self, __source_type: Any) -> core_schema.CoreSchema:\n def _get_types_namespace(self) -> dict[str, Any] | None:\n def generate_schema(self, __source_type: Any) -> core_schema.CoreSchema:\n def field_name(self) -> str | None:\n def resolve_ref_schema(self, maybe_ref_schema: core_schema.CoreSchema) -> core_schema.CoreSchema:"
},
{
"identifier": "_typing_extra",
"path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_typing_extra.py",
"snippet": " def origin_is_union(tp: type[Any] | None) -> bool:\n def origin_is_union(tp: type[Any] | None) -> bool:\ndef is_none_type(type_: Any) -> bool:\ndef is_callable_type(type_: type[Any]) -> bool:\ndef is_literal_type(type_: type[Any]) -> bool:\ndef literal_values(type_: type[Any]) -> tuple[Any, ...]:\ndef all_literal_values(type_: type[Any]) -> list[Any]:\ndef is_annotated(ann_type: Any) -> bool:\ndef is_namedtuple(type_: type[Any]) -> bool:\ndef is_new_type(type_: type[Any]) -> bool:\ndef _check_classvar(v: type[Any] | None) -> bool:\ndef is_classvar(ann_type: type[Any]) -> bool:\ndef _check_finalvar(v: type[Any] | None) -> bool:\ndef is_finalvar(ann_type: Any) -> bool:\ndef parent_frame_namespace(*, parent_depth: int = 2) -> dict[str, Any] | None:\ndef add_module_globals(obj: Any, globalns: dict[str, Any] | None = None) -> dict[str, Any]:\ndef get_cls_types_namespace(cls: type[Any], parent_namespace: dict[str, Any] | None = None) -> dict[str, Any]:\ndef get_cls_type_hints_lenient(obj: Any, globalns: dict[str, Any] | None = None) -> dict[str, Any]:\ndef eval_type_lenient(value: Any, globalns: dict[str, Any] | None, localns: dict[str, Any] | None) -> Any:\ndef get_function_type_hints(\n function: Callable[..., Any], *, include_keys: set[str] | None = None, types_namespace: dict[str, Any] | None = None\n) -> dict[str, Any]:\n def _make_forward_ref(\n arg: Any,\n is_argument: bool = True,\n *,\n is_class: bool = False,\n ) -> typing.ForwardRef:\n def get_type_hints( # noqa: C901\n obj: Any,\n globalns: dict[str, Any] | None = None,\n localns: dict[str, Any] | None = None,\n include_extras: bool = False,\n ) -> dict[str, Any]: # pragma: no cover\n def evaluate_fwd_ref(\n ref: ForwardRef, globalns: dict[str, Any] | None = None, localns: dict[str, Any] | None = None\n ) -> Any:\n def evaluate_fwd_ref(\n ref: ForwardRef, globalns: dict[str, Any] | None = None, localns: dict[str, Any] | None = None\n ) -> Any:\ndef is_dataclass(_cls: type[Any]) -> TypeGuard[type[StandardDataclass]]:\ndef origin_is_type_alias_type(origin: Any) -> TypeGuard[TypeAliasType]:\nLITERAL_TYPES: set[Any] = {Literal}\nNONE_TYPES: tuple[Any, ...] = (None, NoneType, *(tp[None] for tp in LITERAL_TYPES))"
},
{
"identifier": "GetJsonSchemaHandler",
"path": "backend/venv/lib/python3.10/site-packages/pydantic/annotated_handlers.py",
"snippet": "class GetJsonSchemaHandler:\n \"\"\"Handler to call into the next JSON schema generation function.\n\n Attributes:\n mode: Json schema mode, can be `validation` or `serialization`.\n \"\"\"\n\n mode: JsonSchemaMode\n\n def __call__(self, __core_schema: CoreSchemaOrField) -> JsonSchemaValue:\n \"\"\"Call the inner handler and get the JsonSchemaValue it returns.\n This will call the next JSON schema modifying function up until it calls\n into `pydantic.json_schema.GenerateJsonSchema`, which will raise a\n `pydantic.errors.PydanticInvalidForJsonSchema` error if it cannot generate\n a JSON schema.\n\n Args:\n __core_schema: A `pydantic_core.core_schema.CoreSchema`.\n\n Returns:\n JsonSchemaValue: The JSON schema generated by the inner JSON schema modify\n functions.\n \"\"\"\n raise NotImplementedError\n\n def resolve_ref_schema(self, __maybe_ref_json_schema: JsonSchemaValue) -> JsonSchemaValue:\n \"\"\"Get the real schema for a `{\"$ref\": ...}` schema.\n If the schema given is not a `$ref` schema, it will be returned as is.\n This means you don't have to check before calling this function.\n\n Args:\n __maybe_ref_json_schema: A JsonSchemaValue, ref based or not.\n\n Raises:\n LookupError: If the ref is not found.\n\n Returns:\n JsonSchemaValue: A JsonSchemaValue that has no `$ref`.\n \"\"\"\n raise NotImplementedError"
},
{
"identifier": "JsonSchemaExtraCallable",
"path": "backend/venv/lib/python3.10/site-packages/pydantic/config.py",
"snippet": "class ConfigDict(TypedDict, total=False):"
},
{
"identifier": "PydanticInvalidForJsonSchema",
"path": "backend/venv/lib/python3.10/site-packages/pydantic/errors.py",
"snippet": "class PydanticInvalidForJsonSchema(PydanticUserError):\n \"\"\"An error raised during failures to generate a JSON schema for some `CoreSchema`.\n\n Attributes:\n message: Description of the error.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message, code='invalid-for-json-schema')"
},
{
"identifier": "PydanticUserError",
"path": "backend/venv/lib/python3.10/site-packages/pydantic/errors.py",
"snippet": "class PydanticUserError(PydanticErrorMixin, TypeError):\n \"\"\"An error raised due to incorrect use of Pydantic.\"\"\""
}
] | import dataclasses
import inspect
import math
import re
import warnings
import pydantic_core
from collections import defaultdict
from copy import deepcopy
from dataclasses import is_dataclass
from enum import Enum
from typing import (
TYPE_CHECKING,
Any,
Callable,
Counter,
Dict,
Hashable,
Iterable,
List,
NewType,
Sequence,
Tuple,
TypeVar,
Union,
cast,
)
from pydantic_core import CoreSchema, PydanticOmit, core_schema, to_jsonable_python
from pydantic_core.core_schema import ComputedField
from typing_extensions import Annotated, Literal, assert_never
from ._internal import (
_config,
_core_metadata,
_core_utils,
_decorators,
_internal_dataclass,
_mock_val_ser,
_schema_generation_shared,
_typing_extra,
)
from .annotated_handlers import GetJsonSchemaHandler
from .config import JsonSchemaExtraCallable
from .errors import PydanticInvalidForJsonSchema, PydanticUserError
from . import ConfigDict
from ._internal._core_utils import CoreSchemaField, CoreSchemaOrField
from ._internal._dataclasses import PydanticDataclass
from ._internal._schema_generation_shared import GetJsonSchemaFunction
from .main import BaseModel | 7,407 | field_json_schema = self.generate_inner(field).copy()
            except PydanticOmit:
                continue
            if 'title' not in field_json_schema and self.field_title_should_be_set(field):
                title = self.get_title_from_name(name)
                field_json_schema['title'] = title
            field_json_schema = self.handle_ref_overrides(field_json_schema)
            properties[name] = field_json_schema
            if required:
                required_fields.append(name)
        json_schema = {'type': 'object', 'properties': properties}
        if required_fields:
            json_schema['required'] = required_fields
        return json_schema
    def _get_alias_name(self, field: CoreSchemaField, name: str) -> str:
        if field['type'] == 'computed-field':
            alias: Any = field.get('alias', name)
        elif self.mode == 'validation':
            alias = field.get('validation_alias', name)
        else:
            alias = field.get('serialization_alias', name)
        if isinstance(alias, str):
            name = alias
        elif isinstance(alias, list):
            alias = cast('list[str] | str', alias)
            for path in alias:
                if isinstance(path, list) and len(path) == 1 and isinstance(path[0], str):
                    # Use the first valid single-item string path; the code that constructs the alias array
                    # should ensure the first such item is what belongs in the JSON schema
                    name = path[0]
                    break
        else:
            assert_never(alias)
        return name
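    # Sketch of the alias handling above (hypothetical field, not from this file): a field declared with
    # validation_alias=AliasChoices('fullName', AliasPath('names', 0)) reaches this method as a list of paths;
    # the first single-item string path ('fullName') becomes the property name in the validation-mode schema.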
    def typed_dict_field_schema(self, schema: core_schema.TypedDictField) -> JsonSchemaValue:
        """Generates a JSON schema that matches a schema that defines a typed dict field.
        Args:
            schema: The core schema.
        Returns:
            The generated JSON schema.
        """
        return self.generate_inner(schema['schema'])
    def dataclass_field_schema(self, schema: core_schema.DataclassField) -> JsonSchemaValue:
        """Generates a JSON schema that matches a schema that defines a dataclass field.
        Args:
            schema: The core schema.
        Returns:
            The generated JSON schema.
        """
        return self.generate_inner(schema['schema'])
    def model_field_schema(self, schema: core_schema.ModelField) -> JsonSchemaValue:
        """Generates a JSON schema that matches a schema that defines a model field.
        Args:
            schema: The core schema.
        Returns:
            The generated JSON schema.
        """
        return self.generate_inner(schema['schema'])
    def computed_field_schema(self, schema: core_schema.ComputedField) -> JsonSchemaValue:
        """Generates a JSON schema that matches a schema that defines a computed field.
        Args:
            schema: The core schema.
        Returns:
            The generated JSON schema.
        """
        return self.generate_inner(schema['return_schema'])
    def model_schema(self, schema: core_schema.ModelSchema) -> JsonSchemaValue:
        """Generates a JSON schema that matches a schema that defines a model.
        Args:
            schema: The core schema.
        Returns:
            The generated JSON schema.
        """
        # We do not use schema['model'].model_json_schema() here
        # because it could lead to inconsistent refs handling, etc.
        cls = cast('type[BaseModel]', schema['cls'])
        config = cls.model_config
        title = config.get('title')
        with self._config_wrapper_stack.push(config):
            json_schema = self.generate_inner(schema['schema'])
        json_schema_extra = config.get('json_schema_extra')
        if cls.__pydantic_root_model__:
            root_json_schema_extra = cls.model_fields['root'].json_schema_extra
            if json_schema_extra and root_json_schema_extra:
                raise ValueError(
                    '"model_config[\'json_schema_extra\']" and "Field.json_schema_extra" on "RootModel.root"'
                    ' field must not be set simultaneously'
                )
            if root_json_schema_extra:
                json_schema_extra = root_json_schema_extra
        json_schema = self._update_class_schema(json_schema, title, config.get('extra', None), cls, json_schema_extra)
        return json_schema
    def _update_class_schema(
        self,
        json_schema: JsonSchemaValue,
        title: str | None,
        extra: Literal['allow', 'ignore', 'forbid'] | None,
        cls: type[Any],
| """
The `json_schema` module contains classes and functions to allow the way [JSON Schema](https://json-schema.org/)
is generated to be customized.
In general you shouldn't need to use this module directly; instead, you can use
[`BaseModel.model_json_schema`][pydantic.BaseModel.model_json_schema] and
[`TypeAdapter.json_schema`][pydantic.TypeAdapter.json_schema].
"""
from __future__ import annotations as _annotations
if TYPE_CHECKING:
CoreSchemaOrFieldType = Literal[core_schema.CoreSchemaType, core_schema.CoreSchemaFieldType]
"""
A type alias for defined schema types that represents a union of
`core_schema.CoreSchemaType` and
`core_schema.CoreSchemaFieldType`.
"""
JsonSchemaValue = Dict[str, Any]
"""
A type alias for a JSON schema value. This is a dictionary of string keys to arbitrary values.
"""
JsonSchemaMode = Literal['validation', 'serialization']
"""
A type alias that represents the mode of a JSON schema; either 'validation' or 'serialization'.
For some types, the inputs to validation differ from the outputs of serialization. For example,
computed fields will only be present when serializing, and should not be provided when
validating. This flag provides a way to indicate whether you want the JSON schema required
for validation inputs, or that will be matched by serialization outputs.
"""
_MODE_TITLE_MAPPING: dict[JsonSchemaMode, str] = {'validation': 'Input', 'serialization': 'Output'}
def update_json_schema(schema: JsonSchemaValue, updates: dict[str, Any]) -> JsonSchemaValue:
"""Update a JSON schema by providing a dictionary of updates.
This function sets the provided key-value pairs in the schema and returns the updated schema.
Args:
schema: The JSON schema to update.
updates: A dictionary of key-value pairs to set in the schema.
Returns:
The updated JSON schema.
"""
schema.update(updates)
return schema
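# e.g. update_json_schema({'type': 'string'}, {'title': 'Name'}) returns {'type': 'string', 'title': 'Name'}
# (the input dict is mutated in place and then returned).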
JsonSchemaWarningKind = Literal['skipped-choice', 'non-serializable-default']
"""
A type alias representing the kinds of warnings that can be emitted during JSON schema generation.
See [`GenerateJsonSchema.render_warning_message`][pydantic.json_schema.GenerateJsonSchema.render_warning_message]
for more details.
"""
class PydanticJsonSchemaWarning(UserWarning):
"""This class is used to emit warnings produced during JSON schema generation.
See the [`GenerateJsonSchema.emit_warning`][pydantic.json_schema.GenerateJsonSchema.emit_warning] and
[`GenerateJsonSchema.render_warning_message`][pydantic.json_schema.GenerateJsonSchema.render_warning_message]
methods for more details; these can be overridden to control warning behavior.
"""
# ##### JSON Schema Generation #####
DEFAULT_REF_TEMPLATE = '#/$defs/{model}'
"""The default format string used to generate reference names."""
# There are three types of references relevant to building JSON schemas:
# 1. core_schema "ref" values; these are not exposed as part of the JSON schema
# * these might look like the fully qualified path of a model, its id, or something similar
CoreRef = NewType('CoreRef', str)
# 2. keys of the "definitions" object that will eventually go into the JSON schema
# * by default, these look like "MyModel", though may change in the presence of collisions
# * eventually, we may want to make it easier to modify the way these names are generated
DefsRef = NewType('DefsRef', str)
# 3. the values corresponding to the "$ref" key in the schema
# * By default, these look like "#/$defs/MyModel", as in {"$ref": "#/$defs/MyModel"}
JsonRef = NewType('JsonRef', str)
CoreModeRef = Tuple[CoreRef, JsonSchemaMode]
JsonSchemaKeyT = TypeVar('JsonSchemaKeyT', bound=Hashable)
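# Putting the three layers together for one model (values are illustrative only):
#   CoreRef('my_module.MyModel:140123456')  ->  DefsRef('MyModel')  ->  JsonRef('#/$defs/MyModel')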
@dataclasses.dataclass(**_internal_dataclass.slots_true)
class _DefinitionsRemapping:
    defs_remapping: dict[DefsRef, DefsRef]
    json_remapping: dict[JsonRef, JsonRef]
    @staticmethod
    def from_prioritized_choices(
        prioritized_choices: dict[DefsRef, list[DefsRef]],
        defs_to_json: dict[DefsRef, JsonRef],
        definitions: dict[DefsRef, JsonSchemaValue],
    ) -> _DefinitionsRemapping:
        """
        This function should produce a remapping that replaces complex DefsRef with the simpler ones from the
        prioritized_choices such that applying the name remapping would result in an equivalent JSON schema.
        """
        # We need to iteratively simplify the definitions until we reach a fixed point.
        # The reason for this is that outer definitions may reference inner definitions that get simplified
        # into an equivalent reference, and the outer definitions won't be equivalent until we've simplified
        # the inner definitions.
        copied_definitions = deepcopy(definitions)
        definitions_schema = {'$defs': copied_definitions}
        for _iter in range(100):  # prevent an infinite loop in the case of a bug, 100 iterations should be enough
            # For every possible remapped DefsRef, collect all schemas that that DefsRef might be used for:
            schemas_for_alternatives: dict[DefsRef, list[JsonSchemaValue]] = defaultdict(list)
            for defs_ref in copied_definitions:
                alternatives = prioritized_choices[defs_ref]
                for alternative in alternatives:
                    schemas_for_alternatives[alternative].append(copied_definitions[defs_ref])
            # Deduplicate the schemas for each alternative; the idea is that we only want to remap to a new DefsRef
            # if it introduces no ambiguity, i.e., there is only one distinct schema for that DefsRef.
            for defs_ref, schemas in schemas_for_alternatives.items():
                schemas_for_alternatives[defs_ref] = _deduplicate_schemas(schemas_for_alternatives[defs_ref])
            # Build the remapping
            defs_remapping: dict[DefsRef, DefsRef] = {}
            json_remapping: dict[JsonRef, JsonRef] = {}
            for original_defs_ref in definitions:
                alternatives = prioritized_choices[original_defs_ref]
                # Pick the first alternative that has only one schema, since that means there is no collision
                remapped_defs_ref = next(x for x in alternatives if len(schemas_for_alternatives[x]) == 1)
                defs_remapping[original_defs_ref] = remapped_defs_ref
                json_remapping[defs_to_json[original_defs_ref]] = defs_to_json[remapped_defs_ref]
            remapping = _DefinitionsRemapping(defs_remapping, json_remapping)
            new_definitions_schema = remapping.remap_json_schema({'$defs': copied_definitions})
            if definitions_schema == new_definitions_schema:
                # We've reached the fixed point
                return remapping
            definitions_schema = new_definitions_schema
        raise PydanticInvalidForJsonSchema('Failed to simplify the JSON schema definitions')
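    # Example of the simplification performed above (names are illustrative): if the long collision-free
    # name 'mod__submod__MyModel' is the only definition competing for the shorter alternative 'MyModel',
    # the returned remapping rewrites '#/$defs/mod__submod__MyModel' to '#/$defs/MyModel' everywhere.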
def remap_defs_ref(self, ref: DefsRef) -> DefsRef:
return self.defs_remapping.get(ref, ref)
def remap_json_ref(self, ref: JsonRef) -> JsonRef:
return self.json_remapping.get(ref, ref)
def remap_json_schema(self, schema: Any) -> Any:
"""
Recursively update the JSON schema replacing all $refs
"""
if isinstance(schema, str):
# Note: this may not really be a JsonRef; we rely on having no collisions between JsonRefs and other strings
return self.remap_json_ref(JsonRef(schema))
elif isinstance(schema, list):
return [self.remap_json_schema(item) for item in schema]
elif isinstance(schema, dict):
for key, value in schema.items():
if key == '$ref' and isinstance(value, str):
schema['$ref'] = self.remap_json_ref(JsonRef(value))
elif key == '$defs':
schema['$defs'] = {
self.remap_defs_ref(DefsRef(key)): self.remap_json_schema(value)
for key, value in schema['$defs'].items()
}
else:
schema[key] = self.remap_json_schema(value)
return schema
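# A minimal usage sketch for the remapping machinery above (hypothetical refs; not called by
# the library itself, since the real mappings come from _DefinitionsRemapping.from_prioritized_choices):
def _example_remap_usage() -> Any:
    """Sketch: applying a remapping rewrites both the "$defs" keys and every "$ref" pointing at them."""
    remapping = _DefinitionsRemapping(
        defs_remapping={DefsRef('MyModel-Input'): DefsRef('MyModel')},
        json_remapping={JsonRef('#/$defs/MyModel-Input'): JsonRef('#/$defs/MyModel')},
    )
    schema = {'$defs': {'MyModel-Input': {'type': 'object'}}, 'items': {'$ref': '#/$defs/MyModel-Input'}}
    # Returns {'$defs': {'MyModel': {'type': 'object'}}, 'items': {'$ref': '#/$defs/MyModel'}}.
    return remapping.remap_json_schema(schema)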
class GenerateJsonSchema:
"""A class for generating JSON schemas.
This class generates JSON schemas based on configured parameters. The default schema dialect
is [https://json-schema.org/draft/2020-12/schema](https://json-schema.org/draft/2020-12/schema).
The class uses `by_alias` to configure how fields with
multiple names are handled and `ref_template` to format reference names.
Attributes:
schema_dialect: The JSON schema dialect used to generate the schema. See
[Declaring a Dialect](https://json-schema.org/understanding-json-schema/reference/schema.html#id4)
in the JSON Schema documentation for more information about dialects.
ignored_warning_kinds: Warnings to ignore when generating the schema. `self.render_warning_message` will
do nothing if its argument `kind` is in `ignored_warning_kinds`;
this value can be modified on subclasses to easily control which warnings are emitted.
        by_alias: Whether to use field aliases when generating the schema.
ref_template: The format string used when generating reference names.
core_to_json_refs: A mapping of core refs to JSON refs.
core_to_defs_refs: A mapping of core refs to definition refs.
defs_to_core_refs: A mapping of definition refs to core refs.
json_to_defs_refs: A mapping of JSON refs to definition refs.
definitions: Definitions in the schema.
collisions: Definitions with colliding names. When collisions are detected, we choose a non-colliding
name during generation, but we also track the colliding tag so that it can be remapped for the first
occurrence at the end of the process.
defs_ref_fallbacks: Core refs to fallback definitions refs.
_schema_type_to_method: A mapping of schema types to generator methods.
_used: Set to `True` after generating a schema to avoid re-use issues.
mode: The schema mode.
Args:
        by_alias: Whether to use field aliases in the generated schema.
ref_template: The format string to use when generating reference names.
Raises:
JsonSchemaError: If the instance of the class is inadvertently re-used after generating a schema.
"""
schema_dialect = 'https://json-schema.org/draft/2020-12/schema'
# `self.render_warning_message` will do nothing if its argument `kind` is in `ignored_warning_kinds`;
# this value can be modified on subclasses to easily control which warnings are emitted
ignored_warning_kinds: set[JsonSchemaWarningKind] = {'skipped-choice'}
def __init__(self, by_alias: bool = True, ref_template: str = DEFAULT_REF_TEMPLATE):
self.by_alias = by_alias
self.ref_template = ref_template
self.core_to_json_refs: dict[CoreModeRef, JsonRef] = {}
self.core_to_defs_refs: dict[CoreModeRef, DefsRef] = {}
self.defs_to_core_refs: dict[DefsRef, CoreModeRef] = {}
self.json_to_defs_refs: dict[JsonRef, DefsRef] = {}
self.definitions: dict[DefsRef, JsonSchemaValue] = {}
self._config_wrapper_stack = _config.ConfigWrapperStack(_config.ConfigWrapper({}))
self._mode: JsonSchemaMode = 'validation'
# The following includes a mapping of a fully-unique defs ref choice to a list of preferred
# alternatives, which are generally simpler, such as only including the class name.
# At the end of schema generation, we use these to produce a JSON schema with more human-readable
# definitions, which would also work better in a generated OpenAPI client, etc.
self._prioritized_defsref_choices: dict[DefsRef, list[DefsRef]] = {}
self._collision_counter: dict[str, int] = defaultdict(int)
self._collision_index: dict[str, int] = {}
self._schema_type_to_method = self.build_schema_type_to_method()
# When we encounter definitions we need to try to build them immediately
        # so that they are available to schemas that reference them.
        # But it's possible that the CoreSchema was never going to be used
        # (e.g. because the CoreSchema that references it short-circuits its JSON schema generation
        # without needing the reference), so instead of failing altogether if we can't build a definition we
# store the error raised and re-throw it if we end up needing that def
self._core_defs_invalid_for_json_schema: dict[DefsRef, PydanticInvalidForJsonSchema] = {}
# This changes to True after generating a schema, to prevent issues caused by accidental re-use
# of a single instance of a schema generator
self._used = False
@property
def _config(self) -> _config.ConfigWrapper:
return self._config_wrapper_stack.tail
@property
def mode(self) -> JsonSchemaMode:
if self._config.json_schema_mode_override is not None:
return self._config.json_schema_mode_override
else:
return self._mode
def build_schema_type_to_method(
self,
) -> dict[CoreSchemaOrFieldType, Callable[[CoreSchemaOrField], JsonSchemaValue]]:
"""Builds a dictionary mapping fields to methods for generating JSON schemas.
Returns:
A dictionary containing the mapping of `CoreSchemaOrFieldType` to a handler method.
Raises:
TypeError: If no method has been defined for generating a JSON schema for a given pydantic core schema type.
"""
mapping: dict[CoreSchemaOrFieldType, Callable[[CoreSchemaOrField], JsonSchemaValue]] = {}
core_schema_types: list[CoreSchemaOrFieldType] = _typing_extra.all_literal_values(
CoreSchemaOrFieldType # type: ignore
)
for key in core_schema_types:
method_name = f"{key.replace('-', '_')}_schema"
try:
mapping[key] = getattr(self, method_name)
except AttributeError as e: # pragma: no cover
raise TypeError(
f'No method for generating JsonSchema for core_schema.type={key!r} '
f'(expected: {type(self).__name__}.{method_name})'
) from e
return mapping
def generate_definitions(
self, inputs: Sequence[tuple[JsonSchemaKeyT, JsonSchemaMode, core_schema.CoreSchema]]
) -> tuple[dict[tuple[JsonSchemaKeyT, JsonSchemaMode], JsonSchemaValue], dict[DefsRef, JsonSchemaValue]]:
"""Generates JSON schema definitions from a list of core schemas, pairing the generated definitions with a
mapping that links the input keys to the definition references.
Args:
inputs: A sequence of tuples, where:
- The first element is a JSON schema key type.
- The second element is the JSON mode: either 'validation' or 'serialization'.
- The third element is a core schema.
Returns:
A tuple where:
- The first element is a dictionary whose keys are tuples of JSON schema key type and JSON mode, and
whose values are the JSON schema corresponding to that pair of inputs. (These schemas may have
JsonRef references to definitions that are defined in the second returned element.)
- The second element is a dictionary whose keys are definition references for the JSON schemas
from the first returned element, and whose values are the actual JSON schema definitions.
Raises:
PydanticUserError: Raised if the JSON schema generator has already been used to generate a JSON schema.
"""
if self._used:
raise PydanticUserError(
'This JSON schema generator has already been used to generate a JSON schema. '
f'You must create a new instance of {type(self).__name__} to generate a new JSON schema.',
code='json-schema-already-used',
)
for key, mode, schema in inputs:
self._mode = mode
self.generate_inner(schema)
definitions_remapping = self._build_definitions_remapping()
json_schemas_map: dict[tuple[JsonSchemaKeyT, JsonSchemaMode], DefsRef] = {}
for key, mode, schema in inputs:
self._mode = mode
json_schema = self.generate_inner(schema)
json_schemas_map[(key, mode)] = definitions_remapping.remap_json_schema(json_schema)
json_schema = {'$defs': self.definitions}
json_schema = definitions_remapping.remap_json_schema(json_schema)
self._used = True
return json_schemas_map, _sort_json_schema(json_schema['$defs']) # type: ignore
def generate(self, schema: CoreSchema, mode: JsonSchemaMode = 'validation') -> JsonSchemaValue:
"""Generates a JSON schema for a specified schema in a specified mode.
Args:
schema: A Pydantic model.
mode: The mode in which to generate the schema. Defaults to 'validation'.
Returns:
A JSON schema representing the specified schema.
Raises:
PydanticUserError: If the JSON schema generator has already been used to generate a JSON schema.
"""
self._mode = mode
if self._used:
raise PydanticUserError(
'This JSON schema generator has already been used to generate a JSON schema. '
f'You must create a new instance of {type(self).__name__} to generate a new JSON schema.',
code='json-schema-already-used',
)
json_schema: JsonSchemaValue = self.generate_inner(schema)
json_ref_counts = self.get_json_ref_counts(json_schema)
# Remove the top-level $ref if present; note that the _generate method already ensures there are no sibling keys
ref = cast(JsonRef, json_schema.get('$ref'))
while ref is not None: # may need to unpack multiple levels
ref_json_schema = self.get_schema_from_definitions(ref)
if json_ref_counts[ref] > 1 or ref_json_schema is None:
# Keep the ref, but use an allOf to remove the top level $ref
json_schema = {'allOf': [{'$ref': ref}]}
else:
# "Unpack" the ref since this is the only reference
json_schema = ref_json_schema.copy() # copy to prevent recursive dict reference
json_ref_counts[ref] -= 1
ref = cast(JsonRef, json_schema.get('$ref'))
self._garbage_collect_definitions(json_schema)
definitions_remapping = self._build_definitions_remapping()
if self.definitions:
json_schema['$defs'] = self.definitions
json_schema = definitions_remapping.remap_json_schema(json_schema)
# For now, we will not set the $schema key. However, if desired, this can be easily added by overriding
# this method and adding the following line after a call to super().generate(schema):
# json_schema['$schema'] = self.schema_dialect
self._used = True
return _sort_json_schema(json_schema)
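    # Typical usage (informal note): subclasses of GenerateJsonSchema are usually passed to
    # BaseModel.model_json_schema(schema_generator=...) or TypeAdapter.json_schema(..., schema_generator=...)
    # rather than instantiated directly, and each instance can only produce one schema (see `_used`).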
def generate_inner(self, schema: CoreSchemaOrField) -> JsonSchemaValue: # noqa: C901
"""Generates a JSON schema for a given core schema.
Args:
schema: The given core schema.
Returns:
The generated JSON schema.
"""
# If a schema with the same CoreRef has been handled, just return a reference to it
# Note that this assumes that it will _never_ be the case that the same CoreRef is used
# on types that should have different JSON schemas
if 'ref' in schema:
core_ref = CoreRef(schema['ref']) # type: ignore[typeddict-item]
core_mode_ref = (core_ref, self.mode)
if core_mode_ref in self.core_to_defs_refs and self.core_to_defs_refs[core_mode_ref] in self.definitions:
return {'$ref': self.core_to_json_refs[core_mode_ref]}
# Generate the JSON schema, accounting for the json_schema_override and core_schema_override
metadata_handler = _core_metadata.CoreMetadataHandler(schema)
def populate_defs(core_schema: CoreSchema, json_schema: JsonSchemaValue) -> JsonSchemaValue:
if 'ref' in core_schema:
core_ref = CoreRef(core_schema['ref']) # type: ignore[typeddict-item]
defs_ref, ref_json_schema = self.get_cache_defs_ref_schema(core_ref)
json_ref = JsonRef(ref_json_schema['$ref'])
self.json_to_defs_refs[json_ref] = defs_ref
# Replace the schema if it's not a reference to itself
# What we want to avoid is having the def be just a ref to itself
# which is what would happen if we blindly assigned any
if json_schema.get('$ref', None) != json_ref:
self.definitions[defs_ref] = json_schema
self._core_defs_invalid_for_json_schema.pop(defs_ref, None)
json_schema = ref_json_schema
return json_schema
def convert_to_all_of(json_schema: JsonSchemaValue) -> JsonSchemaValue:
if '$ref' in json_schema and len(json_schema.keys()) > 1:
# technically you can't have any other keys next to a "$ref"
# but it's an easy mistake to make and not hard to correct automatically here
json_schema = json_schema.copy()
ref = json_schema.pop('$ref')
json_schema = {'allOf': [{'$ref': ref}], **json_schema}
return json_schema
def handler_func(schema_or_field: CoreSchemaOrField) -> JsonSchemaValue:
"""Generate a JSON schema based on the input schema.
Args:
schema_or_field: The core schema to generate a JSON schema from.
Returns:
The generated JSON schema.
Raises:
TypeError: If an unexpected schema type is encountered.
"""
# Generate the core-schema-type-specific bits of the schema generation:
json_schema: JsonSchemaValue | None = None
if self.mode == 'serialization' and 'serialization' in schema_or_field:
ser_schema = schema_or_field['serialization'] # type: ignore
json_schema = self.ser_schema(ser_schema)
if json_schema is None:
if _core_utils.is_core_schema(schema_or_field) or _core_utils.is_core_schema_field(schema_or_field):
generate_for_schema_type = self._schema_type_to_method[schema_or_field['type']]
json_schema = generate_for_schema_type(schema_or_field)
else:
raise TypeError(f'Unexpected schema type: schema={schema_or_field}')
if _core_utils.is_core_schema(schema_or_field):
json_schema = populate_defs(schema_or_field, json_schema)
json_schema = convert_to_all_of(json_schema)
return json_schema
current_handler = _schema_generation_shared.GenerateJsonSchemaHandler(self, handler_func)
for js_modify_function in metadata_handler.metadata.get('pydantic_js_functions', ()):
def new_handler_func(
schema_or_field: CoreSchemaOrField,
current_handler: GetJsonSchemaHandler = current_handler,
js_modify_function: GetJsonSchemaFunction = js_modify_function,
) -> JsonSchemaValue:
json_schema = js_modify_function(schema_or_field, current_handler)
if _core_utils.is_core_schema(schema_or_field):
json_schema = populate_defs(schema_or_field, json_schema)
original_schema = current_handler.resolve_ref_schema(json_schema)
ref = json_schema.pop('$ref', None)
if ref and json_schema:
original_schema.update(json_schema)
return original_schema
current_handler = _schema_generation_shared.GenerateJsonSchemaHandler(self, new_handler_func)
for js_modify_function in metadata_handler.metadata.get('pydantic_js_annotation_functions', ()):
def new_handler_func(
schema_or_field: CoreSchemaOrField,
current_handler: GetJsonSchemaHandler = current_handler,
js_modify_function: GetJsonSchemaFunction = js_modify_function,
) -> JsonSchemaValue:
json_schema = js_modify_function(schema_or_field, current_handler)
if _core_utils.is_core_schema(schema_or_field):
json_schema = populate_defs(schema_or_field, json_schema)
json_schema = convert_to_all_of(json_schema)
return json_schema
current_handler = _schema_generation_shared.GenerateJsonSchemaHandler(self, new_handler_func)
json_schema = current_handler(schema)
if _core_utils.is_core_schema(schema):
json_schema = populate_defs(schema, json_schema)
json_schema = convert_to_all_of(json_schema)
return json_schema
# ### Schema generation methods
def any_schema(self, schema: core_schema.AnySchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches any value.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
return {}
def none_schema(self, schema: core_schema.NoneSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a None value.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
return {'type': 'null'}
def bool_schema(self, schema: core_schema.BoolSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a bool value.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
return {'type': 'boolean'}
def int_schema(self, schema: core_schema.IntSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches an Int value.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
json_schema: dict[str, Any] = {'type': 'integer'}
self.update_with_validations(json_schema, schema, self.ValidationsMapping.numeric)
json_schema = {k: v for k, v in json_schema.items() if v not in {math.inf, -math.inf}}
return json_schema
def float_schema(self, schema: core_schema.FloatSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a float value.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
json_schema: dict[str, Any] = {'type': 'number'}
self.update_with_validations(json_schema, schema, self.ValidationsMapping.numeric)
json_schema = {k: v for k, v in json_schema.items() if v not in {math.inf, -math.inf}}
return json_schema
def decimal_schema(self, schema: core_schema.DecimalSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a decimal value.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
json_schema = self.str_schema(core_schema.str_schema())
if self.mode == 'validation':
multiple_of = schema.get('multiple_of')
le = schema.get('le')
ge = schema.get('ge')
lt = schema.get('lt')
gt = schema.get('gt')
json_schema = {
'anyOf': [
self.float_schema(
core_schema.float_schema(
allow_inf_nan=schema.get('allow_inf_nan'),
multiple_of=None if multiple_of is None else float(multiple_of),
le=None if le is None else float(le),
ge=None if ge is None else float(ge),
lt=None if lt is None else float(lt),
gt=None if gt is None else float(gt),
)
),
json_schema,
],
}
return json_schema
def str_schema(self, schema: core_schema.StringSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a string value.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
json_schema = {'type': 'string'}
self.update_with_validations(json_schema, schema, self.ValidationsMapping.string)
return json_schema
def bytes_schema(self, schema: core_schema.BytesSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a bytes value.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
json_schema = {'type': 'string', 'format': 'base64url' if self._config.ser_json_bytes == 'base64' else 'binary'}
self.update_with_validations(json_schema, schema, self.ValidationsMapping.bytes)
return json_schema
def date_schema(self, schema: core_schema.DateSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a date value.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
json_schema = {'type': 'string', 'format': 'date'}
self.update_with_validations(json_schema, schema, self.ValidationsMapping.date)
return json_schema
def time_schema(self, schema: core_schema.TimeSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a time value.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
return {'type': 'string', 'format': 'time'}
def datetime_schema(self, schema: core_schema.DatetimeSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a datetime value.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
return {'type': 'string', 'format': 'date-time'}
def timedelta_schema(self, schema: core_schema.TimedeltaSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a timedelta value.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
if self._config.ser_json_timedelta == 'float':
return {'type': 'number'}
return {'type': 'string', 'format': 'duration'}
def literal_schema(self, schema: core_schema.LiteralSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a literal value.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
expected = [v.value if isinstance(v, Enum) else v for v in schema['expected']]
# jsonify the expected values
expected = [to_jsonable_python(v) for v in expected]
if len(expected) == 1:
return {'const': expected[0]}
types = {type(e) for e in expected}
if types == {str}:
return {'enum': expected, 'type': 'string'}
elif types == {int}:
return {'enum': expected, 'type': 'integer'}
elif types == {float}:
return {'enum': expected, 'type': 'number'}
elif types == {bool}:
return {'enum': expected, 'type': 'boolean'}
elif types == {list}:
return {'enum': expected, 'type': 'array'}
        # there is no None case because if it's mixed it hits the final `else`
# if it's a single Literal[None] then it becomes a `const` schema above
else:
return {'enum': expected}
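    # For example (illustrative): Literal['a', 'b'] -> {'enum': ['a', 'b'], 'type': 'string'},
    # Literal['only'] -> {'const': 'only'}, and a mixed Literal['a', 1] -> {'enum': ['a', 1]}.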
def is_instance_schema(self, schema: core_schema.IsInstanceSchema) -> JsonSchemaValue:
"""Generates a JSON schema that checks if a value is an instance of a class, equivalent to Python's
`isinstance` method.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
return self.handle_invalid_for_json_schema(schema, f'core_schema.IsInstanceSchema ({schema["cls"]})')
def is_subclass_schema(self, schema: core_schema.IsSubclassSchema) -> JsonSchemaValue:
"""Generates a JSON schema that checks if a value is a subclass of a class, equivalent to Python's `issubclass`
method.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
# Note: This is for compatibility with V1; you can override if you want different behavior.
return {}
def callable_schema(self, schema: core_schema.CallableSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a callable value.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
return self.handle_invalid_for_json_schema(schema, 'core_schema.CallableSchema')
def list_schema(self, schema: core_schema.ListSchema) -> JsonSchemaValue:
"""Returns a schema that matches a list schema.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
items_schema = {} if 'items_schema' not in schema else self.generate_inner(schema['items_schema'])
json_schema = {'type': 'array', 'items': items_schema}
self.update_with_validations(json_schema, schema, self.ValidationsMapping.array)
return json_schema
def tuple_positional_schema(self, schema: core_schema.TuplePositionalSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a positional tuple schema e.g. `Tuple[int, str, bool]`.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
json_schema: JsonSchemaValue = {'type': 'array'}
json_schema['minItems'] = len(schema['items_schema'])
prefixItems = [self.generate_inner(item) for item in schema['items_schema']]
if prefixItems:
json_schema['prefixItems'] = prefixItems
if 'extras_schema' in schema:
json_schema['items'] = self.generate_inner(schema['extras_schema'])
else:
json_schema['maxItems'] = len(schema['items_schema'])
self.update_with_validations(json_schema, schema, self.ValidationsMapping.array)
return json_schema
def tuple_variable_schema(self, schema: core_schema.TupleVariableSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a variable tuple schema e.g. `Tuple[int, ...]`.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
items_schema = {} if 'items_schema' not in schema else self.generate_inner(schema['items_schema'])
json_schema = {'type': 'array', 'items': items_schema}
self.update_with_validations(json_schema, schema, self.ValidationsMapping.array)
return json_schema
def set_schema(self, schema: core_schema.SetSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a set schema.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
return self._common_set_schema(schema)
def frozenset_schema(self, schema: core_schema.FrozenSetSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a frozenset schema.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
return self._common_set_schema(schema)
def _common_set_schema(self, schema: core_schema.SetSchema | core_schema.FrozenSetSchema) -> JsonSchemaValue:
items_schema = {} if 'items_schema' not in schema else self.generate_inner(schema['items_schema'])
json_schema = {'type': 'array', 'uniqueItems': True, 'items': items_schema}
self.update_with_validations(json_schema, schema, self.ValidationsMapping.array)
return json_schema
def generator_schema(self, schema: core_schema.GeneratorSchema) -> JsonSchemaValue:
"""Returns a JSON schema that represents the provided GeneratorSchema.
Args:
schema: The schema.
Returns:
The generated JSON schema.
"""
items_schema = {} if 'items_schema' not in schema else self.generate_inner(schema['items_schema'])
json_schema = {'type': 'array', 'items': items_schema}
self.update_with_validations(json_schema, schema, self.ValidationsMapping.array)
return json_schema
def dict_schema(self, schema: core_schema.DictSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a dict schema.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
json_schema: JsonSchemaValue = {'type': 'object'}
keys_schema = self.generate_inner(schema['keys_schema']).copy() if 'keys_schema' in schema else {}
keys_pattern = keys_schema.pop('pattern', None)
values_schema = self.generate_inner(schema['values_schema']).copy() if 'values_schema' in schema else {}
values_schema.pop('title', None) # don't give a title to the additionalProperties
if values_schema or keys_pattern is not None: # don't add additionalProperties if it's empty
if keys_pattern is None:
json_schema['additionalProperties'] = values_schema
else:
json_schema['patternProperties'] = {keys_pattern: values_schema}
self.update_with_validations(json_schema, schema, self.ValidationsMapping.object)
return json_schema
def _function_schema(
self,
schema: _core_utils.AnyFunctionSchema,
) -> JsonSchemaValue:
if _core_utils.is_function_with_inner_schema(schema):
# This could be wrong if the function's mode is 'before', but in practice will often be right, and when it
# isn't, I think it would be hard to automatically infer what the desired schema should be.
return self.generate_inner(schema['schema'])
# function-plain
return self.handle_invalid_for_json_schema(
schema, f'core_schema.PlainValidatorFunctionSchema ({schema["function"]})'
)
def function_before_schema(self, schema: core_schema.BeforeValidatorFunctionSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a function-before schema.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
return self._function_schema(schema)
def function_after_schema(self, schema: core_schema.AfterValidatorFunctionSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a function-after schema.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
return self._function_schema(schema)
def function_plain_schema(self, schema: core_schema.PlainValidatorFunctionSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a function-plain schema.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
return self._function_schema(schema)
def function_wrap_schema(self, schema: core_schema.WrapValidatorFunctionSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a function-wrap schema.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
return self._function_schema(schema)
def default_schema(self, schema: core_schema.WithDefaultSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a schema with a default value.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
json_schema = self.generate_inner(schema['schema'])
if 'default' not in schema:
return json_schema
default = schema['default']
# Note: if you want to include the value returned by the default_factory,
# override this method and replace the code above with:
# if 'default' in schema:
# default = schema['default']
# elif 'default_factory' in schema:
# default = schema['default_factory']()
# else:
# return json_schema
try:
encoded_default = self.encode_default(default)
except pydantic_core.PydanticSerializationError:
self.emit_warning(
'non-serializable-default',
f'Default value {default} is not JSON serializable; excluding default from JSON schema',
)
# Return the inner schema, as though there was no default
return json_schema
if '$ref' in json_schema:
# Since reference schemas do not support child keys, we wrap the reference schema in a single-case allOf:
return {'allOf': [json_schema], 'default': encoded_default}
else:
json_schema['default'] = encoded_default
return json_schema
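    # For example (illustrative): a plain field `x: int = 3` yields {'type': 'integer', 'default': 3},
    # while a referenced model with a default is wrapped as {'allOf': [{'$ref': ...}], 'default': ...}.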
def nullable_schema(self, schema: core_schema.NullableSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a schema that allows null values.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
null_schema = {'type': 'null'}
inner_json_schema = self.generate_inner(schema['schema'])
if inner_json_schema == null_schema:
return null_schema
else:
# Thanks to the equality check against `null_schema` above, I think 'oneOf' would also be valid here;
            # I'll use 'anyOf' for now, but it could be changed if that would work better with some external tooling
return self.get_flattened_anyof([inner_json_schema, null_schema])
def union_schema(self, schema: core_schema.UnionSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a schema that allows values matching any of the given schemas.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
generated: list[JsonSchemaValue] = []
choices = schema['choices']
for choice in choices:
# choice will be a tuple if an explicit label was provided
choice_schema = choice[0] if isinstance(choice, tuple) else choice
try:
generated.append(self.generate_inner(choice_schema))
except PydanticOmit:
continue
except PydanticInvalidForJsonSchema as exc:
self.emit_warning('skipped-choice', exc.message)
if len(generated) == 1:
return generated[0]
return self.get_flattened_anyof(generated)
def tagged_union_schema(self, schema: core_schema.TaggedUnionSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a schema that allows values matching any of the given schemas, where
the schemas are tagged with a discriminator field that indicates which schema should be used to validate
the value.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
generated: dict[str, JsonSchemaValue] = {}
for k, v in schema['choices'].items():
if isinstance(k, Enum):
k = k.value
try:
# Use str(k) since keys must be strings for json; while not technically correct,
# it's the closest that can be represented in valid JSON
generated[str(k)] = self.generate_inner(v).copy()
except PydanticOmit:
continue
except PydanticInvalidForJsonSchema as exc:
self.emit_warning('skipped-choice', exc.message)
one_of_choices = _deduplicate_schemas(generated.values())
json_schema: JsonSchemaValue = {'oneOf': one_of_choices}
# This reflects the v1 behavior; TODO: we should make it possible to exclude OpenAPI stuff from the JSON schema
openapi_discriminator = self._extract_discriminator(schema, one_of_choices)
if openapi_discriminator is not None:
json_schema['discriminator'] = {
'propertyName': openapi_discriminator,
'mapping': {k: v.get('$ref', v) for k, v in generated.items()},
}
return json_schema
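    # For example (illustrative): a union discriminated on a `kind` field yields
    # {'oneOf': [...], 'discriminator': {'propertyName': 'kind', 'mapping': {'cat': '#/$defs/Cat', ...}}}.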
def _extract_discriminator(
self, schema: core_schema.TaggedUnionSchema, one_of_choices: list[_JsonDict]
) -> str | None:
"""Extract a compatible OpenAPI discriminator from the schema and one_of choices that end up in the final
schema."""
openapi_discriminator: str | None = None
if isinstance(schema['discriminator'], str):
return schema['discriminator']
if isinstance(schema['discriminator'], list):
# If the discriminator is a single item list containing a string, that is equivalent to the string case
if len(schema['discriminator']) == 1 and isinstance(schema['discriminator'][0], str):
return schema['discriminator'][0]
# When an alias is used that is different from the field name, the discriminator will be a list of single
# str lists, one for the attribute and one for the actual alias. The logic here will work even if there is
# more than one possible attribute, and looks for whether a single alias choice is present as a documented
# property on all choices. If so, that property will be used as the OpenAPI discriminator.
for alias_path in schema['discriminator']:
if not isinstance(alias_path, list):
break # this means that the discriminator is not a list of alias paths
if len(alias_path) != 1:
continue # this means that the "alias" does not represent a single field
alias = alias_path[0]
if not isinstance(alias, str):
continue # this means that the "alias" does not represent a field
alias_is_present_on_all_choices = True
for choice in one_of_choices:
while '$ref' in choice:
assert isinstance(choice['$ref'], str)
choice = self.get_schema_from_definitions(JsonRef(choice['$ref'])) or {}
properties = choice.get('properties', {})
if not isinstance(properties, dict) or alias not in properties:
alias_is_present_on_all_choices = False
break
if alias_is_present_on_all_choices:
openapi_discriminator = alias
break
return openapi_discriminator
def chain_schema(self, schema: core_schema.ChainSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a core_schema.ChainSchema.
When generating a schema for validation, we return the validation JSON schema for the first step in the chain.
For serialization, we return the serialization JSON schema for the last step in the chain.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
step_index = 0 if self.mode == 'validation' else -1 # use first step for validation, last for serialization
return self.generate_inner(schema['steps'][step_index])
def lax_or_strict_schema(self, schema: core_schema.LaxOrStrictSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a schema that allows values matching either the lax schema or the
strict schema.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
# TODO: Need to read the default value off of model config or whatever
use_strict = schema.get('strict', False) # TODO: replace this default False
# If your JSON schema fails to generate it is probably
# because one of the following two branches failed.
if use_strict:
return self.generate_inner(schema['strict_schema'])
else:
return self.generate_inner(schema['lax_schema'])
def json_or_python_schema(self, schema: core_schema.JsonOrPythonSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a schema that allows values matching either the JSON schema or the
Python schema.
The JSON schema is used instead of the Python schema. If you want to use the Python schema, you should override
this method.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
return self.generate_inner(schema['json_schema'])
def typed_dict_schema(self, schema: core_schema.TypedDictSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a schema that defines a typed dict.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
total = schema.get('total', True)
named_required_fields: list[tuple[str, bool, CoreSchemaField]] = [
(name, self.field_is_required(field, total), field)
for name, field in schema['fields'].items()
if self.field_is_present(field)
]
if self.mode == 'serialization':
named_required_fields.extend(self._name_required_computed_fields(schema.get('computed_fields', [])))
config = _get_typed_dict_config(schema)
with self._config_wrapper_stack.push(config):
json_schema = self._named_required_fields_schema(named_required_fields)
extra = config.get('extra', 'ignore')
if extra == 'forbid':
json_schema['additionalProperties'] = False
elif extra == 'allow':
json_schema['additionalProperties'] = True
return json_schema
@staticmethod
def _name_required_computed_fields(
computed_fields: list[ComputedField],
) -> list[tuple[str, bool, core_schema.ComputedField]]:
return [(field['property_name'], True, field) for field in computed_fields]
def _named_required_fields_schema(
self, named_required_fields: Sequence[tuple[str, bool, CoreSchemaField]]
) -> JsonSchemaValue:
properties: dict[str, JsonSchemaValue] = {}
required_fields: list[str] = []
for name, required, field in named_required_fields:
if self.by_alias:
name = self._get_alias_name(field, name)
try:
field_json_schema = self.generate_inner(field).copy()
except PydanticOmit:
continue
if 'title' not in field_json_schema and self.field_title_should_be_set(field):
title = self.get_title_from_name(name)
field_json_schema['title'] = title
field_json_schema = self.handle_ref_overrides(field_json_schema)
properties[name] = field_json_schema
if required:
required_fields.append(name)
json_schema = {'type': 'object', 'properties': properties}
if required_fields:
json_schema['required'] = required_fields
return json_schema
def _get_alias_name(self, field: CoreSchemaField, name: str) -> str:
if field['type'] == 'computed-field':
alias: Any = field.get('alias', name)
elif self.mode == 'validation':
alias = field.get('validation_alias', name)
else:
alias = field.get('serialization_alias', name)
if isinstance(alias, str):
name = alias
elif isinstance(alias, list):
alias = cast('list[str] | str', alias)
for path in alias:
if isinstance(path, list) and len(path) == 1 and isinstance(path[0], str):
# Use the first valid single-item string path; the code that constructs the alias array
# should ensure the first such item is what belongs in the JSON schema
name = path[0]
break
else:
assert_never(alias)
return name
def typed_dict_field_schema(self, schema: core_schema.TypedDictField) -> JsonSchemaValue:
"""Generates a JSON schema that matches a schema that defines a typed dict field.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
return self.generate_inner(schema['schema'])
def dataclass_field_schema(self, schema: core_schema.DataclassField) -> JsonSchemaValue:
"""Generates a JSON schema that matches a schema that defines a dataclass field.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
return self.generate_inner(schema['schema'])
def model_field_schema(self, schema: core_schema.ModelField) -> JsonSchemaValue:
"""Generates a JSON schema that matches a schema that defines a model field.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
return self.generate_inner(schema['schema'])
def computed_field_schema(self, schema: core_schema.ComputedField) -> JsonSchemaValue:
"""Generates a JSON schema that matches a schema that defines a computed field.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
return self.generate_inner(schema['return_schema'])
def model_schema(self, schema: core_schema.ModelSchema) -> JsonSchemaValue:
"""Generates a JSON schema that matches a schema that defines a model.
Args:
schema: The core schema.
Returns:
The generated JSON schema.
"""
# We do not use schema['model'].model_json_schema() here
# because it could lead to inconsistent refs handling, etc.
cls = cast('type[BaseModel]', schema['cls'])
config = cls.model_config
title = config.get('title')
with self._config_wrapper_stack.push(config):
json_schema = self.generate_inner(schema['schema'])
json_schema_extra = config.get('json_schema_extra')
if cls.__pydantic_root_model__:
root_json_schema_extra = cls.model_fields['root'].json_schema_extra
if json_schema_extra and root_json_schema_extra:
raise ValueError(
'"model_config[\'json_schema_extra\']" and "Field.json_schema_extra" on "RootModel.root"'
' field must not be set simultaneously'
)
if root_json_schema_extra:
json_schema_extra = root_json_schema_extra
json_schema = self._update_class_schema(json_schema, title, config.get('extra', None), cls, json_schema_extra)
return json_schema
def _update_class_schema(
self,
json_schema: JsonSchemaValue,
title: str | None,
extra: Literal['allow', 'ignore', 'forbid'] | None,
cls: type[Any], | json_schema_extra: dict[str, Any] | JsonSchemaExtraCallable | None, | 9 | 2023-10-23 18:09:28+00:00 | 12k |
f0uriest/quadax | tests/test_adaptive.py | [
{
"identifier": "romberg",
"path": "quadax/romberg.py",
"snippet": "def romberg(\n fun,\n interval,\n args=(),\n full_output=False,\n epsabs=1.4e-8,\n epsrel=1.4e-8,\n divmax=20,\n norm=jnp.inf,\n):\n \"\"\"Romberg integration of a callable function or method.\n\n Returns the integral of `fun` (a function of one variable) over `interval`.\n\n Good for non-smooth or piecewise smooth integrands.\n\n Not recommended for infinite intervals, or functions with singularities.\n\n Parameters\n ----------\n fun : callable\n Function to integrate, should have a signature of the form\n ``fun(x, *args)`` -> float, Array. Should be JAX transformable.\n interval : array-like\n Lower and upper limits of integration. Use np.inf to denote infinite intervals.\n args : tuple\n additional arguments passed to fun\n full_output : bool, optional\n If True, return the full state of the integrator. See below for more\n information.\n epsabs, epsrel : float\n Absolute and relative tolerances. If I1 and I2 are two\n successive approximations to the integral, algorithm terminates\n when abs(I1-I2) < max(epsabs, epsrel*|I2|)\n divmax : int, optional\n Maximum order of extrapolation. Default is 20.\n Total number of function evaluations will be at\n most 2**divmax + 1\n norm : int, callable\n Norm to use for measuring error for vector valued integrands. No effect if the\n integrand is scalar valued. If an int, uses p-norm of the given order, otherwise\n should be callable.\n\n Returns\n -------\n y : float, Array\n Approximation to the integral\n info : QuadratureInfo\n Named tuple with the following fields:\n\n * err : (float) Estimate of the error in the approximation.\n * neval : (int) Total number of function evaluations.\n * status : (int) Flag indicating reason for termination. status of 0 means\n normal termination, any other value indicates a possible error. A human\n readable message can be obtained by ``print(quadax.STATUS[status])``\n * info : (dict or None) Other information returned by the algorithm.\n Only present if ``full_output`` is True. Contains the following:\n\n * table : (ndarray, size(dixmax+1, divmax+1, ...)) Estimate of the integral\n from each level of discretization and each step of extrapolation.\n\n Notes\n -----\n Due to limitations on dynamically sized arrays in JAX, this algorithm is fully\n sequential and does not vectorize integrand evaluations, so may not be the most\n efficient on GPU/TPU.\n\n Also, it is currently only forward mode differentiable.\n\n \"\"\"\n errorif(\n len(interval) != 2,\n NotImplementedError,\n \"Romberg integration with breakpoints not supported\",\n )\n _norm = norm if callable(norm) else lambda x: jnp.linalg.norm(x.flatten(), ord=norm)\n # map a, b -> [-1, 1]\n fun, interval = map_interval(fun, interval)\n vfunc = wrap_func(fun, args)\n a, b = interval\n f = jax.eval_shape(vfunc, (a + b / 2))\n\n result = jnp.zeros((divmax + 1, divmax + 1, *f.shape), f.dtype)\n result = result.at[0, 0].set(vfunc(a) + vfunc(b))\n neval = 2\n err = jnp.inf\n state = (result, 1, neval, err)\n\n def ncond(state):\n result, n, neval, err = state\n return (n < divmax + 1) & (\n err > jnp.maximum(epsabs, epsrel * _norm(result[n, n]))\n )\n\n def nloop(state):\n # loop over outer number of subdivisions\n result, n, neval, err = state\n h = (b - a) / 2**n\n s = jnp.zeros(f.shape, f.dtype)\n\n def sloop(i, s):\n # loop to evaluate fun. 
Can't be vectorized due to different number\n # of evals per nloop step\n s += vfunc(a + h * (2 * i - 1))\n return s\n\n result = result.at[n, 0].set(\n 0.5 * result[n - 1, 0]\n + h * jax.lax.fori_loop(1, (2**n) // 2 + 1, sloop, s)\n )\n neval += (2**n) // 2\n\n def mloop(m, result):\n # richardson extrapolation\n temp = 1 / (4.0**m - 1.0) * (result[n, m - 1] - result[n - 1, m - 1])\n result = result.at[n, m].set(result[n, m - 1] + temp)\n return result\n\n result = jax.lax.fori_loop(1, n + 1, mloop, result)\n err = _norm(result[n, n] - result[n - 1, n - 1])\n return result, n + 1, neval, err\n\n result, n, neval, err = bounded_while_loop(ncond, nloop, state, divmax + 1)\n\n y = result[n - 1, n - 1]\n status = 2 * (err > jnp.maximum(epsabs, epsrel * _norm(y)))\n info = result if full_output else None\n out = QuadratureInfo(err, neval, status, info)\n return y, out"
},
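A minimal usage sketch for `romberg` based on the signature above; the integrand and expected value mirror example problem 0 from the test file below:

import jax.numpy as jnp
from quadax import romberg

y, info = romberg(lambda t: t * jnp.log(1 + t), [0, 1], epsabs=1e-10, epsrel=1e-10)
# y should be close to 1/4; info carries the error estimate, evaluation count, and status flag.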
{
"identifier": "rombergts",
"path": "quadax/romberg.py",
"snippet": "def rombergts(\n fun,\n interval,\n args=(),\n full_output=False,\n epsabs=1.4e-8,\n epsrel=1.4e-8,\n divmax=20,\n norm=jnp.inf,\n):\n \"\"\"Romberg integration with tanh-sinh (aka double exponential) transformation.\n\n Returns the integral of `fun` (a function of one variable) over `interval`.\n\n Performs well for functions with singularities at the endpoints or integration\n over infinite intervals. May be slightly less efficient than ``quadgk`` or\n ``quadcc`` for smooth integrands.\n\n Parameters\n ----------\n fun : callable\n Function to integrate, should have a signature of the form\n ``fun(x, *args)`` -> float, Array. Should be JAX transformable.\n interval : array-like\n Lower and upper limits of integration. Use np.inf to denote infinite intervals.\n args : tuple\n additional arguments passed to fun\n full_output : bool, optional\n If True, return the full state of the integrator. See below for more\n information.\n epsabs, epsrel : float\n Absolute and relative tolerances. If I1 and I2 are two\n successive approximations to the integral, algorithm terminates\n when abs(I1-I2) < max(epsabs, epsrel*|I2|)\n divmax : int, optional\n Maximum order of extrapolation. Default is 20.\n Total number of function evaluations will be at\n most 2**divmax + 1\n norm : int, callable\n Norm to use for measuring error for vector valued integrands. No effect if the\n integrand is scalar valued. If an int, uses p-norm of the given order, otherwise\n should be callable.\n\n\n Returns\n -------\n y : float, Array\n Approximation to the integral\n info : QuadratureInfo\n Named tuple with the following fields:\n\n * err : (float) Estimate of the error in the approximation.\n * neval : (int) Total number of function evaluations.\n * status : (int) Flag indicating reason for termination. status of 0 means\n normal termination, any other value indicates a possible error. A human\n readable message can be obtained by ``print(quadax.STATUS[status])``\n * info : (dict or None) Other information returned by the algorithm.\n Only present if ``full_output`` is True. Contains the following:\n\n * table : (ndarray, size(dixmax+1, divmax+1, ...)) Estimate of the integral\n from each level of discretization and each step of extrapolation.\n\n Notes\n -----\n Due to limitations on dynamically sized arrays in JAX, this algorithm is fully\n sequential and does not vectorize integrand evaluations, so may not be the most\n efficient on GPU/TPU.\n\n Also, it is currently only forward mode differentiable.\n\n \"\"\"\n fun, interval = tanhsinh_transform(fun, interval)\n return romberg(fun, interval, args, full_output, epsabs, epsrel, divmax, norm)"
},
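A usage sketch for `rombergts` on example problem 4 from the test file below, whose integrand has a derivative singularity at t=0 (exact value -4/9):

import jax.numpy as jnp
from quadax import rombergts

y, info = rombergts(lambda t: jnp.sqrt(t) * jnp.log(t), [0, 1], epsabs=1e-10, epsrel=1e-10)
# The tanh-sinh transform concentrates points near the endpoints, so y converges to -4/9.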
{
"identifier": "quadcc",
"path": "quadax/adaptive.py",
"snippet": "def quadcc(\n fun,\n interval,\n args=(),\n full_output=False,\n epsabs=1.4e-8,\n epsrel=1.4e-8,\n max_ninter=50,\n order=32,\n norm=jnp.inf,\n):\n \"\"\"Global adaptive quadrature using Clenshaw-Curtis rule.\n\n Integrate fun from `interval[0]` to `interval[-1]` using a h-adaptive scheme with\n error estimate. Breakpoints can be specified in `interval` where integration\n difficulty may occur.\n\n A good general purpose integrator for most reasonably well behaved functions over\n finite or infinite intervals.\n\n Parameters\n ----------\n fun : callable\n Function to integrate, should have a signature of the form\n ``fun(x, *args)`` -> float, Array. Should be JAX transformable.\n interval : array-like\n Lower and upper limits of integration with possible breakpoints. Use np.inf to\n denote infinite intervals.\n args : tuple, optional\n Extra arguments passed to fun.\n full_output : bool, optional\n If True, return the full state of the integrator. See below for more\n information.\n epsabs, epsrel : float, optional\n Absolute and relative error tolerance. Default is 1.4e-8. Algorithm tries to\n obtain an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``\n where ``i`` = integral of `fun` over `interval`, and ``result`` is the\n numerical approximation.\n max_ninter : int, optional\n An upper bound on the number of sub-intervals used in the adaptive\n algorithm.\n order : {8, 16, 32, 64, 128, 256}\n Order of local integration rule.\n norm : int, callable\n Norm to use for measuring error for vector valued integrands. No effect if the\n integrand is scalar valued. If an int, uses p-norm of the given order, otherwise\n should be callable.\n\n Returns\n -------\n y : float, Array\n The integral of fun from `a` to `b`.\n info : QuadratureInfo\n Named tuple with the following fields:\n\n * err : (float) Estimate of the error in the approximation.\n * neval : (int) Total number of function evaluations.\n * status : (int) Flag indicating reason for termination. status of 0 means\n normal termination, any other value indicates a possible error. A human\n readable message can be obtained by ``print(quadax.STATUS[status])``\n * info : (dict or None) Other information returned by the algorithm.\n Only present if ``full_output`` is True. Contains the following:\n\n * 'ninter' : (int) The number, K, of sub-intervals produced in the\n subdivision process.\n * 'a_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the left end points of the (remapped) sub-intervals\n in the partition of the integration range.\n * 'b_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the right end points of the (remapped) sub-intervals.\n * 'r_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the integral approximations on the sub-intervals.\n * 'e_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the moduli of the absolute error estimates on the\n sub-intervals.\n\n Notes\n -----\n Adaptive algorithms are inherently somewhat sequential, so perfect parallelism\n is generally not achievable. 
The local quadrature rule vmaps integrand evaluation at\n ``order`` points, so using higher order methods will generally be more efficient on\n GPU/TPU.\n\n \"\"\"\n y, info = adaptive_quadrature(\n fixed_quadcc,\n fun,\n interval,\n args,\n full_output,\n epsabs,\n epsrel,\n max_ninter,\n n=order,\n norm=norm,\n )\n info = QuadratureInfo(info.err, info.neval * order, info.status, info.info)\n return y, info"
},
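A sketch of `quadcc` with a vector-valued integrand using the `norm` option documented above (the integrand is illustrative, not from the test file):

import jax.numpy as jnp
from quadax import quadcc

f = lambda t: jnp.array([jnp.sin(t), jnp.cos(t)])
y, info = quadcc(f, [0, jnp.pi / 2], epsabs=1e-10, epsrel=1e-10, norm=2)
# y should be close to [1.0, 1.0]; the error estimate uses the 2-norm across components.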
{
"identifier": "quadgk",
"path": "quadax/adaptive.py",
"snippet": "def quadgk(\n fun,\n interval,\n args=(),\n full_output=False,\n epsabs=1.4e-8,\n epsrel=1.4e-8,\n max_ninter=50,\n order=21,\n norm=jnp.inf,\n):\n \"\"\"Global adaptive quadrature using Gauss-Konrod rule.\n\n Integrate fun from `interval[0]` to `interval[-1]` using a h-adaptive scheme with\n error estimate. Breakpoints can be specified in `interval` where integration\n difficulty may occur.\n\n Basically the same as ``scipy.integrate.quad`` but without extrapolation. A good\n general purpose integrator for most reasonably well behaved functions over finite\n or infinite intervals.\n\n Parameters\n ----------\n fun : callable\n Function to integrate, should have a signature of the form\n ``fun(x, *args)`` -> float, Array. Should be JAX transformable.\n interval : array-like\n Lower and upper limits of integration with possible breakpoints. Use np.inf to\n denote infinite intervals.\n args : tuple, optional\n Extra arguments passed to fun.\n full_output : bool, optional\n If True, return the full state of the integrator. See below for more\n information.\n epsabs, epsrel : float, optional\n Absolute and relative error tolerance. Default is 1.4e-8. Algorithm tries to\n obtain an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``\n where ``i`` = integral of `fun` over `interval`, and ``result`` is the\n numerical approximation.\n max_ninter : int, optional\n An upper bound on the number of sub-intervals used in the adaptive\n algorithm.\n order : {15, 21, 31, 41, 51, 61}\n Order of local integration rule.\n norm : int, callable\n Norm to use for measuring error for vector valued integrands. No effect if the\n integrand is scalar valued. If an int, uses p-norm of the given order, otherwise\n should be callable.\n\n Returns\n -------\n y : float, Array\n The integral of fun from `a` to `b`.\n info : QuadratureInfo\n Named tuple with the following fields:\n\n * err : (float) Estimate of the error in the approximation.\n * neval : (int) Total number of function evaluations.\n * status : (int) Flag indicating reason for termination. status of 0 means\n normal termination, any other value indicates a possible error. A human\n readable message can be obtained by ``print(quadax.STATUS[status])``\n * info : (dict or None) Other information returned by the algorithm.\n Only present if ``full_output`` is True. Contains the following:\n\n * 'ninter' : (int) The number, K, of sub-intervals produced in the\n subdivision process.\n * 'a_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the left end points of the (remapped) sub-intervals\n in the partition of the integration range.\n * 'b_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the right end points of the (remapped) sub-intervals.\n * 'r_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the integral approximations on the sub-intervals.\n * 'e_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the moduli of the absolute error estimates on the\n sub-intervals.\n\n Notes\n -----\n Adaptive algorithms are inherently somewhat sequential, so perfect parallelism\n is generally not achievable. 
The local quadrature rule vmaps integrand evaluation at\n ``order`` points, so using higher order methods will generally be more efficient on\n GPU/TPU.\n\n \"\"\"\n y, info = adaptive_quadrature(\n fixed_quadgk,\n fun,\n interval,\n args,\n full_output,\n epsabs,\n epsrel,\n max_ninter,\n n=order,\n norm=norm,\n )\n info = QuadratureInfo(info.err, info.neval * order, info.status, info.info)\n return y, info"
},
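A sketch of `quadgk` over a semi-infinite interval with extra arguments and `full_output` (the integrand and parameter values are illustrative):

import jax.numpy as jnp
from quadax import quadgk

fun = lambda t, a: jnp.exp(-a * t)
y, info = quadgk(fun, [0, jnp.inf], args=(2.0,), full_output=True, order=21)
# y should be close to 1/2; info.info holds the per-subinterval arrays described in the docstring.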
{
"identifier": "quadts",
"path": "quadax/adaptive.py",
"snippet": "def quadts(\n fun,\n interval,\n args=(),\n full_output=False,\n epsabs=1.4e-8,\n epsrel=1.4e-8,\n max_ninter=50,\n order=61,\n norm=jnp.inf,\n):\n \"\"\"Global adaptive quadrature using trapezoidal tanh-sinh rule.\n\n Integrate fun from `interval[0]` to `interval[-1]` using a h-adaptive scheme with\n error estimate. Breakpoints can be specified in `interval` where integration\n difficulty may occur.\n\n Especially good for integrands with singular behavior at an endpoint.\n\n Parameters\n ----------\n fun : callable\n Function to integrate, should have a signature of the form\n ``fun(x, *args)`` -> float, Array. Should be JAX transformable.\n interval : array-like\n Lower and upper limits of integration with possible breakpoints. Use np.inf to\n denote infinite intervals.\n args : tuple, optional\n Extra arguments passed to fun.\n full_output : bool, optional\n If True, return the full state of the integrator. See below for more\n information.\n epsabs, epsrel : float, optional\n Absolute and relative error tolerance. Default is 1.4e-8. Algorithm tries to\n obtain an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``\n where ``i`` = integral of `fun` over `interval`, and ``result`` is the\n numerical approximation.\n max_ninter : int, optional\n An upper bound on the number of sub-intervals used in the adaptive\n algorithm.\n order : {41, 61, 81, 101}\n Order of local integration rule.\n norm : int, callable\n Norm to use for measuring error for vector valued integrands. No effect if the\n integrand is scalar valued. If an int, uses p-norm of the given order, otherwise\n should be callable.\n\n Returns\n -------\n y : float, Array\n The integral of fun from `a` to `b`.\n info : QuadratureInfo\n Named tuple with the following fields:\n\n * err : (float) Estimate of the error in the approximation.\n * neval : (int) Total number of function evaluations.\n * status : (int) Flag indicating reason for termination. status of 0 means\n normal termination, any other value indicates a possible error. A human\n readable message can be obtained by ``print(quadax.STATUS[status])``\n * info : (dict or None) Other information returned by the algorithm.\n Only present if ``full_output`` is True. Contains the following:\n\n * 'ninter' : (int) The number, K, of sub-intervals produced in the\n subdivision process.\n * 'a_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the left end points of the (remapped) sub-intervals\n in the partition of the integration range.\n * 'b_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the right end points of the (remapped) sub-intervals.\n * 'r_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the integral approximations on the sub-intervals.\n * 'e_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the moduli of the absolute error estimates on the\n sub-intervals.\n\n Notes\n -----\n Adaptive algorithms are inherently somewhat sequential, so perfect parallelism\n is generally not achievable. The local quadrature rule vmaps integrand evaluation at\n ``order`` points, so using higher order methods will generally be more efficient on\n GPU/TPU.\n\n \"\"\"\n y, info = adaptive_quadrature(\n fixed_quadts,\n fun,\n interval,\n args,\n full_output,\n epsabs,\n epsrel,\n max_ninter,\n n=order,\n norm=norm,\n )\n info = QuadratureInfo(info.err, info.neval * order, info.status, info.info)\n return y, info"
}
] | import jax.numpy as jnp
import numpy as np
import pytest
import scipy
from jax.config import config as jax_config
from quadax import quadcc, quadgk, quadts, romberg, rombergts | 8,417 | y, info = quadts(
prob["fun"],
prob["interval"],
epsabs=tol,
epsrel=tol,
**kwargs,
)
assert info.status == status
if status == 0:
assert info.err < max(tol, tol * np.max(np.abs(y)))
np.testing.assert_allclose(
y,
prob["val"],
rtol=fudge * tol,
atol=fudge * tol,
err_msg=f"problem {i}, tol={tol}",
)
def test_prob0(self):
"""Test for example problem #0."""
self._base(0, 1e-4)
self._base(0, 1e-8)
self._base(0, 1e-12)
def test_prob1(self):
"""Test for example problem #1."""
self._base(1, 1e-4)
self._base(1, 1e-8)
self._base(1, 1e-12)
def test_prob2(self):
"""Test for example problem #2."""
self._base(2, 1e-4, order=41)
self._base(2, 1e-8, order=41)
self._base(2, 1e-12, order=41)
def test_prob3(self):
"""Test for example problem #3."""
self._base(3, 1e-4, order=61)
self._base(3, 1e-8, order=61)
self._base(3, 1e-12, order=61)
def test_prob4(self):
"""Test for example problem #4."""
self._base(4, 1e-4, order=81)
self._base(4, 1e-8, order=81)
self._base(4, 1e-12, order=81)
def test_prob5(self):
"""Test for example problem #5."""
self._base(5, 1e-4, order=101)
self._base(5, 1e-8, order=101)
self._base(5, 1e-12, order=101)
def test_prob6(self):
"""Test for example problem #6."""
self._base(6, 1e-4)
self._base(6, 1e-8)
self._base(6, 1e-12, 1e4)
def test_prob7(self):
"""Test for example problem #7."""
self._base(7, 1e-4)
self._base(7, 1e-8)
self._base(7, 1e-12)
def test_prob8(self):
"""Test for example problem #8."""
self._base(8, 1e-4)
self._base(8, 1e-8)
self._base(8, 1e-12)
def test_prob9(self):
"""Test for example problem #9."""
self._base(9, 1e-4)
self._base(9, 1e-8, 10)
self._base(9, 1e-12, 1e4)
def test_prob10(self):
"""Test for example problem #10."""
self._base(10, 1e-4)
self._base(10, 1e-8)
self._base(10, 1e-12)
def test_prob11(self):
"""Test for example problem #11."""
self._base(11, 1e-4)
self._base(11, 1e-8)
self._base(11, 1e-12, 1e4)
def test_prob12(self):
"""Test for example problem #12."""
self._base(12, 1e-4)
self._base(12, 1e-8)
self._base(12, 1e-12)
def test_prob13(self):
"""Test for example problem #13."""
self._base(13, 1e-4)
self._base(13, 1e-8)
self._base(13, 1e-12)
def test_prob14(self):
"""Test for example problem #14."""
self._base(14, 1e-4)
self._base(14, 1e-8)
self._base(14, 1e-12)
def test_prob15(self):
"""Test for example problem #15."""
        self._base(15, 1e-4)
        self._base(15, 1e-8)
        self._base(15, 1e-12)
class TestRombergTS:
"""Tests for tanh-sinh quadrature with adaptive refinement."""
def _base(self, i, tol, fudge=1, **kwargs):
prob = example_problems[i]
| """Tests for adaptive quadrature routines."""
jax_config.update("jax_enable_x64", True)
example_problems = [
# problem 0
{"fun": lambda t: t * jnp.log(1 + t), "interval": [0, 1], "val": 1 / 4},
# problem 1
{
"fun": lambda t: t**2 * jnp.arctan(t),
"interval": [0, 1],
"val": (jnp.pi - 2 + 2 * jnp.log(2)) / 12,
},
# problem 2
{
"fun": lambda t: jnp.exp(t) * jnp.cos(t),
"interval": [0, jnp.pi / 2],
"val": (jnp.exp(jnp.pi / 2) - 1) / 2,
},
# problem 3
{
"fun": lambda t: jnp.arctan(jnp.sqrt(2 + t**2))
/ ((1 + t**2) * jnp.sqrt(2 + t**2)),
"interval": [0, 1],
"val": 5 * jnp.pi**2 / 96,
},
# problem 4
{"fun": lambda t: jnp.sqrt(t) * jnp.log(t), "interval": [0, 1], "val": -4 / 9},
# problem 5
{"fun": lambda t: jnp.sqrt(1 - t**2), "interval": [0, 1], "val": jnp.pi / 4},
# problem 6
{
"fun": lambda t: jnp.sqrt(t) / jnp.sqrt(1 - t**2),
"interval": [0, 1],
"val": 2
* jnp.sqrt(jnp.pi)
* scipy.special.gamma(3 / 4)
/ scipy.special.gamma(1 / 4),
},
# problem 7
{"fun": lambda t: jnp.log(t) ** 2, "interval": [0, 1], "val": 2},
# problem 8
{
"fun": lambda t: jnp.log(jnp.cos(t)),
"interval": [0, jnp.pi / 2],
"val": -jnp.pi * jnp.log(2) / 2,
},
# problem 9
{
"fun": lambda t: jnp.sqrt(jnp.tan(t)),
"interval": [0, jnp.pi / 2],
"val": jnp.pi * jnp.sqrt(2) / 2,
},
# problem 10
{"fun": lambda t: 1 / (1 + t**2), "interval": [0, jnp.inf], "val": jnp.pi / 2},
# problem 11
{
"fun": lambda t: jnp.exp(-t) / jnp.sqrt(t),
"interval": [0, jnp.inf],
"val": jnp.sqrt(jnp.pi),
},
# problem 12
{
"fun": lambda t: jnp.exp(-(t**2) / 2),
"interval": [-jnp.inf, jnp.inf],
"val": jnp.sqrt(2 * jnp.pi),
},
# problem 13
{"fun": lambda t: jnp.exp(-t) * jnp.cos(t), "interval": [0, jnp.inf], "val": 1 / 2},
    # problem 14 - vector valued integrand made up of problems 0 and 1
{
"fun": lambda t: jnp.array([t * jnp.log(1 + t), t**2 * jnp.arctan(t)]),
"interval": [0, 1],
"val": jnp.array([1 / 4, (jnp.pi - 2 + 2 * jnp.log(2)) / 12]),
},
    # problem 15 - integral with breakpoints
{
"fun": lambda t: jnp.log((t - 1) ** 2),
"interval": [0, 1, 2],
"val": -4,
},
]
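# Problem coverage: 0-9 use finite intervals (several with integrable endpoint
# singularities), 10-13 use semi-infinite or infinite intervals, 14 is a
# vector-valued integrand, and 15 places a log singularity at an interior
# breakpoint supplied through its interval.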
class TestQuadGK:
"""Tests for Gauss-Konrod quadrature."""
def _base(self, i, tol, fudge=1, **kwargs):
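        # Shared helper: integrate example problem i to tolerance tol and compare
        # against the known value, with the allclose tolerances loosened by `fudge`.
        # A "status" kwarg gives the expected termination flag (0 by default); when
        # it is 0 the error estimate is also checked. Remaining kwargs are passed
        # through to the quadrature routine.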
prob = example_problems[i]
status = kwargs.pop("status", 0)
y, info = quadgk(
prob["fun"],
prob["interval"],
epsabs=tol,
epsrel=tol,
**kwargs,
)
assert info.status == status
if status == 0:
assert info.err < max(tol, tol * np.max(np.abs(y)))
np.testing.assert_allclose(
y,
prob["val"],
rtol=fudge * tol,
atol=fudge * tol,
err_msg=f"problem {i}, tol={tol}",
)
def test_prob0(self):
"""Test for example problem #0."""
self._base(0, 1e-4, order=21)
self._base(0, 1e-8, order=21)
self._base(0, 1e-12, order=21)
def test_prob1(self):
"""Test for example problem #1."""
self._base(1, 1e-4, order=31)
self._base(1, 1e-8, order=31)
self._base(1, 1e-12, order=31)
def test_prob2(self):
"""Test for example problem #2."""
self._base(2, 1e-4, order=41)
self._base(2, 1e-8, order=41)
self._base(2, 1e-12, order=41)
def test_prob3(self):
"""Test for example problem #3."""
self._base(3, 1e-4, order=51)
self._base(3, 1e-8, order=51)
self._base(3, 1e-12, order=51)
def test_prob4(self):
"""Test for example problem #4."""
self._base(4, 1e-4, order=61)
self._base(4, 1e-8, order=61)
self._base(4, 1e-12, order=61)
def test_prob5(self):
"""Test for example problem #5."""
self._base(5, 1e-4, order=21)
self._base(5, 1e-8, order=21)
self._base(5, 1e-12, order=21)
def test_prob6(self):
"""Test for example problem #6."""
self._base(6, 1e-4, order=15)
self._base(6, 1e-8, 100, order=15)
self._base(6, 1e-12, 1e5, order=15, max_ninter=100, status=8)
def test_prob7(self):
"""Test for example problem #7."""
self._base(7, 1e-4, order=61)
self._base(7, 1e-8, order=61)
self._base(7, 1e-12, order=61, status=4)
def test_prob8(self):
"""Test for example problem #8."""
self._base(8, 1e-4, order=51)
self._base(8, 1e-8, order=51)
self._base(8, 1e-12, order=51, status=4)
def test_prob9(self):
"""Test for example problem #9."""
self._base(9, 1e-4, order=15)
self._base(9, 1e-8, 100, order=15)
self._base(9, 1e-12, 1e4, order=15, max_ninter=100, status=8)
def test_prob10(self):
"""Test for example problem #10."""
self._base(10, 1e-4, order=15)
self._base(10, 1e-8, order=15)
self._base(10, 1e-12, order=15)
def test_prob11(self):
"""Test for example problem #11."""
self._base(11, 1e-4, order=21)
self._base(11, 1e-8, 100, order=21)
self._base(11, 1e-12, 1e4, order=21, status=8, max_ninter=100)
def test_prob12(self):
"""Test for example problem #12."""
self._base(12, 1e-4, order=15)
self._base(12, 1e-8, order=15)
self._base(12, 1e-12, order=15)
def test_prob13(self):
"""Test for example problem #13."""
self._base(13, 1e-4, order=31)
self._base(13, 1e-8, order=31)
self._base(13, 1e-12, order=31)
def test_prob14(self):
"""Test for example problem #14."""
self._base(14, 1e-4)
self._base(14, 1e-8)
self._base(14, 1e-12)
def test_prob15(self):
"""Test for example problem #15."""
        self._base(15, 1e-4)
        self._base(15, 1e-8)
        self._base(15, 1e-12)
class TestQuadCC:
"""Tests for Clenshaw-Curtis quadrature."""
def _base(self, i, tol, fudge=1, **kwargs):
prob = example_problems[i]
status = kwargs.pop("status", 0)
y, info = quadcc(
prob["fun"],
prob["interval"],
epsabs=tol,
epsrel=tol,
**kwargs,
)
assert info.status == status
if status == 0:
assert info.err < max(tol, tol * np.max(np.abs(y)))
np.testing.assert_allclose(
y,
prob["val"],
rtol=fudge * tol,
atol=fudge * tol,
err_msg=f"problem {i}, tol={tol}",
)
def test_prob0(self):
"""Test for example problem #0."""
self._base(0, 1e-4, order=32)
self._base(0, 1e-8, order=32)
self._base(0, 1e-12, order=32)
def test_prob1(self):
"""Test for example problem #1."""
self._base(1, 1e-4, order=64)
self._base(1, 1e-8, order=64)
self._base(1, 1e-12, order=64)
def test_prob2(self):
"""Test for example problem #2."""
self._base(2, 1e-4, order=128)
self._base(2, 1e-8, order=128)
self._base(2, 1e-12, order=128)
def test_prob3(self):
"""Test for example problem #3."""
self._base(3, 1e-4, order=256)
self._base(3, 1e-8, order=256)
self._base(3, 1e-12, order=256)
def test_prob4(self):
"""Test for example problem #4."""
self._base(4, 1e-4, order=8)
self._base(4, 1e-8, order=8)
self._base(4, 1e-12, order=8, max_ninter=100)
def test_prob5(self):
"""Test for example problem #5."""
self._base(5, 1e-4, order=16)
self._base(5, 1e-8, order=16)
self._base(5, 1e-12, order=16)
def test_prob6(self):
"""Test for example problem #6."""
self._base(6, 1e-4)
self._base(6, 1e-8, 100)
self._base(6, 1e-12, 1e5, max_ninter=100, status=8)
def test_prob7(self):
"""Test for example problem #7."""
self._base(7, 1e-4)
self._base(7, 1e-8, 10)
self._base(7, 1e-12, status=8)
def test_prob8(self):
"""Test for example problem #8."""
self._base(8, 1e-4)
self._base(8, 1e-8)
self._base(8, 1e-12, status=8)
def test_prob9(self):
"""Test for example problem #9."""
self._base(9, 1e-4)
self._base(9, 1e-8, max_ninter=100, status=8)
self._base(9, 1e-12, 1e4, max_ninter=100, status=8)
def test_prob10(self):
"""Test for example problem #10."""
self._base(10, 1e-4)
self._base(10, 1e-8)
self._base(10, 1e-12, 10)
def test_prob11(self):
"""Test for example problem #11."""
self._base(11, 1e-4)
self._base(11, 1e-8, 100)
self._base(11, 1e-12, 1e4, status=8)
def test_prob12(self):
"""Test for example problem #12."""
self._base(12, 1e-4)
self._base(12, 1e-8)
self._base(12, 1e-12)
def test_prob13(self):
"""Test for example problem #13."""
self._base(13, 1e-4)
self._base(13, 1e-8)
self._base(13, 1e-12)
def test_prob14(self):
"""Test for example problem #14."""
self._base(14, 1e-4)
self._base(14, 1e-8)
self._base(14, 1e-12)
def test_prob15(self):
"""Test for example problem #15."""
        self._base(15, 1e-4)
        self._base(15, 1e-8)
        self._base(15, 1e-12)
class TestQuadTS:
"""Tests for adaptive tanh-sinh quadrature."""
def _base(self, i, tol, fudge=1, **kwargs):
prob = example_problems[i]
status = kwargs.pop("status", 0)
y, info = quadts(
prob["fun"],
prob["interval"],
epsabs=tol,
epsrel=tol,
**kwargs,
)
assert info.status == status
if status == 0:
assert info.err < max(tol, tol * np.max(np.abs(y)))
np.testing.assert_allclose(
y,
prob["val"],
rtol=fudge * tol,
atol=fudge * tol,
err_msg=f"problem {i}, tol={tol}",
)
def test_prob0(self):
"""Test for example problem #0."""
self._base(0, 1e-4)
self._base(0, 1e-8)
self._base(0, 1e-12)
def test_prob1(self):
"""Test for example problem #1."""
self._base(1, 1e-4)
self._base(1, 1e-8)
self._base(1, 1e-12)
def test_prob2(self):
"""Test for example problem #2."""
self._base(2, 1e-4, order=41)
self._base(2, 1e-8, order=41)
self._base(2, 1e-12, order=41)
def test_prob3(self):
"""Test for example problem #3."""
self._base(3, 1e-4, order=61)
self._base(3, 1e-8, order=61)
self._base(3, 1e-12, order=61)
def test_prob4(self):
"""Test for example problem #4."""
self._base(4, 1e-4, order=81)
self._base(4, 1e-8, order=81)
self._base(4, 1e-12, order=81)
def test_prob5(self):
"""Test for example problem #5."""
self._base(5, 1e-4, order=101)
self._base(5, 1e-8, order=101)
self._base(5, 1e-12, order=101)
def test_prob6(self):
"""Test for example problem #6."""
self._base(6, 1e-4)
self._base(6, 1e-8)
self._base(6, 1e-12, 1e4)
def test_prob7(self):
"""Test for example problem #7."""
self._base(7, 1e-4)
self._base(7, 1e-8)
self._base(7, 1e-12)
def test_prob8(self):
"""Test for example problem #8."""
self._base(8, 1e-4)
self._base(8, 1e-8)
self._base(8, 1e-12)
def test_prob9(self):
"""Test for example problem #9."""
self._base(9, 1e-4)
self._base(9, 1e-8, 10)
self._base(9, 1e-12, 1e4)
def test_prob10(self):
"""Test for example problem #10."""
self._base(10, 1e-4)
self._base(10, 1e-8)
self._base(10, 1e-12)
def test_prob11(self):
"""Test for example problem #11."""
self._base(11, 1e-4)
self._base(11, 1e-8)
self._base(11, 1e-12, 1e4)
def test_prob12(self):
"""Test for example problem #12."""
self._base(12, 1e-4)
self._base(12, 1e-8)
self._base(12, 1e-12)
def test_prob13(self):
"""Test for example problem #13."""
self._base(13, 1e-4)
self._base(13, 1e-8)
self._base(13, 1e-12)
def test_prob14(self):
"""Test for example problem #14."""
self._base(14, 1e-4)
self._base(14, 1e-8)
self._base(14, 1e-12)
def test_prob15(self):
"""Test for example problem #15."""
        self._base(15, 1e-4)
        self._base(15, 1e-8)
        self._base(15, 1e-12)
class TestRombergTS:
"""Tests for tanh-sinh quadrature with adaptive refinement."""
def _base(self, i, tol, fudge=1, **kwargs):
prob = example_problems[i] | y, info = rombergts( | 1 | 2023-10-24 04:44:34+00:00 | 12k |
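For orientation, the quadts docstring captured in this record's context documents the full adaptive tanh-sinh API, while the tests above only reach it through the _base helper. The following standalone call is a minimal sketch: the integrand (problem 7's log(t)**2, whose integral over [0, 1] is 2) and the tolerances are illustrative choices, not taken from the repository.

import jax.numpy as jnp
from quadax import quadts

# Endpoint-singular integrand where tanh-sinh quadrature is a good fit.
fun = lambda t: jnp.log(t) ** 2

# Tolerances and full_output=True are illustrative choices.
y, info = quadts(fun, [0.0, 1.0], epsabs=1e-10, epsrel=1e-10, full_output=True)
print(y)            # expected to be close to 2.0
print(info.err)     # estimated error
print(info.neval)   # total integrand evaluations
print(info.status)  # 0 indicates normal termination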
zju3dv/nr_in_a_room | models_neurecon/neus_multi_rendering.py | [
{
"identifier": "NeuS",
"path": "models_neurecon/neus.py",
"snippet": "class NeuS(nn.Module):\n def __init__(\n self,\n variance_init=0.05,\n speed_factor=1.0,\n input_ch=3,\n input_obj_ch=0,\n input_light_ch=0,\n input_appearance_ch=0,\n W_geo_feat=-1,\n use_outside_nerf=False,\n obj_bounding_radius=1.0,\n surface_cfg=dict(),\n radiance_cfg=dict(),\n ):\n super().__init__()\n\n self.ln_s = nn.Parameter(\n data=torch.Tensor([-np.log(variance_init) / speed_factor]),\n requires_grad=True,\n )\n self.speed_factor = speed_factor\n\n # ------- surface network\n self.implicit_surface = ImplicitSurface(\n W_geo_feat=W_geo_feat,\n input_ch=input_ch,\n input_obj_ch=input_obj_ch,\n obj_bounding_size=obj_bounding_radius,\n **surface_cfg,\n )\n\n # ------- radiance network\n if W_geo_feat < 0:\n W_geo_feat = self.implicit_surface.W\n self.radiance_net = RadianceNet(\n W_geo_feat=W_geo_feat,\n input_light_ch=input_light_ch,\n input_appearance_ch=input_appearance_ch,\n **radiance_cfg,\n )\n\n # -------- outside nerf++\n if use_outside_nerf:\n self.nerf_outside = NeRF(\n input_ch=4, multires=10, multires_view=4, use_view_dirs=True\n )\n\n def forward_radiance(\n self,\n x: torch.Tensor,\n obj_code: torch.Tensor,\n light_code: torch.Tensor,\n view_dirs: torch.Tensor,\n appearance_code: torch.Tensor = None,\n ):\n _, nablas, geometry_feature = self.implicit_surface.forward_with_nablas(\n x, obj_code\n )\n radiance = self.radiance_net.forward(\n x,\n light_code,\n view_dirs,\n nablas,\n geometry_feature,\n appearance_code=appearance_code,\n )\n return radiance\n\n def forward_s(self):\n return torch.exp(self.ln_s * self.speed_factor)\n\n def forward(\n self,\n x: torch.Tensor,\n obj_code: torch.Tensor,\n light_code: torch.Tensor,\n view_dirs: torch.Tensor,\n appearance_code: torch.Tensor = None,\n ):\n sdf, nablas, geometry_feature = self.implicit_surface.forward_with_nablas(\n x, obj_code\n )\n radiances = self.radiance_net.forward(\n x,\n light_code,\n view_dirs,\n nablas,\n geometry_feature,\n appearance_code=appearance_code,\n )\n return radiances, sdf, nablas"
},
{
"identifier": "volume_render",
"path": "models_neurecon/neus.py",
"snippet": "def volume_render(\n rays_o,\n rays_d,\n model: NeuS,\n obj_code=None,\n light_code=None,\n appearance_code=None,\n obj_bounding_radius=1.0,\n batched=False,\n batched_info={},\n # render algorithm config\n calc_normal=False,\n use_view_dirs=True,\n rayschunk=65536,\n netchunk=1048576,\n white_bkgd=False,\n near_bypass: Optional[torch.Tensor] = None,\n far_bypass: Optional[torch.Tensor] = None,\n # render function config\n detailed_output=True,\n show_progress=False,\n # sampling related\n perturb=False, # config whether do stratified sampling\n fixed_s_recp=1 / 64.0,\n N_samples=64,\n N_importance=64,\n N_outside=0, # whether to use outside nerf\n # upsample related\n upsample_algo=\"official_solution\",\n N_nograd_samples=2048,\n N_upsample_iters=4,\n skip_accumulation=False, # skip accumulation and directly output opacity and radiance\n **dummy_kwargs # just place holder\n):\n \"\"\"\n input:\n rays_o: [(B,) N_rays, 3]\n rays_d: [(B,) N_rays, 3] NOTE: not normalized. contains info about ratio of len(this ray)/len(principle ray)\n \"\"\"\n # we add obj_code, which may break the batched\n assert batched == False\n device = rays_o.device\n if batched:\n DIM_BATCHIFY = 1\n B = rays_d.shape[0] # batch_size\n flat_vec_shape = [B, -1, 3]\n else:\n DIM_BATCHIFY = 0\n flat_vec_shape = [-1, 3]\n\n rays_o = torch.reshape(rays_o, flat_vec_shape).float()\n rays_d = torch.reshape(rays_d, flat_vec_shape).float()\n # NOTE: already normalized\n rays_d = F.normalize(rays_d, dim=-1)\n\n batchify_query = functools.partial(\n train_util.batchify_query, chunk=netchunk, dim_batchify=DIM_BATCHIFY\n )\n\n # ---------------\n # Render a ray chunk\n # ---------------\n def render_rayschunk(\n rays_o: torch.Tensor,\n rays_d: torch.Tensor,\n near: torch.Tensor,\n far: torch.Tensor,\n obj_code: torch.Tensor = None,\n light_code: torch.Tensor = None,\n appearance_code: torch.Tensor = None,\n ):\n # rays_o: [(B), N_rays, 3]\n # rays_d: [(B), N_rays, 3]\n\n # [(B), N_rays] x 2\n # near, far = rend_util.near_far_from_sphere(rays_o, rays_d, r=obj_bounding_radius)\n # if near_bypass is not None:\n # near = near_bypass * torch.ones_like(near).to(device)\n # if far_bypass is not None:\n # far = far_bypass * torch.ones_like(far).to(device)\n\n if use_view_dirs:\n view_dirs = rays_d\n else:\n view_dirs = None\n\n prefix_batch = [B] if batched else []\n N_rays = rays_o.shape[-2]\n\n # ---------------\n # Sample points on the rays\n # ---------------\n\n # ---------------\n # Coarse Points\n\n # [(B), N_rays, N_samples]\n # d_coarse = torch.linspace(near, far, N_samples).float().to(device)\n # d_coarse = d_coarse.view([*[1]*len(prefix_batch), 1, N_samples]).repeat([*prefix_batch, N_rays, 1])\n _t = torch.linspace(0, 1, N_samples).float().to(device)\n d_coarse = near * (1 - _t) + far * _t\n\n if obj_code is not None:\n obj_code = obj_code.unsqueeze(1) # [N_rays, 1, N_obj_ch]\n if light_code is not None:\n light_code = light_code.unsqueeze(1) # [N_rays, 1, N_light_ch]\n if appearance_code is not None:\n appearance_code = appearance_code.unsqueeze(1) # [N_rays, 1, N_light_ch]\n\n # ---------------\n # Up Sampling\n with torch.no_grad():\n if upsample_algo == \"official_solution\":\n _d = d_coarse\n # [(B), N_rays, N_sample, 3]\n # N_rays, N_obj_ch = obj_code.shape\n # obj_code = obj_code.view(N_rays, 1, N_obj_ch)\n\n _sdf = batchify_query(\n model.implicit_surface.forward,\n rays_o.unsqueeze(-2) + _d.unsqueeze(-1) * rays_d.unsqueeze(-2),\n None if obj_code is None else obj_code.expand(-1, _d.shape[1], -1),\n )\n for i 
in range(N_upsample_iters):\n prev_sdf, next_sdf = _sdf[..., :-1], _sdf[..., 1:]\n prev_z_vals, next_z_vals = _d[..., :-1], _d[..., 1:]\n mid_sdf = (prev_sdf + next_sdf) * 0.5\n dot_val = (next_sdf - prev_sdf) / (next_z_vals - prev_z_vals + 1e-5)\n prev_dot_val = torch.cat(\n [\n torch.zeros_like(dot_val[..., :1], device=device),\n dot_val[..., :-1],\n ],\n dim=-1,\n ) # jianfei: prev_slope, right shifted\n dot_val = torch.stack(\n [prev_dot_val, dot_val], dim=-1\n ) # jianfei: concat prev_slope with slope\n dot_val, _ = torch.min(\n dot_val, dim=-1, keepdim=False\n ) # jianfei: find the minimum of prev_slope and current slope. (forward diff vs. backward diff., or the prev segment's slope vs. this segment's slope)\n dot_val = dot_val.clamp(-10.0, 0.0)\n\n dist = next_z_vals - prev_z_vals\n prev_esti_sdf = mid_sdf - dot_val * dist * 0.5\n next_esti_sdf = mid_sdf + dot_val * dist * 0.5\n\n prev_cdf = cdf_Phi_s(prev_esti_sdf, 64 * (2**i))\n next_cdf = cdf_Phi_s(next_esti_sdf, 64 * (2**i))\n alpha = (prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5)\n _w = alpha_to_w(alpha)\n d_fine = rend_util.sample_pdf(\n _d, _w, N_importance // N_upsample_iters, det=not perturb\n )\n _d = torch.cat([_d, d_fine], dim=-1)\n sdf_fine = batchify_query(\n model.implicit_surface.forward,\n rays_o.unsqueeze(-2)\n + d_fine.unsqueeze(-1) * rays_d.unsqueeze(-2),\n None\n if obj_code is None\n else obj_code.expand(-1, d_fine.shape[1], -1),\n )\n _sdf = torch.cat([_sdf, sdf_fine], dim=-1)\n _d, d_sort_indices = torch.sort(_d, dim=-1)\n _sdf = torch.gather(_sdf, DIM_BATCHIFY + 1, d_sort_indices)\n d_all = _d\n else:\n raise NotImplementedError\n\n # ------------------\n # Calculate Points\n # [(B), N_rays, N_samples+N_importance, 3]\n pts = rays_o[..., None, :] + rays_d[..., None, :] * d_all[..., :, None]\n # [(B), N_rays, N_pts-1, 3]\n # pts_mid = 0.5 * (pts[..., 1:, :] + pts[..., :-1, :])\n d_mid = 0.5 * (d_all[..., 1:] + d_all[..., :-1])\n pts_mid = rays_o[..., None, :] + rays_d[..., None, :] * d_mid[..., :, None]\n\n # ------------------\n # Inside Scene\n # ------------------\n # sdf, nablas, _ = model.implicit_surface.forward_with_nablas(pts)\n sdf, nablas, _ = batchify_query(\n model.implicit_surface.forward_with_nablas,\n pts,\n None if obj_code is None else obj_code.expand(-1, pts.shape[1], -1),\n )\n # [(B), N_ryas, N_pts], [(B), N_ryas, N_pts-1]\n cdf, opacity_alpha = sdf_to_alpha(sdf, model.forward_s())\n # radiances = model.forward_radiance(pts_mid, view_dirs_mid)\n _, N_sample_mid, _ = pts_mid.shape\n radiances = batchify_query(\n model.forward_radiance,\n pts_mid,\n None if obj_code is None else obj_code.expand(-1, pts_mid.shape[1], -1),\n None if light_code is None else light_code.expand(-1, pts_mid.shape[1], -1),\n view_dirs.unsqueeze(-2).expand_as(pts_mid) if use_view_dirs else None,\n None\n if appearance_code is None\n else appearance_code.expand(-1, pts_mid.shape[1], -1),\n )\n\n # ------------------\n # Outside Scene\n # ------------------\n if N_outside > 0:\n assert False, \"obj_code not implemented\"\n _t = torch.linspace(0, 1, N_outside + 2)[..., 1:-1].float().to(device)\n d_vals_out = far / torch.flip(_t, dims=[-1])\n if perturb:\n _mids = 0.5 * (d_vals_out[..., 1:] + d_vals_out[..., :-1])\n _upper = torch.cat([_mids, d_vals_out[..., -1:]], -1)\n _lower = torch.cat([d_vals_out[..., :1], _mids], -1)\n _t_rand = torch.rand(_upper.shape).float().to(device)\n d_vals_out = _lower + (_upper - _lower) * _t_rand\n\n d_vals_out = torch.cat([d_mid, d_vals_out], dim=-1) # already sorted\n pts_out = (\n 
rays_o[..., None, :] + rays_d[..., None, :] * d_vals_out[..., :, None]\n )\n r = pts_out.norm(dim=-1, keepdim=True)\n x_out = torch.cat([pts_out / r, 1.0 / r], dim=-1)\n views_out = (\n view_dirs.unsqueeze(-2).expand_as(x_out[..., :3])\n if use_view_dirs\n else None\n )\n\n sigma_out, radiance_out = batchify_query(\n model.nerf_outside.forward, x_out, views_out\n )\n dists = d_vals_out[..., 1:] - d_vals_out[..., :-1]\n dists = torch.cat(\n [dists, 1e10 * torch.ones(dists[..., :1].shape).to(device)], dim=-1\n )\n alpha_out = 1 - torch.exp(\n -F.softplus(sigma_out) * dists\n ) # use softplus instead of relu as NeuS's official repo\n\n # --------------\n # Ray Integration\n # --------------\n # [(B), N_rays, N_pts-1]\n if N_outside > 0:\n assert False, \"obj_code not implemented\"\n N_pts_1 = d_mid.shape[-1]\n # [(B), N_ryas, N_pts-1]\n mask_inside = pts_mid.norm(dim=-1) <= obj_bounding_radius\n # [(B), N_ryas, N_pts-1]\n alpha_in = (\n opacity_alpha * mask_inside.float()\n + alpha_out[..., :N_pts_1] * (~mask_inside).float()\n )\n # [(B), N_ryas, N_pts-1 + N_outside]\n opacity_alpha = torch.cat([alpha_in, alpha_out[..., N_pts_1:]], dim=-1)\n\n # [(B), N_ryas, N_pts-1, 3]\n radiance_in = (\n radiances * mask_inside.float()[..., None]\n + radiance_out[..., :N_pts_1, :] * (~mask_inside).float()[..., None]\n )\n # [(B), N_ryas, N_pts-1 + N_outside, 3]\n radiances = torch.cat([radiance_in, radiance_out[..., N_pts_1:, :]], dim=-2)\n d_final = d_vals_out\n else:\n d_final = d_mid\n\n if skip_accumulation:\n return {\n \"z_vals\": d_final,\n \"opacity\": opacity_alpha,\n \"radiances\": radiances,\n }\n\n # [(B), N_ryas, N_pts-1 + N_outside]\n visibility_weights = alpha_to_w(opacity_alpha)\n # [(B), N_rays]\n rgb_map = torch.sum(visibility_weights[..., None] * radiances, -2)\n # depth_map = torch.sum(visibility_weights * d_mid, -1)\n # NOTE: to get the correct depth map, the sum of weights must be 1!\n depth_map = torch.sum(\n visibility_weights\n / (visibility_weights.sum(-1, keepdim=True) + 1e-10)\n * d_final,\n -1,\n )\n acc_map = torch.sum(visibility_weights, -1)\n\n if white_bkgd:\n rgb_map = rgb_map + (1.0 - acc_map[..., None])\n\n ret_i = OrderedDict(\n [\n (\"rgb\", rgb_map), # [(B), N_rays, 3]\n (\"depth_volume\", depth_map), # [(B), N_rays]\n # ('depth_surface', d_pred_out), # [(B), N_rays]\n (\"mask_volume\", acc_map), # [(B), N_rays]\n ]\n )\n\n if calc_normal:\n normals_map = F.normalize(nablas, dim=-1)\n N_pts = min(visibility_weights.shape[-1], normals_map.shape[-2])\n normals_map = (\n normals_map[..., :N_pts, :] * visibility_weights[..., :N_pts, None]\n ).sum(dim=-2)\n ret_i[\"normals_volume\"] = normals_map\n\n if detailed_output:\n ret_i[\"implicit_nablas\"] = nablas\n ret_i[\"implicit_surface\"] = sdf\n ret_i[\"radiance\"] = radiances\n ret_i[\"alpha\"] = opacity_alpha\n ret_i[\"cdf\"] = cdf\n ret_i[\"visibility_weights\"] = visibility_weights\n ret_i[\"d_final\"] = d_final\n if N_outside > 0:\n assert False, \"obj_code not implemented\"\n ret_i[\"sigma_out\"] = sigma_out\n ret_i[\"radiance_out\"] = radiance_out\n\n return ret_i\n\n ret = {}\n for i in tqdm(\n range(0, rays_o.shape[DIM_BATCHIFY], rayschunk), disable=not show_progress\n ):\n if obj_code is not None:\n obj_code_chunk = (\n obj_code[:, i : i + rayschunk]\n if batched\n else obj_code[i : i + rayschunk]\n )\n else:\n obj_code_chunk = None\n if light_code is not None:\n light_code_chunk = (\n light_code[:, i : i + rayschunk]\n if batched\n else light_code[i : i + rayschunk]\n )\n else:\n light_code_chunk = None\n\n if 
appearance_code is not None:\n appearance_code_chunk = (\n appearance_code[:, i : i + rayschunk]\n if batched\n else appearance_code[i : i + rayschunk]\n )\n else:\n appearance_code_chunk = None\n\n ret_i = render_rayschunk(\n rays_o=rays_o[:, i : i + rayschunk]\n if batched\n else rays_o[i : i + rayschunk],\n rays_d=rays_d[:, i : i + rayschunk]\n if batched\n else rays_d[i : i + rayschunk],\n near=near_bypass[:, i : i + rayschunk]\n if batched\n else near_bypass[i : i + rayschunk],\n far=far_bypass[:, i : i + rayschunk]\n if batched\n else far_bypass[i : i + rayschunk],\n obj_code=obj_code_chunk,\n light_code=light_code_chunk,\n appearance_code=appearance_code_chunk,\n )\n for k, v in ret_i.items():\n if k not in ret:\n ret[k] = []\n ret[k].append(v)\n for k, v in ret.items():\n ret[k] = torch.cat(v, DIM_BATCHIFY)\n\n if skip_accumulation:\n return ret\n\n return ret[\"rgb\"], ret[\"depth_volume\"], ret"
},
{
"identifier": "ImplicitSurface",
"path": "models_neurecon/base.py",
"snippet": "class ImplicitSurface(nn.Module):\n def __init__(\n self,\n W=256,\n D=8,\n skips=[4],\n W_geo_feat=256,\n input_ch=3,\n input_obj_ch=0,\n radius_init=1.0,\n radius_init_inside_out=1.0,\n obj_bounding_size=2.0,\n geometric_init=True,\n inside_out=False,\n embed_multires=6,\n weight_norm=True,\n use_siren=False,\n ):\n \"\"\"\n W_geo_feat: to set whether to use nerf-like geometry feature or IDR-like geometry feature.\n set to -1: nerf-like, the output feature is the second to last level's feature of the geometry network.\n set to >0: IDR-like ,the output feature is the last part of the geometry network's output.\n \"\"\"\n super().__init__()\n # occ_net_list = [\n # nn.Sequential(\n # nn.Linear(input_ch, W),\n # nn.Softplus(),\n # )\n # ] + [\n # nn.Sequential(\n # nn.Linear(W, W),\n # nn.Softplus()\n # ) for _ in range(D-2)\n # ] + [\n # nn.Linear(W, 1)\n # ]\n self.radius_init = radius_init\n self.radius_init_inside_out = radius_init_inside_out\n self.register_buffer(\n \"obj_bounding_size\", torch.tensor([obj_bounding_size]).float()\n )\n self.geometric_init = geometric_init\n self.D = D\n self.W = W\n self.W_geo_feat = W_geo_feat\n if use_siren:\n assert len(skips) == 0, \"do not use skips for siren\"\n self.register_buffer(\n \"is_pretrained\", torch.tensor([False], dtype=torch.bool)\n )\n self.skips = skips\n self.use_siren = use_siren\n self.embed_fn, input_ch = get_embedder(embed_multires)\n input_ch += input_obj_ch\n self.input_obj_ch = input_obj_ch\n\n surface_fc_layers = []\n # NOTE: as in IDR/NeuS, the network's has D+1 layers\n for l in range(D + 1):\n # decide out_dim\n if l == D:\n if W_geo_feat > 0:\n out_dim = 1 + W_geo_feat\n else:\n out_dim = 1\n elif (l + 1) in self.skips:\n out_dim = (\n W - input_ch\n ) # recude output dim before the skips layers, as in IDR / NeuS\n else:\n out_dim = W\n\n # decide in_dim\n if l == 0:\n in_dim = input_ch\n else:\n in_dim = W\n\n if l != D:\n if use_siren:\n layer = SirenLayer(in_dim, out_dim, is_first=(l == 0))\n else:\n # NOTE: beta=100 is important! 
Otherwise, the initial output would all be > 10, and there is not initial sphere.\n layer = DenseLayer(\n in_dim, out_dim, activation=nn.Softplus(beta=100)\n )\n else:\n layer = nn.Linear(in_dim, out_dim)\n\n # if true preform preform geometric initialization\n if geometric_init and not use_siren:\n # --------------\n # sphere init, as in SAL / IDR.\n # --------------\n if l == D:\n if inside_out:\n nn.init.normal_(\n layer.weight,\n mean=-np.sqrt(np.pi) / np.sqrt(in_dim),\n std=0.0001,\n )\n nn.init.constant_(layer.bias, radius_init_inside_out)\n else:\n nn.init.normal_(\n layer.weight,\n mean=np.sqrt(np.pi) / np.sqrt(in_dim),\n std=0.0001,\n )\n nn.init.constant_(layer.bias, -radius_init)\n elif embed_multires > 0 and l == 0:\n torch.nn.init.constant_(layer.bias, 0.0)\n torch.nn.init.constant_(\n layer.weight[:, 3:], 0.0\n ) # let the initial weights for octaves to be 0.\n torch.nn.init.normal_(\n layer.weight[:, :3], 0.0, np.sqrt(2) / np.sqrt(out_dim)\n )\n elif embed_multires > 0 and l in self.skips:\n torch.nn.init.constant_(layer.bias, 0.0)\n torch.nn.init.normal_(\n layer.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim)\n )\n torch.nn.init.constant_(\n layer.weight[:, -(input_ch - 3) :], 0.0\n ) # NOTE: this contrains the concat order to be [h, x_embed]\n else:\n nn.init.constant_(layer.bias, 0.0)\n nn.init.normal_(layer.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))\n\n if weight_norm:\n layer = nn.utils.weight_norm(layer)\n\n surface_fc_layers.append(layer)\n\n self.surface_fc_layers = nn.ModuleList(surface_fc_layers)\n\n def pretrain_hook(self, configs={}):\n configs[\"target_radius\"] = self.radius_init\n # TODO: more flexible, bbox-like scene bound.\n configs[\"obj_bounding_size\"] = self.obj_bounding_size.item()\n if self.geometric_init and self.use_siren and not self.is_pretrained:\n pretrain_siren_sdf(self, **configs)\n self.is_pretrained = ~self.is_pretrained\n return True\n return False\n\n def forward(self, x: torch.Tensor, obj_code: torch.Tensor = None, return_h=False):\n if self.input_obj_ch > 0:\n x = self.embed_fn(x)\n x = torch.cat([x, obj_code], -1)\n else:\n # x: [1, N_rays, 3]\n x = self.embed_fn(x)\n # assert obj_code is None\n\n h = x\n for i in range(self.D):\n if i in self.skips:\n # NOTE: concat order can not change! there are special operations taken in intialization.\n h = torch.cat([h, x], dim=-1) / np.sqrt(2)\n h = self.surface_fc_layers[i](h)\n\n out = self.surface_fc_layers[-1](h)\n\n if self.W_geo_feat > 0:\n h = out[..., 1:]\n out = out[..., :1].squeeze(-1)\n else:\n out = out.squeeze(-1)\n if return_h:\n return out, h\n else:\n return out\n\n def forward_with_nablas(\n self,\n x: torch.Tensor,\n obj_code: torch.Tensor = None,\n has_grad_bypass: bool = None,\n ):\n has_grad = (\n torch.is_grad_enabled() if has_grad_bypass is None else has_grad_bypass\n )\n # force enabling grad for normal calculation\n with torch.enable_grad():\n x = x.requires_grad_(True)\n implicit_surface_val, h = self.forward(x, obj_code=obj_code, return_h=True)\n nabla = autograd.grad(\n implicit_surface_val,\n x,\n torch.ones_like(implicit_surface_val, device=x.device),\n create_graph=has_grad,\n retain_graph=has_grad,\n only_inputs=True,\n )[0]\n if not has_grad:\n implicit_surface_val = implicit_surface_val.detach()\n nabla = nabla.detach()\n h = h.detach()\n return implicit_surface_val, nabla, h"
}
] | import ipdb
import torch
import sys
import os
import copy
from typing import List, Dict, Any
from einops import rearrange, reduce, repeat
from models_neurecon.neus import NeuS, volume_render
from models_neurecon.base import ImplicitSurface | 7,451 |
sys.path.append(os.getcwd()) # noqa
def volume_rendering_multi_neus(
results,
typ,
z_vals_list,
rgbs_list,
alphas_list,
noise_std,
white_back,
obj_ids_list=None,
):
N_objs = len(z_vals_list)
# order via z_vals
z_vals = torch.cat(z_vals_list, 1) # (N_rays, N_samples*N_objs)
rgbs = torch.cat(rgbs_list, 1) # (N_rays, N_samples*N_objs, 3)
alphas = torch.cat(alphas_list, 1) # (N_rays, N_samples*N_objs)
z_vals, idx_sorted = torch.sort(z_vals, -1)
for i in range(3):
rgbs[:, :, i] = torch.gather(rgbs[:, :, i].clone(), dim=1, index=idx_sorted)
alphas = torch.gather(alphas, dim=1, index=idx_sorted)
# record object ids for recovering weights of each object after sorting
if obj_ids_list != None:
obj_ids = torch.cat(obj_ids_list, -1)
results[f"obj_ids_{typ}"] = torch.gather(obj_ids, dim=1, index=idx_sorted)
alphas_shifted = torch.cat(
[torch.ones_like(alphas[:, :1]), 1 - alphas + 1e-10], -1
) # [1, 1-a1, 1-a2, ...]
weights = alphas * torch.cumprod(alphas_shifted[:, :-1], -1) # (N_rays, N_samples_)
weights_sum = reduce(
weights, "n1 n2 -> n1", "sum"
) # (N_rays), the accumulated opacity along the rays
# equals "1 - (1-a1)(1-a2)...(1-an)" mathematically
# results[f"weights_{typ}"] = weights
results[f"opacity_{typ}"] = weights_sum
# results[f"z_vals_{typ}"] = z_vals
rgb_map = reduce(
rearrange(weights, "n1 n2 -> n1 n2 1") * rgbs, "n1 n2 c -> n1 c", "sum"
)
depth_map = reduce(weights * z_vals, "n1 n2 -> n1", "sum")
if white_back:
rgb_map = rgb_map + 1 - weights_sum.unsqueeze(-1)
results[f"rgb_{typ}"] = rgb_map
results[f"depth_{typ}"] = depth_map
# adapted from neurecon/ray_casting.py
def sphere_tracing_surface_points(
|
sys.path.append(os.getcwd()) # noqa
def volume_rendering_multi_neus(
results,
typ,
z_vals_list,
rgbs_list,
alphas_list,
noise_std,
white_back,
obj_ids_list=None,
):
N_objs = len(z_vals_list)
# order via z_vals
z_vals = torch.cat(z_vals_list, 1) # (N_rays, N_samples*N_objs)
rgbs = torch.cat(rgbs_list, 1) # (N_rays, N_samples*N_objs, 3)
alphas = torch.cat(alphas_list, 1) # (N_rays, N_samples*N_objs)
z_vals, idx_sorted = torch.sort(z_vals, -1)
for i in range(3):
rgbs[:, :, i] = torch.gather(rgbs[:, :, i].clone(), dim=1, index=idx_sorted)
alphas = torch.gather(alphas, dim=1, index=idx_sorted)
# record object ids for recovering weights of each object after sorting
if obj_ids_list != None:
obj_ids = torch.cat(obj_ids_list, -1)
results[f"obj_ids_{typ}"] = torch.gather(obj_ids, dim=1, index=idx_sorted)
alphas_shifted = torch.cat(
[torch.ones_like(alphas[:, :1]), 1 - alphas + 1e-10], -1
) # [1, 1-a1, 1-a2, ...]
weights = alphas * torch.cumprod(alphas_shifted[:, :-1], -1) # (N_rays, N_samples_)
weights_sum = reduce(
weights, "n1 n2 -> n1", "sum"
) # (N_rays), the accumulated opacity along the rays
# equals "1 - (1-a1)(1-a2)...(1-an)" mathematically
# results[f"weights_{typ}"] = weights
results[f"opacity_{typ}"] = weights_sum
# results[f"z_vals_{typ}"] = z_vals
rgb_map = reduce(
rearrange(weights, "n1 n2 -> n1 n2 1") * rgbs, "n1 n2 c -> n1 c", "sum"
)
depth_map = reduce(weights * z_vals, "n1 n2 -> n1", "sum")
if white_back:
rgb_map = rgb_map + 1 - weights_sum.unsqueeze(-1)
results[f"rgb_{typ}"] = rgb_map
results[f"depth_{typ}"] = depth_map
# adapted from neurecon/ray_casting.py
def sphere_tracing_surface_points( | implicit_surface: ImplicitSurface, | 2 | 2023-10-15 08:41:29+00:00 | 12k |
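The volume_rendering_multi_neus routine in this record merges the samples contributed by every object, sorts them by depth, and then composites them front to back. The toy sketch below isolates that sort-then-accumulate step; the tensor shapes and random values are made-up inputs for illustration, not data from the repository.

import torch

# Two hypothetical objects, each contributing 2 samples along 3 rays.
z_vals_list = [torch.rand(3, 2), torch.rand(3, 2)]
alphas_list = [torch.rand(3, 2), torch.rand(3, 2)]

z_vals = torch.cat(z_vals_list, dim=1)    # (N_rays, N_samples_total)
alphas = torch.cat(alphas_list, dim=1)
z_vals, idx = torch.sort(z_vals, dim=-1)  # merge the objects by depth
alphas = torch.gather(alphas, dim=1, index=idx)

# Front-to-back compositing: w_i = a_i * prod_{j<i} (1 - a_j)
alphas_shifted = torch.cat([torch.ones_like(alphas[:, :1]), 1 - alphas + 1e-10], dim=-1)
weights = alphas * torch.cumprod(alphas_shifted[:, :-1], dim=-1)
opacity = weights.sum(-1)           # accumulated opacity per ray
depth = (weights * z_vals).sum(-1)  # weight-averaged depth per ray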
chenxn2020/GOSE | GOSEfinetune/models/layoutlmv2/modeling_layoutlmv2.py | [
{
"identifier": "ReOutput",
"path": "GOSEfinetune/utils.py",
"snippet": "class ReOutput(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n entities: Optional[Dict] = None\n relations: Optional[Dict] = None\n pred_relations: Optional[Dict] = None"
},
{
"identifier": "LayoutLMv2Config",
"path": "GOSEfinetune/models/layoutlmv2/configuration_layoutlmv2.py",
"snippet": "class LayoutLMv2Config(LayoutLMConfig):\n model_type = \"layoutlmv2\"\n\n def __init__(\n self,\n vocab_size=30522,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02,\n layer_norm_eps=1e-12,\n pad_token_id=0,\n gradient_checkpointing=False,\n max_2d_position_embeddings=1024,\n max_rel_pos=128,\n rel_pos_bins=32,\n fast_qkv=True,\n max_rel_2d_pos=256,\n rel_2d_pos_bins=64,\n convert_sync_batchnorm=True,\n image_feature_pool_shape=[7, 7, 256],\n coordinate_size=128,\n shape_size=128,\n has_relative_attention_bias=True,\n has_spatial_attention_bias=True,\n has_visual_segment_embedding=False,\n **kwargs\n ):\n super().__init__(\n vocab_size=vocab_size,\n hidden_size=hidden_size,\n num_hidden_layers=num_hidden_layers,\n num_attention_heads=num_attention_heads,\n intermediate_size=intermediate_size,\n hidden_act=hidden_act,\n hidden_dropout_prob=hidden_dropout_prob,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n max_position_embeddings=max_position_embeddings,\n type_vocab_size=type_vocab_size,\n initializer_range=initializer_range,\n layer_norm_eps=layer_norm_eps,\n pad_token_id=pad_token_id,\n gradient_checkpointing=gradient_checkpointing,\n **kwargs,\n )\n self.max_2d_position_embeddings = max_2d_position_embeddings\n self.max_rel_pos = max_rel_pos\n self.rel_pos_bins = rel_pos_bins\n self.fast_qkv = fast_qkv\n self.max_rel_2d_pos = max_rel_2d_pos\n self.rel_2d_pos_bins = rel_2d_pos_bins\n self.convert_sync_batchnorm = convert_sync_batchnorm\n self.image_feature_pool_shape = image_feature_pool_shape\n self.coordinate_size = coordinate_size\n self.shape_size = shape_size\n self.has_relative_attention_bias = has_relative_attention_bias\n self.has_spatial_attention_bias = has_spatial_attention_bias\n self.has_visual_segment_embedding = has_visual_segment_embedding"
},
{
"identifier": "add_layoutlmv2_config",
"path": "GOSEfinetune/models/layoutlmv2/detectron2_config.py",
"snippet": "def add_layoutlmv2_config(cfg):\n _C = cfg\n # -----------------------------------------------------------------------------\n # Config definition\n # -----------------------------------------------------------------------------\n _C.MODEL.MASK_ON = True\n\n # When using pre-trained models in Detectron1 or any MSRA models,\n # std has been absorbed into its conv1 weights, so the std needs to be set 1.\n # Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)\n _C.MODEL.PIXEL_STD = [57.375, 57.120, 58.395]\n\n # ---------------------------------------------------------------------------- #\n # Backbone options\n # ---------------------------------------------------------------------------- #\n _C.MODEL.BACKBONE.NAME = \"build_resnet_fpn_backbone\"\n\n # ---------------------------------------------------------------------------- #\n # FPN options\n # ---------------------------------------------------------------------------- #\n # Names of the input feature maps to be used by FPN\n # They must have contiguous power of 2 strides\n # e.g., [\"res2\", \"res3\", \"res4\", \"res5\"]\n _C.MODEL.FPN.IN_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n\n # ---------------------------------------------------------------------------- #\n # Anchor generator options\n # ---------------------------------------------------------------------------- #\n # Anchor sizes (i.e. sqrt of area) in absolute pixels w.r.t. the network input.\n # Format: list[list[float]]. SIZES[i] specifies the list of sizes\n # to use for IN_FEATURES[i]; len(SIZES) == len(IN_FEATURES) must be true,\n # or len(SIZES) == 1 is true and size list SIZES[0] is used for all\n # IN_FEATURES.\n _C.MODEL.ANCHOR_GENERATOR.SIZES = [[32], [64], [128], [256], [512]]\n\n # ---------------------------------------------------------------------------- #\n # RPN options\n # ---------------------------------------------------------------------------- #\n # Names of the input feature maps to be used by RPN\n # e.g., [\"p2\", \"p3\", \"p4\", \"p5\", \"p6\"] for FPN\n _C.MODEL.RPN.IN_FEATURES = [\"p2\", \"p3\", \"p4\", \"p5\", \"p6\"]\n # Number of top scoring RPN proposals to keep before applying NMS\n # When FPN is used, this is *per FPN level* (not total)\n _C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 2000\n _C.MODEL.RPN.PRE_NMS_TOPK_TEST = 1000\n # Number of top scoring RPN proposals to keep after applying NMS\n # When FPN is used, this limit is applied per level and then again to the union\n # of proposals from all levels\n # NOTE: When FPN is used, the meaning of this config is different from Detectron1.\n # It means per-batch topk in Detectron1, but per-image topk here.\n # See the \"find_top_rpn_proposals\" function for details.\n _C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 1000\n _C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000\n\n # ---------------------------------------------------------------------------- #\n # ROI HEADS options\n # ---------------------------------------------------------------------------- #\n _C.MODEL.ROI_HEADS.NAME = \"StandardROIHeads\"\n # Number of foreground classes\n _C.MODEL.ROI_HEADS.NUM_CLASSES = 5\n # Names of the input feature maps to be used by ROI heads\n # Currently all heads (box, mask, ...) 
use the same input feature map list\n # e.g., [\"p2\", \"p3\", \"p4\", \"p5\"] is commonly used for FPN\n _C.MODEL.ROI_HEADS.IN_FEATURES = [\"p2\", \"p3\", \"p4\", \"p5\"]\n\n # ---------------------------------------------------------------------------- #\n # Box Head\n # ---------------------------------------------------------------------------- #\n # C4 don't use head name option\n # Options for non-C4 models: FastRCNNConvFCHead,\n _C.MODEL.ROI_BOX_HEAD.NAME = \"FastRCNNConvFCHead\"\n _C.MODEL.ROI_BOX_HEAD.NUM_FC = 2\n _C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14\n\n # ---------------------------------------------------------------------------- #\n # Mask Head\n # ---------------------------------------------------------------------------- #\n _C.MODEL.ROI_MASK_HEAD.NAME = \"MaskRCNNConvUpsampleHead\"\n _C.MODEL.ROI_MASK_HEAD.NUM_CONV = 4 # The number of convs in the mask head\n _C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 7\n\n # ---------------------------------------------------------------------------- #\n # ResNe[X]t options (ResNets = {ResNet, ResNeXt}\n # Note that parts of a resnet may be used for both the backbone and the head\n # These options apply to both\n # ---------------------------------------------------------------------------- #\n _C.MODEL.RESNETS.DEPTH = 101\n _C.MODEL.RESNETS.SIZES = [[32], [64], [128], [256], [512]]\n _C.MODEL.RESNETS.ASPECT_RATIOS = [[0.5, 1.0, 2.0]]\n _C.MODEL.RESNETS.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"] # res4 for C4 backbone, res2..5 for FPN backbone\n\n # Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt\n _C.MODEL.RESNETS.NUM_GROUPS = 32\n\n # Baseline width of each group.\n # Scaling this parameters will scale the width of all bottleneck layers.\n _C.MODEL.RESNETS.WIDTH_PER_GROUP = 8\n\n # Place the stride 2 conv on the 1x1 filter\n # Use True only for the original MSRA ResNet; use False for C2 and Torch models\n _C.MODEL.RESNETS.STRIDE_IN_1X1 = False"
},
{
"identifier": "GOSE",
"path": "GOSEfinetune/modules/decoders/gose.py",
"snippet": "class GOSE(nn.Module):\n def __init__(self, args):\n super().__init__()\n #(rounds,num_heads)\n # self.rounds = 4\n self.args = args\n self.rounds = args.rounds+1\n self.norm = False\n if args.backbone_name == 'lilt':\n self.hidden_size = 960\n elif args.backbone_name == 'xlm':\n self.hidden_size = 768\n self.hidden_dropout_prob = 0.5\n #默认only-mean pooling\n self.pooling_mode = args.pooling_mode\n self.use_gam = args.use_gam\n self.loss_fct = CrossEntropyLoss()\n self.use_prefix = args.use_prefix\n #---对global-attention使用稀疏注意力\n self.use_global_mask = args.use_global_mask\n #--------\n self.use_gate = args.use_gate\n print(f\"**********************************Backbone: {args.backbone_name}****************************\")\n print(f\"**********************************Use_GAM: {self.use_gam}************************************\")\n print(f\"**********************************Use_Prefix: {self.use_prefix}********************************\")\n print(f\"**********************************Use_Gate: {self.use_gate}************************************\")\n # print(f\"**********************************Use_Global_Mask: {self.use_global_mask}**********************\")\n print(f\"**********************************Pooling_Mode: {self.pooling_mode}****************************\")\n print(f\"**********************************Iterative_Rounds: {self.rounds-1}****************************\")\n print(f\"**************************************************************\")\n print(f\"**********************************No_Iteration: {self.args.no_it}********************************\")\n print(f\"**********************************No_Global: {self.args.no_global}********************************\")\n print(f\"**********************************Window_size: {self.args.window_size}********************************\")\n # self.mode = 'only-mean'\n # self.mode = 'only-max'\n # self.mode = 'attn-max'\n\n\n \n self.dropout = nn.Dropout(self.hidden_dropout_prob)\n self.elu=nn.ELU()\n self.biaffine = BiaffineAttention(self.hidden_size//2 , 2)\n self.ffn = nn.Linear(2, self.hidden_size//2)\n self.ffn_key = nn.Linear(self.hidden_size, self.hidden_size//2)\n self.ffn_value = nn.Linear(self.hidden_size, self.hidden_size//2)\n\n # attention config\n self.dim = self.hidden_size //2\n self.num_heads = 1\n self.num_tokens = 8 # max_len = 8\n self.window_size = args.window_size # 8 # window_size * S = H \n self.qkv_bias = False\n self.drop = 0\n self.attn_drop = 0\n self.drop_path = 0\n self.max_len = args.max_len #64\n self.norm1 = nn.LayerNorm(self.dim)\n self.norm2 = nn.LayerNorm(self.dim)\n self.global_token_num = args.global_token_num\n print(f\"**********************************Global_token: {self.global_token_num}****************************\")\n self.global_token = nn.Parameter(torch.zeros(1, self.global_token_num, self.hidden_size //2))\n self.attn = Attention(self.dim,num_heads=self.num_heads, num_tokens=self.num_tokens, \n window_size=self.window_size,qkv_bias=self.qkv_bias, \n attn_drop=self.attn_drop, proj_drop=self.drop, args=args)\n\n self.cnt = 0\n self.loss_fcn = CrossEntropyLoss()\n self.normal = True\n self.dummy_vec = nn.Parameter(torch.Tensor(1, self.hidden_size//2))\n nn.init.normal_(self.dummy_vec)\n #----gate\n self.gru = GRU(self.hidden_size//2) \n #---layout-prefix-tuning\n self.axis_dis_fn = nn.Linear(1, self.hidden_size//12)\n self.axis_angle_fn = nn.Linear(1, self.hidden_size//12)\n \n def create_global_mask(self):\n global_mask = torch.zeros(self.global_token_num, self.max_len, self.max_len).cuda()\n 
step = self.num_tokens\n for idx in range(self.global_token_num):\n row_ids = idx // self.num_tokens\n column_ids = idx % self.num_tokens\n row_start = row_ids * step\n column_start = column_ids * step\n global_mask[idx, row_start:row_start+self.num_tokens,:] = 1\n global_mask[idx, :, column_start:column_start+self.num_tokens] = 1\n return global_mask\n \n def get_entities_kv_index_list(self, entities):\n\n M = self.max_len\n entities_label = entities['label']\n\n entities_key_index = [index for index,label in enumerate(entities_label) if label == 1 ]\n entities_value_index = [index for index,label in enumerate(entities_label) if label == 2 ] \n key_num, value_num = len(entities_key_index),len(entities_value_index)\n '''\n in re.py\n if len(all_possible_relations) == 0:\n all_possible_relations = set([(0, 1)])\n '''\n if key_num * value_num == 0:\n #print(\"all_possible_relations == 0\")\n entities_key_index = [0]\n entities_value_index = [1]\n if key_num > M :\n entities_key_index = entities_key_index[:M]\n self.normal = False\n if value_num > M :\n entities_value_index = entities_value_index[:M]\n self.normal = False\n\n return entities_key_index, entities_value_index\n\n \n def forward(self, hidden_state, entities,relations, bbox):\n #if self.cnt == 30: set the num + 1 which failed\n # from IPython import embed;embed()\n self.cnt += 1\n B ,_ ,H = hidden_state.shape\n M = self.max_len\n device = hidden_state.device\n\n loss = 0\n all_pred_relations = []\n\n # B len(entities)\n # entities_label = torch.stack([torch.tensor(dict['label']) for dict in entities],dim=0)\n # padding to max_len M 64\n \n key_repr_list = []\n value_repr_list = []\n key_mask_list = []\n value_mask_list = []\n key_bbox_list, value_bbox_list = [], []\n for b in range(B):\n #key_repr ~ N,H -> 64,H/2\n #value_repr ~ M,H -> 64,H/2\n if len(entities[b][\"start\"]) <= 2:\n entities[b] = {\"end\": [1, 1], \"label\": [0, 0], \"start\": [0, 0]}\n \n entities_key_index, entities_value_index = self.get_entities_kv_index_list(entities[b])\n entities_first_token_index = torch.tensor(entities[b]['start'])\n \n entities_key_first_token_index = entities_first_token_index[entities_key_index]\n entities_value_first_token_index = entities_first_token_index[entities_value_index]\n key_repr = hidden_state[b][entities_key_first_token_index,:]\n value_repr = hidden_state[b][entities_value_first_token_index,:]\n \n key_num,value_num = key_repr.shape[0],value_repr.shape[0]\n # padding key_repr key_num,H -> max_len,H\n # generate mask shape like max_len,H\n \n key_mask_list.append(torch.tensor([[1.]] * key_num + [[0.]] * (M - key_num),device=device).repeat(1,H//2))\n value_mask_list.append(torch.tensor([[1.]] * value_num + [[0.]] * (M - value_num),device=device).repeat(1,H//2))\n # padding key_repr key_num,H -> max_len,H\n key_repr_list.append(F.pad(key_repr,(0, 0, 0, M - key_num)))\n value_repr_list.append(F.pad(value_repr,(0, 0, 0, M - value_num)))\n #----得到kv实体的bbox\n key_bbox = bbox[b][entities_key_first_token_index]\n value_bbox = bbox[b][entities_value_first_token_index]\n key_bbox_list.append(F.pad(key_bbox,(0, 0, 0, M - key_num)))\n value_bbox_list.append(F.pad(value_bbox,(0, 0, 0, M - value_num)))\n\n # batch max_len hidden_size\n key_repr = torch.stack(key_repr_list,dim=0) \n key_mask = torch.stack(key_mask_list,dim=0)\n \n value_repr = torch.stack(value_repr_list,dim=0)\n value_mask = torch.stack(value_mask_list,dim=0)\n \n\n #key_mask * value_mask -> table_mask B,M,H * B,M,H -> B M M H\n table_mask = 
key_mask.unsqueeze(2).repeat(1,1,M,1)\\\n *value_mask.unsqueeze(1).repeat(1,M,1,1)\n #---global_mask\n if self.use_global_mask:\n self.global_mask = self.create_global_mask()\n global_mask = self.global_mask.unsqueeze(0).repeat(B,1,1,1) #shape[bsz,global_token_num,M,M]\n # global_mask = global_mask.view(B, self.global_token_num, -1)\n else:\n global_mask = None\n \n \n key_mask = key_mask[:,:,0].bool()\n value_mask = value_mask[:,:,0].bool()\n key_ffn = self.ffn_key(key_repr)\n value_ffn = self.ffn_value(value_repr)\n \n if self.norm == True:\n key_ffn = self.norm1(key_repr)\n value_ffn = self.norm1(value_repr)\n global_token = self.global_token.expand(B, -1, -1)\n key_bbox = torch.stack(key_bbox_list, dim=0) \n value_bbox = torch.stack(value_bbox_list, dim=0) \n layout_repr = self.calc_layout(key_bbox, value_bbox)\n layout_repr = layout_repr * table_mask\n layout_repr = layout_repr.view(B,M*M,H//2)\n for i in range(self.rounds):\n '''\n method 1 with biaffine \n \n table_mask.shape B M M H/2 -> B M M H (M=64)\n table_logits.shape B M M H/2 -> B M M 2\n B M M 2 -> B M M H\n attention input B (64+1)*64 384\n table input 64 * 64 \n window_size 8\n token_num 64/8 * 64/8 = 64\n '''\n #key_ffn = self.ffn_key(key_repr)\n #value_ffn = self.ffn_value(value_repr)\n #key_ffn = self.ffn_key(key_ffn)\n #value_ffn = self.ffn_value(value_ffn)\n \n table_logits = self.biaffine(key_ffn.unsqueeze(2).repeat(1,1,M,1),\n value_ffn.unsqueeze(1).repeat(1,M,1,1))\n if i < self.rounds-1:\n table_logits = self.ffn(table_logits) * table_mask\n \n if self.use_gam:\n table_logits = table_logits.view(B,M*M,H//2)\n \n table_logits = torch.cat((global_token, table_logits), dim=1)\n if self.use_prefix:\n table_logits = self.attn(table_logits, M, M, table_mask, global_mask, layout_prefix=layout_repr, key_num=key_num, value_num=value_num)\n else:\n table_logits = self.attn(table_logits, M, M, table_mask, global_mask, layout_prefix=None)\n global_token_new = table_logits[:,:self.global_token_num,:]\n global_token = global_token + global_token_new\n table_logits = table_logits[:,self.global_token_num:,:]\n table_logits = table_logits.view(B,M,M,H//2)\n table_logits = table_logits * table_mask\n key_new, value_new = self.get_new_repr(table_logits, key_mask, value_mask)\n if self.norm == True:\n key_new = self.norm2(key_new)\n value_new = self.norm2(value_new)\n if self.use_gate:\n key_ffn = self.gru(key_ffn,key_new)\n value_ffn = self.gru(value_ffn,value_new)\n \n elif self.args.no_it:\n key_ffn = key_new\n value_ffn = value_new\n elif self.args.use_add:\n key_ffn = key_ffn + key_new\n value_ffn = value_ffn + value_new \n else:\n table_logits = table_logits * table_mask[:,:,:,:2]\n\n # table_logits M N 2\n # table_logits.unsqueeze(0)\n # batch_table_logits = table_logits if batch_table_logits == None else torch.cat((batch_table_logits,table_logits),dim=0)\n\n loss = self.get_loss(table_logits,entities,relations,key_mask,value_mask)\n all_pred_relations = self.get_predicted_relations(table_logits,entities,key_mask,value_mask, bbox)\n return loss,all_pred_relations\n \n def calc_layout(self, head_bbox, tail_bbox):\n bsz, num, _ = head_bbox.shape\n head_bbox = head_bbox.unsqueeze(2).repeat(1,1,num,1)\n tail_bbox = tail_bbox.unsqueeze(1).repeat(1,num,1,1)\n \n #-----中心点坐标特征\n head_bbox_center = torch.div(torch.cat(((head_bbox[:,:,:,0]+head_bbox[:,:,:,2]).view(-1,1), (head_bbox[:,:,:,1]+head_bbox[:,:,:,3]).view(-1,1)),dim=1), 2)\n tail_bbox_center = torch.div(torch.cat(((tail_bbox[:,:,:,0]+tail_bbox[:,:,:,2]).view(-1,1), 
(tail_bbox[:,:,:,1]+tail_bbox[:,:,:,3]).view(-1,1)),dim=1), 2)\n head_tail_center_dis, hea_tail_center_angle = self.axis_features(head_bbox_center, tail_bbox_center)\n head_tail_center_dis_feature = self.axis_dis_fn(head_tail_center_dis)\n head_tail_center_angle_feature = self.axis_angle_fn(hea_tail_center_angle)\n #-----左上点坐标特征\n head_bbox_left_top = torch.cat((head_bbox[:,:,:, 0].view(-1,1), head_bbox[:,:,:, 1].view(-1,1)), dim=1)\n tail_bbox_left_top = torch.cat((tail_bbox[:,:,:, 0].view(-1,1), tail_bbox[:,:,:, 1].view(-1,1)), dim=1)\n head_tail_lt_dis, hea_tail_lt_angle = self.axis_features(head_bbox_left_top, tail_bbox_left_top)\n head_tail_lt_dis_feature = self.axis_dis_fn(head_tail_lt_dis)\n hea_tail_lt_angle_feature = self.axis_angle_fn(hea_tail_lt_angle)\n #-----右下点坐标特征\n head_bbox_right_down = torch.cat((head_bbox[:,:,:, 2].view(-1,1), head_bbox[:,:,:, 3].view(-1,1)), dim=1)\n tail_bbox_right_down = torch.cat((tail_bbox[:,:,:, 2].view(-1,1), tail_bbox[:,:,:, 3].view(-1,1)), dim=1)\n head_tail_rd_dis, hea_tail_rd_angle = self.axis_features(head_bbox_right_down, tail_bbox_right_down)\n head_tail_rd_dis_feature = self.axis_dis_fn(head_tail_rd_dis)\n hea_tail_rd_angle_feature = self.axis_angle_fn(hea_tail_rd_angle)\n layout_repr = torch.cat(\n (head_tail_center_dis_feature, head_tail_center_angle_feature\n , head_tail_lt_dis_feature, hea_tail_lt_angle_feature\n , head_tail_rd_dis_feature, hea_tail_rd_angle_feature\n ),\n dim=-1\n )\n layout_repr = layout_repr.view(bsz, num, num, -1) \n return layout_repr\n \n \n \n def axis_features(self, tmp_bbox_1, tmp_bbox_2):\n tmp_bbox_distance = torch.pow(torch.sum(torch.pow(tmp_bbox_1 - tmp_bbox_2, 2), dim=1), 0.5) #欧氏距离\n tmp_bbox_distance = tmp_bbox_distance.view(-1, 1)\n ##########计算角度\n head_tail_x = tmp_bbox_1[:, 0] - tmp_bbox_2[:, 0]\n head_tail_y = tmp_bbox_1[:, 1] - tmp_bbox_2[:, 1]\n tmp_bbox_angle = torch.div(torch.atan2(head_tail_y, head_tail_x), 3.1416) #正切的角度\n tmp_bbox_angle = tmp_bbox_angle.view(-1, 1)\n return torch.div(tmp_bbox_distance, 1000), tmp_bbox_angle\n\n \n \n \n def get_new_repr(self, table_logits, key_mask, value_mask):\n key_repr_list = []\n value_repr_list = []\n bs,_,_,_ = table_logits.shape\n for b in range(bs):\n logit = table_logits[b][key_mask[b]]\n logit = logit[:,value_mask[b]]\n key_num, value_num, _ = logit.shape\n if self.pooling_mode == 'max':\n key_repr = logit.max(dim=1).values \n value_repr = logit.max(dim=0).values \n else:\n key_repr = logit.mean(dim=1)\n value_repr = logit.mean(dim=0)\n key_repr_list.append(F.pad(key_repr,(0, 0, 0, self.max_len - key_num)))\n value_repr_list.append(F.pad(value_repr,(0, 0, 0, self.max_len - value_num)))\n key_new = torch.stack(key_repr_list,dim=0) \n value_new = torch.stack(value_repr_list,dim=0)\n return key_new, value_new\n \n def get_predicted_relations(self, logists,entities,key_mask,value_mask,bbox):\n all_pred_relations = []\n #logits.shape B,M,N,2\n #here is one batch so no dim B\n B,N,M,_=logists.shape\n for b in range(B):\n\n pred_relations = []\n logist = logists[b][key_mask[b]]\n logist = logist[:,value_mask[b]]\n N,M,_ = logist.shape\n \n #---index指的是序列中的第几个实体\n entities_key_index, entities_value_index = self.get_entities_kv_index_list(entities[b])\n \n # if len(entities_key_index) > 64 or len(entities_value_index) > 64:\n # from IPython import embed;embed();exit()\n \n for index in range(M*N):\n key = index // M\n value = index % M\n pred_label = logist[key][value].argmax(-1)\n\n if pred_label == 0:\n continue\n \n rel = {}\n rel[\"head_id\"] = 
entities_key_index[key]\n rel[\"head\"] = (entities[b][\"start\"][rel[\"head_id\"]], entities[b][\"end\"][rel[\"head_id\"]])\n rel[\"head_type\"] = entities[b][\"label\"][rel[\"head_id\"]]\n\n rel[\"tail_id\"] = entities_value_index[value]\n rel[\"tail\"] = (entities[b][\"start\"][rel[\"tail_id\"]], entities[b][\"end\"][rel[\"tail_id\"]])\n rel[\"tail_type\"] = entities[b][\"label\"][rel[\"tail_id\"]]\n rel[\"type\"] = 1\n key_bbox_left_top = bbox[b][entities[b][\"start\"][rel[\"head_id\"]]].tolist()[:2]\n value_bbox_left_top = bbox[b][entities[b][\"start\"][rel[\"tail_id\"]]].tolist()[:2]\n rel[\"link\"] = (tuple(key_bbox_left_top), tuple(value_bbox_left_top))\n #--------\n pred_relations.append(rel)\n all_pred_relations.append(pred_relations)\n \n return all_pred_relations\n \n \n def get_loss(self,logists,entities,relations,key_mask,value_mask):\n #mask B M M H\n device = logists.device\n loss = 0\n B = key_mask.shape[0]\n all_logits = []\n all_labels = []\n for b in range(B):\n # 64,64 -> N,M\n logist = logists[b][key_mask[b]]\n logist = logist[:,value_mask[b]]\n N,M,_ = logist.shape\n\n\n entities_key_index, entities_value_index = self.get_entities_kv_index_list(entities[b])\n \n entities_key_list = relations[b]['head']\n entities_value_list = relations[b]['tail']\n\n labels = torch.zeros(N*M).to(device).view(N,M)\n \n for i in range(len(entities_key_list)):\n try:\n key = entities_key_index.index(entities_key_list[i])\n value = entities_value_index.index(entities_value_list[i])\n labels[key][value] = 1\n except:\n continue\n \n \n labels = labels.view(-1).to(dtype=torch.long)\n logist = logist.view(N*M,-1).to(dtype=torch.float)\n all_logits.append(logist)\n all_labels.append(labels)\n all_logits = torch.cat(all_logits, 0)\n all_labels = torch.cat(all_labels, 0)\n loss = self.loss_fcn(all_logits+1e-10, all_labels)\n if (torch.isnan(loss).sum().item() > 0):\n loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)\n \n return loss"
}
] | import math
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import detectron2
from torch import nn
from torch.nn import CrossEntropyLoss
from detectron2.modeling import META_ARCH_REGISTRY
from transformers import PreTrainedModel
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
TokenClassifierOutput,
)
from transformers.modeling_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMIntermediate as LayoutLMv2Intermediate
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMOutput as LayoutLMv2Output
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMPooler as LayoutLMv2Pooler
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMSelfOutput as LayoutLMv2SelfOutput
from transformers.utils import logging
from ...utils import ReOutput
from .configuration_layoutlmv2 import LayoutLMv2Config
from .detectron2_config import add_layoutlmv2_config
from ...modules.decoders.gose import GOSE | 9,920 | last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
class LayoutLMv2ForTokenClassification(LayoutLMv2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.layoutlmv2 = LayoutLMv2Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def get_input_embeddings(self):
return self.layoutlmv2.embeddings.word_embeddings
def forward(
self,
input_ids=None,
bbox=None,
image=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.layoutlmv2(
input_ids=input_ids,
bbox=bbox,
image=image,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
seq_length = input_ids.size(1)
sequence_output, image_output = outputs[0][:, :seq_length], outputs[0][:, seq_length:]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class LayoutLMv2ForRelationExtraction(LayoutLMv2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.layoutlmv2 = LayoutLMv2Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.decoder = config.decoder_name
if self.decoder == 're':
self.extractor = REDecoder(config, config.hidden_size)
elif self.decoder == 'gose':
self.extractor = GOSE(config)
self.init_weights()
def forward(
self,
input_ids,
bbox,
labels=None,
image=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
entities=None,
relations=None,
):
outputs = self.layoutlmv2(
input_ids=input_ids,
bbox=bbox,
image=image,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
)
seq_length = input_ids.size(1)
sequence_output, image_output = outputs[0][:, :seq_length], outputs[0][:, seq_length:]
sequence_output = self.dropout(sequence_output)
loss, pred_relations = self.extractor(sequence_output, entities, relations, bbox)
| # coding=utf-8
logger = logging.get_logger(__name__)
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = [
"layoutlmv2-base-uncased",
"layoutlmv2-large-uncased",
]
LayoutLMv2LayerNorm = torch.nn.LayerNorm
class LayoutLMv2Embeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super(LayoutLMv2Embeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = LayoutLMv2LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
def _cal_spatial_position_embeddings(self, bbox):
try:
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
except IndexError as e:
raise IndexError("The :obj:`bbox` coordinate values should be within 0-1000 range.") from e
h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])
spatial_position_embeddings = torch.cat(
[
left_position_embeddings,
upper_position_embeddings,
right_position_embeddings,
lower_position_embeddings,
h_position_embeddings,
w_position_embeddings,
],
dim=-1,
)
return spatial_position_embeddings
class LayoutLMv2SelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.fast_qkv = config.fast_qkv
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.has_relative_attention_bias = config.has_relative_attention_bias
self.has_spatial_attention_bias = config.has_spatial_attention_bias
if config.fast_qkv:
self.qkv_linear = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=False)
self.q_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
self.v_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
else:
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def compute_qkv(self, hidden_states):
if self.fast_qkv:
qkv = self.qkv_linear(hidden_states)
q, k, v = torch.chunk(qkv, 3, dim=-1)
if q.ndimension() == self.q_bias.ndimension():
q = q + self.q_bias
v = v + self.v_bias
else:
_sz = (1,) * (q.ndimension() - 1) + (-1,)
q = q + self.q_bias.view(*_sz)
v = v + self.v_bias.view(*_sz)
else:
q = self.query(hidden_states)
k = self.key(hidden_states)
v = self.value(hidden_states)
return q, k, v
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None,
):
q, k, v = self.compute_qkv(hidden_states)
# (B, L, H*D) -> (B, H, L, D)
query_layer = self.transpose_for_scores(q)
key_layer = self.transpose_for_scores(k)
value_layer = self.transpose_for_scores(v)
query_layer = query_layer / math.sqrt(self.attention_head_size)
# [BSZ, NAT, L, L]
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.has_relative_attention_bias:
attention_scores += rel_pos
if self.has_spatial_attention_bias:
attention_scores += rel_2d_pos
attention_scores = attention_scores.float().masked_fill_(attention_mask.to(torch.bool), float("-inf"))
attention_probs = F.softmax(attention_scores, dim=-1, dtype=torch.float32).type_as(value_layer)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class LayoutLMv2Attention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = LayoutLMv2SelfAttention(config)
self.output = LayoutLMv2SelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class LayoutLMv2Layer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = LayoutLMv2Attention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention = LayoutLMv2Attention(config)
self.intermediate = LayoutLMv2Intermediate(config)
self.output = LayoutLMv2Output(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
ret = 0
if bidirectional:
num_buckets //= 2
ret += (relative_position > 0).long() * num_buckets
n = torch.abs(relative_position)
else:
n = torch.max(-relative_position, torch.zeros_like(relative_position))
# now n is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = n < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).to(torch.long)
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
class LayoutLMv2Encoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([LayoutLMv2Layer(config) for _ in range(config.num_hidden_layers)])
self.has_relative_attention_bias = config.has_relative_attention_bias
self.has_spatial_attention_bias = config.has_spatial_attention_bias
if self.has_relative_attention_bias:
self.rel_pos_bins = config.rel_pos_bins
self.max_rel_pos = config.max_rel_pos
self.rel_pos_onehot_size = config.rel_pos_bins
self.rel_pos_bias = nn.Linear(self.rel_pos_onehot_size, config.num_attention_heads, bias=False)
if self.has_spatial_attention_bias:
self.max_rel_2d_pos = config.max_rel_2d_pos
self.rel_2d_pos_bins = config.rel_2d_pos_bins
self.rel_2d_pos_onehot_size = config.rel_2d_pos_bins
self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)
self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)
def _cal_1d_pos_emb(self, hidden_states, position_ids):
rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
rel_pos = relative_position_bucket(
rel_pos_mat,
num_buckets=self.rel_pos_bins,
max_distance=self.max_rel_pos,
)
rel_pos = F.one_hot(rel_pos, num_classes=self.rel_pos_onehot_size).type_as(hidden_states)
rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2)
rel_pos = rel_pos.contiguous()
return rel_pos
def _cal_2d_pos_emb(self, hidden_states, bbox):
position_coord_x = bbox[:, :, 0]
position_coord_y = bbox[:, :, 3]
rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1)
rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1)
rel_pos_x = relative_position_bucket(
rel_pos_x_2d_mat,
num_buckets=self.rel_2d_pos_bins,
max_distance=self.max_rel_2d_pos,
)
rel_pos_y = relative_position_bucket(
rel_pos_y_2d_mat,
num_buckets=self.rel_2d_pos_bins,
max_distance=self.max_rel_2d_pos,
)
rel_pos_x = F.one_hot(rel_pos_x, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
rel_pos_y = F.one_hot(rel_pos_y, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
rel_pos_x = self.rel_pos_x_bias(rel_pos_x).permute(0, 3, 1, 2)
rel_pos_y = self.rel_pos_y_bias(rel_pos_y).permute(0, 3, 1, 2)
rel_pos_x = rel_pos_x.contiguous()
rel_pos_y = rel_pos_y.contiguous()
rel_2d_pos = rel_pos_x + rel_pos_y
return rel_2d_pos
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
bbox=None,
position_ids=None,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
rel_pos = self._cal_1d_pos_emb(hidden_states, position_ids) if self.has_relative_attention_bias else None
rel_2d_pos = self._cal_2d_pos_emb(hidden_states, bbox) if self.has_spatial_attention_bias else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warn(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
class LayoutLMv2PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LayoutLMv2Config
pretrained_model_archive_map = LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST
base_model_prefix = "layoutlmv2"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, LayoutLMv2LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def my_convert_sync_batchnorm(module, process_group=None):
# same as `nn.modules.SyncBatchNorm.convert_sync_batchnorm`, but also allows converting from `detectron2.layers.FrozenBatchNorm2d`
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
return nn.modules.SyncBatchNorm.convert_sync_batchnorm(module, process_group)
module_output = module
if isinstance(module, detectron2.layers.FrozenBatchNorm2d):
module_output = torch.nn.SyncBatchNorm(
num_features=module.num_features,
eps=module.eps,
affine=True,
track_running_stats=True,
process_group=process_group,
)
module_output.weight = torch.nn.Parameter(module.weight)
module_output.bias = torch.nn.Parameter(module.bias)
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = torch.tensor(0, dtype=torch.long, device=module.running_mean.device)
for name, child in module.named_children():
module_output.add_module(name, my_convert_sync_batchnorm(child, process_group))
del module
return module_output
class VisualBackbone(nn.Module):
def __init__(self, config):
super().__init__()
self.cfg = detectron2.config.get_cfg()
add_layoutlmv2_config(self.cfg)
meta_arch = self.cfg.MODEL.META_ARCHITECTURE
model = META_ARCH_REGISTRY.get(meta_arch)(self.cfg)
assert isinstance(model.backbone, detectron2.modeling.backbone.FPN)
self.backbone = model.backbone
if (
config.convert_sync_batchnorm
and torch.distributed.is_available()
and torch.distributed.is_initialized()
and torch.distributed.get_rank() > -1
):
self_rank = torch.distributed.get_rank()
node_size = torch.cuda.device_count()
world_size = torch.distributed.get_world_size()
assert world_size % node_size == 0
node_global_ranks = [
list(range(i * node_size, (i + 1) * node_size)) for i in range(world_size // node_size)
]
sync_bn_groups = [
torch.distributed.new_group(ranks=node_global_ranks[i]) for i in range(world_size // node_size)
]
node_rank = self_rank // node_size
assert self_rank in node_global_ranks[node_rank]
self.backbone = my_convert_sync_batchnorm(self.backbone, process_group=sync_bn_groups[node_rank])
assert len(self.cfg.MODEL.PIXEL_MEAN) == len(self.cfg.MODEL.PIXEL_STD)
num_channels = len(self.cfg.MODEL.PIXEL_MEAN)
self.register_buffer(
"pixel_mean",
torch.Tensor(self.cfg.MODEL.PIXEL_MEAN).view(num_channels, 1, 1),
)
self.register_buffer("pixel_std", torch.Tensor(self.cfg.MODEL.PIXEL_STD).view(num_channels, 1, 1))
self.out_feature_key = "p2"
if torch.is_deterministic():
logger.warning("using `AvgPool2d` instead of `AdaptiveAvgPool2d`")
input_shape = (224, 224)
backbone_stride = self.backbone.output_shape()[self.out_feature_key].stride
self.pool = nn.AvgPool2d(
(
math.ceil(math.ceil(input_shape[0] / backbone_stride) / config.image_feature_pool_shape[0]),
math.ceil(math.ceil(input_shape[1] / backbone_stride) / config.image_feature_pool_shape[1]),
)
)
else:
self.pool = nn.AdaptiveAvgPool2d(config.image_feature_pool_shape[:2])
if len(config.image_feature_pool_shape) == 2:
config.image_feature_pool_shape.append(self.backbone.output_shape()[self.out_feature_key].channels)
assert self.backbone.output_shape()[self.out_feature_key].channels == config.image_feature_pool_shape[2]
def forward(self, images):
images_input = ((images if torch.is_tensor(images) else images.tensor) - self.pixel_mean) / self.pixel_std
features = self.backbone(images_input)
features = features[self.out_feature_key]
features = self.pool(features).flatten(start_dim=2).transpose(1, 2).contiguous()
return features
class LayoutLMv2Model(LayoutLMv2PreTrainedModel):
def __init__(self, config):
super(LayoutLMv2Model, self).__init__(config)
self.config = config
self.has_visual_segment_embedding = config.has_visual_segment_embedding
self.embeddings = LayoutLMv2Embeddings(config)
self.visual = VisualBackbone(config)
self.visual_proj = nn.Linear(config.image_feature_pool_shape[-1], config.hidden_size)
if self.has_visual_segment_embedding:
self.visual_segment_embedding = nn.Parameter(nn.Embedding(1, config.hidden_size).weight[0])
self.visual_LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.visual_dropout = nn.Dropout(config.hidden_dropout_prob)
self.encoder = LayoutLMv2Encoder(config)
self.pooler = LayoutLMv2Pooler(config)
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def _calc_text_embeddings(self, input_ids, bbox, position_ids, token_type_ids):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.embeddings.word_embeddings(input_ids)
position_embeddings = self.embeddings.position_embeddings(position_ids)
spatial_position_embeddings = self.embeddings._cal_spatial_position_embeddings(bbox)
token_type_embeddings = self.embeddings.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + spatial_position_embeddings + token_type_embeddings
embeddings = self.embeddings.LayerNorm(embeddings)
embeddings = self.embeddings.dropout(embeddings)
return embeddings
def _calc_img_embeddings(self, image, bbox, position_ids):
visual_embeddings = self.visual_proj(self.visual(image))
position_embeddings = self.embeddings.position_embeddings(position_ids)
spatial_position_embeddings = self.embeddings._cal_spatial_position_embeddings(bbox)
embeddings = visual_embeddings + position_embeddings + spatial_position_embeddings
if self.has_visual_segment_embedding:
embeddings += self.visual_segment_embedding
embeddings = self.visual_LayerNorm(embeddings)
embeddings = self.visual_dropout(embeddings)
return embeddings
def forward(
self,
input_ids=None,
bbox=None,
image=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
visual_shape = list(input_shape)
visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1]
visual_shape = torch.Size(visual_shape)
final_shape = list(input_shape)
final_shape[1] += visual_shape[1]
final_shape = torch.Size(final_shape)
visual_bbox_x = (
torch.arange(
0,
1000 * (self.config.image_feature_pool_shape[1] + 1),
1000,
device=device,
dtype=bbox.dtype,
)
// self.config.image_feature_pool_shape[1]
)
visual_bbox_y = (
torch.arange(
0,
1000 * (self.config.image_feature_pool_shape[0] + 1),
1000,
device=device,
dtype=bbox.dtype,
)
// self.config.image_feature_pool_shape[0]
)
visual_bbox = torch.stack(
[
visual_bbox_x[:-1].repeat(self.config.image_feature_pool_shape[0], 1),
visual_bbox_y[:-1].repeat(self.config.image_feature_pool_shape[1], 1).transpose(0, 1),
visual_bbox_x[1:].repeat(self.config.image_feature_pool_shape[0], 1),
visual_bbox_y[1:].repeat(self.config.image_feature_pool_shape[1], 1).transpose(0, 1),
],
dim=-1,
).view(-1, bbox.size(-1))
visual_bbox = visual_bbox.repeat(final_shape[0], 1, 1)
final_bbox = torch.cat([bbox, visual_bbox], dim=1)
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
visual_attention_mask = torch.ones(visual_shape, device=device)
final_attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if position_ids is None:
seq_length = input_shape[1]
position_ids = self.embeddings.position_ids[:, :seq_length]
position_ids = position_ids.expand_as(input_ids)
visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat(
input_shape[0], 1
)
final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1)
if bbox is None:
bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)
text_layout_emb = self._calc_text_embeddings(
input_ids=input_ids,
bbox=bbox,
token_type_ids=token_type_ids,
position_ids=position_ids,
)
visual_emb = self._calc_img_embeddings(
image=image,
bbox=visual_bbox,
position_ids=visual_position_ids,
)
final_emb = torch.cat([text_layout_emb, visual_emb], dim=1)
extended_attention_mask = final_attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
else:
head_mask = [None] * self.config.num_hidden_layers
encoder_outputs = self.encoder(
final_emb,
extended_attention_mask,
bbox=final_bbox,
position_ids=final_position_ids,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
class LayoutLMv2ForTokenClassification(LayoutLMv2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.layoutlmv2 = LayoutLMv2Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def get_input_embeddings(self):
return self.layoutlmv2.embeddings.word_embeddings
def forward(
self,
input_ids=None,
bbox=None,
image=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.layoutlmv2(
input_ids=input_ids,
bbox=bbox,
image=image,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
seq_length = input_ids.size(1)
sequence_output, image_output = outputs[0][:, :seq_length], outputs[0][:, seq_length:]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class LayoutLMv2ForRelationExtraction(LayoutLMv2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.layoutlmv2 = LayoutLMv2Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.decoder = config.decoder_name
if self.decoder == 're':
self.extractor = REDecoder(config, config.hidden_size)
elif self.decoder == 'gose':
self.extractor = GOSE(config)
self.init_weights()
def forward(
self,
input_ids,
bbox,
labels=None,
image=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
entities=None,
relations=None,
):
outputs = self.layoutlmv2(
input_ids=input_ids,
bbox=bbox,
image=image,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
)
seq_length = input_ids.size(1)
sequence_output, image_output = outputs[0][:, :seq_length], outputs[0][:, seq_length:]
sequence_output = self.dropout(sequence_output)
loss, pred_relations = self.extractor(sequence_output, entities, relations, bbox)
| return ReOutput( | 0 | 2023-10-19 14:36:32+00:00 | 12k |
mklissa/dceo | dopamine/discrete_domains/run_experiment.py | [
{
"identifier": "dqn_agent",
"path": "dopamine/agents/dqn/dqn_agent.py",
"snippet": "NATURE_DQN_OBSERVATION_SHAPE = atari_lib.NATURE_DQN_OBSERVATION_SHAPE\nNATURE_DQN_DTYPE = atari_lib.NATURE_DQN_DTYPE\nNATURE_DQN_STACK_SIZE = atari_lib.NATURE_DQN_STACK_SIZE\ndef linearly_decaying_epsilon(decay_period, step, warmup_steps, epsilon):\ndef identity_epsilon(unused_decay_period, unused_step, unused_warmup_steps,\n epsilon):\n def __init__(self,\n sess,\n num_actions,\n observation_shape=atari_lib.NATURE_DQN_OBSERVATION_SHAPE,\n observation_dtype=atari_lib.NATURE_DQN_DTYPE,\n stack_size=atari_lib.NATURE_DQN_STACK_SIZE,\n network=atari_lib.NatureDQNNetwork,\n gamma=0.99,\n update_horizon=1,\n min_replay_history=20000,\n update_period=4,\n target_update_period=8000,\n epsilon_fn=linearly_decaying_epsilon,\n epsilon_train=0.01,\n epsilon_eval=0.001,\n epsilon_decay_period=250000,\n tf_device='/cpu:*',\n eval_mode=False,\n use_staging=False,\n max_tf_checkpoints_to_keep=4,\n optimizer=tf.compat.v1.train.RMSPropOptimizer(\n learning_rate=0.00025,\n decay=0.95,\n momentum=0.0,\n epsilon=0.00001,\n centered=True),\n summary_writer=None,\n summary_writing_frequency=500,\n allow_partial_reload=False):\n def _create_network(self, name):\n def _build_networks(self):\n def _build_replay_buffer(self, use_staging):\n def _build_target_q_op(self):\n def _build_train_op(self):\n def _build_sync_op(self):\n def begin_episode(self, observation):\n def step(self, reward, observation):\n def end_episode(self, reward):\n def _select_action(self):\n def _train_step(self):\n def _record_observation(self, observation):\n def _store_transition(self, last_observation, action, reward, is_terminal):\n def _reset_state(self):\n def bundle_and_checkpoint(self, checkpoint_dir, iteration_number):\n def unbundle(self, checkpoint_dir, iteration_number, bundle_dictionary):\nclass DQNAgent(object):"
},
{
"identifier": "implicit_quantile_agent",
"path": "dopamine/agents/implicit_quantile/implicit_quantile_agent.py",
"snippet": "class ImplicitQuantileAgent(rainbow_agent.RainbowAgent):\n def __init__(self,\n sess,\n num_actions,\n network=atari_lib.ImplicitQuantileNetwork,\n kappa=1.0,\n num_tau_samples=32,\n num_tau_prime_samples=32,\n num_quantile_samples=32,\n quantile_embedding_dim=64,\n double_dqn=False,\n summary_writer=None,\n summary_writing_frequency=500):\n def _create_network(self, name):\n def _build_networks(self):\n def _build_target_quantile_values_op(self):\n def _build_train_op(self):"
},
{
"identifier": "rainbow_agent",
"path": "dopamine/agents/rainbow/rainbow_agent.py",
"snippet": "class RainbowAgent(dqn_agent.DQNAgent):\n def __init__(self,\n sess,\n num_actions,\n observation_shape=dqn_agent.NATURE_DQN_OBSERVATION_SHAPE,\n observation_dtype=dqn_agent.NATURE_DQN_DTYPE,\n stack_size=dqn_agent.NATURE_DQN_STACK_SIZE,\n network=atari_lib.RainbowNetwork,\n num_atoms=51,\n vmin=None,\n vmax=10.,\n gamma=0.99,\n update_horizon=1,\n min_replay_history=20000,\n update_period=4,\n target_update_period=8000,\n epsilon_fn=dqn_agent.linearly_decaying_epsilon,\n epsilon_train=0.01,\n epsilon_eval=0.001,\n epsilon_decay_period=250000,\n replay_scheme='prioritized',\n tf_device='/cpu:*',\n use_staging=False,\n optimizer=tf.compat.v1.train.AdamOptimizer(\n learning_rate=0.00025, epsilon=0.0003125),\n summary_writer=None,\n summary_writing_frequency=500):\n def _create_network(self, name):\n def _build_replay_buffer(self, use_staging):\n def _build_target_distribution(self):\n def _build_train_op(self):\n def _store_transition(self,\n last_observation,\n action,\n reward,\n is_terminal,\n priority=None):\ndef project_distribution(supports, weights, target_support,\n validate_args=False):"
},
{
"identifier": "atari_lib",
"path": "dopamine/discrete_domains/atari_lib.py",
"snippet": "NATURE_DQN_OBSERVATION_SHAPE = (84, 84) # Size of downscaled Atari 2600 frame.\nNATURE_DQN_DTYPE = tf.uint8 # DType of Atari 2600 observations.\nNATURE_DQN_STACK_SIZE = 4 # Number of frames in the state stack.\ndef create_atari_environment(game_name=None, sticky_actions=True):\ndef maybe_transform_variable_names(variables, legacy_checkpoint_load=False):\n def __init__(self, num_actions, name=None):\n def call(self, state):\n def __init__(self, num_actions, num_atoms, support, name=None):\n def kernel_initializer():\n def call(self, state):\n def __init__(self, num_actions, quantile_embedding_dim, name=None):\n def kernel_initializer():\n def call(self, state, num_quantiles):\n def __init__(self, environment, frame_skip=4, terminal_on_life_loss=False,\n screen_size=84):\n def observation_space(self):\n def action_space(self):\n def reward_range(self):\n def metadata(self):\n def close(self):\n def reset(self):\n def render(self, mode):\n def step(self, action):\n def _fetch_grayscale_observation(self, output):\n def _pool_and_resize(self):\nclass NatureDQNNetwork(tf.keras.Model):\nclass RainbowNetwork(tf.keras.Model):\nclass ImplicitQuantileNetwork(tf.keras.Model):\nclass AtariPreprocessing(object):"
},
{
"identifier": "checkpointer",
"path": "dopamine/discrete_domains/checkpointer.py",
"snippet": "def get_latest_checkpoint_number(base_directory,\n override_number=None,\n sentinel_file_identifier='checkpoint'):\n def extract_iteration(x):\n def __init__(self, base_directory, checkpoint_file_prefix='ckpt',\n sentinel_file_identifier='checkpoint', checkpoint_frequency=1,\n checkpoint_duration=4,\n keep_every=None):\n def _generate_filename(self, file_prefix, iteration_number):\n def _save_data_to_file(self, data, filename):\n def save_checkpoint(self, iteration_number, data):\n def _clean_up_old_checkpoints(self, iteration_number):\n def _load_data_from_file(self, filename):\n def load_checkpoint(self, iteration_number):\nclass Checkpointer(object):"
},
{
"identifier": "iteration_statistics",
"path": "dopamine/discrete_domains/iteration_statistics.py",
"snippet": "class IterationStatistics(object):\n def __init__(self):\n def append(self, data_pairs):"
},
{
"identifier": "logger",
"path": "dopamine/discrete_domains/logger.py",
"snippet": "class Logger(object):\n def __init__(self, logging_dir, logs_duration=4):\n def __setitem__(self, key, value):\n def _generate_filename(self, filename_prefix, iteration_number):\n def log_to_file(self, filename_prefix, iteration_number):\n def is_logging_enabled(self):"
},
{
"identifier": "dqn_agent",
"path": "dopamine/jax/agents/dqn/dqn_agent.py",
"snippet": "NATURE_DQN_OBSERVATION_SHAPE = dqn_agent.NATURE_DQN_OBSERVATION_SHAPE\nNATURE_DQN_DTYPE = jnp.uint8\nNATURE_DQN_STACK_SIZE = dqn_agent.NATURE_DQN_STACK_SIZE\ndef create_optimizer(name='adam', learning_rate=6.25e-5, beta1=0.9, beta2=0.999,\n eps=1.5e-4, centered=False):\ndef train(network_def, online_params, target_params, optimizer, optimizer_state,\n states, actions, next_states, rewards, terminals, cumulative_gamma,\n loss_type='mse'):\n def loss_fn(params, target):\n def q_online(state):\n def q_target(state):\ndef target_q(target_network, next_states, rewards, terminals, cumulative_gamma):\ndef linearly_decaying_epsilon(decay_period, step, warmup_steps, epsilon):\ndef select_action(network_def, params, state, rng, num_actions, eval_mode,\n epsilon_eval, epsilon_train, epsilon_decay_period,\n training_steps, min_replay_history, epsilon_fn):\n def __init__(self,\n num_actions,\n observation_shape=NATURE_DQN_OBSERVATION_SHAPE,\n observation_dtype=NATURE_DQN_DTYPE,\n stack_size=NATURE_DQN_STACK_SIZE,\n network=networks.NatureDQNNetwork,\n gamma=0.99,\n update_horizon=1,\n min_replay_history=20000,\n update_period=4,\n target_update_period=8000,\n epsilon_fn=linearly_decaying_epsilon,\n epsilon_train=0.01,\n epsilon_eval=0.001,\n epsilon_decay_period=250000,\n eval_mode=False,\n optimizer='adam',\n summary_writer=None,\n summary_writing_frequency=500,\n allow_partial_reload=False,\n seed=None,\n loss_type='mse',\n preprocess_fn=None,\n collector_allowlist=('tensorboard',)):\n def _build_networks_and_optimizer(self):\n def _build_replay_buffer(self):\n def _sample_from_replay_buffer(self):\n def _sync_weights(self):\n def _reset_state(self):\n def _record_observation(self, observation):\n def begin_episode(self, observation):\n def step(self, reward, observation):\n def end_episode(self, reward, terminal=True):\n def _train_step(self):\n def _store_transition(self,\n last_observation,\n action,\n reward,\n is_terminal,\n *args,\n priority=None,\n episode_end=False):\n def bundle_and_checkpoint(self, checkpoint_dir, iteration_number):\n def unbundle(self, checkpoint_dir, iteration_number, bundle_dictionary):\n def set_collector_dispatcher(self, collector_dispatcher):\nclass JaxDQNAgent(object):"
},
{
"identifier": "full_rainbow_agent",
"path": "dopamine/jax/agents/full_rainbow/full_rainbow_agent.py",
"snippet": "def zero_epsilon(unused_decay_period, unused_step, unused_warmup_steps,\n unused_epsilon):\ndef select_action(network_def, params, state, rng, num_actions, eval_mode,\n epsilon_eval, epsilon_train, epsilon_decay_period,\n training_steps, min_replay_history, epsilon_fn, support):\ndef get_logits(model, states, rng):\ndef get_q_values(model, states, rng):\ndef train(network_def, online_params, target_params, optimizer, optimizer_state,\n states, actions, next_states, rewards, terminals, loss_weights,\n support, cumulative_gamma, double_dqn, distributional, mse_loss, rng):\n def q_online(state, key):\n def q_target(state, key):\n def loss_fn(params, target, loss_multipliers):\n def q_online(state, key):\ndef target_output(model, target_network, next_states, rewards, terminals,\n support, cumulative_gamma, double_dqn, distributional, rng):\n def __init__(self,\n num_actions,\n noisy=True,\n dueling=True,\n double_dqn=True,\n distributional=True,\n mse_loss=False,\n num_updates_per_train_step=1,\n network=networks.FullRainbowNetwork,\n num_atoms=51,\n vmax=10.,\n vmin=None,\n epsilon_fn=dqn_agent.linearly_decaying_epsilon,\n replay_scheme='prioritized',\n summary_writer=None,\n seed=None,\n preprocess_fn=None):\n def _build_networks_and_optimizer(self):\n def _build_replay_buffer(self):\n def _training_step_update(self):\n def _store_transition(self,\n last_observation,\n action,\n reward,\n is_terminal,\n *args,\n priority=None,\n episode_end=False):\n def _train_step(self):\n def begin_episode(self, observation):\n def step(self, reward, observation):\nclass JaxFullRainbowAgent(dqn_agent.JaxDQNAgent):"
},
{
"identifier": "full_rainbow_dceo",
"path": "dopamine/jax/agents/full_rainbow/full_rainbow_dceo.py",
"snippet": "class Option:\nclass JaxFullRainbowAgentDCEO(dqn_agent.JaxDQNAgent):\n def __init__(self, online_params, target_network_params, optimizer_state):\ndef zero_epsilon(unused_decay_period, unused_step, unused_warmup_steps,\n unused_epsilon):\ndef act(network_def, params, state, rng, \n num_actions, eval_mode, support, epsilon):\ndef select_action(network_def, params, state, rng, num_actions, eval_mode,\n epsilon_eval, epsilon_train, epsilon_decay_period,\n training_steps, min_replay_history, epsilon_fn, support,\n option_term, option_prob, dur, cur_opt, num_options, options):\ndef get_logits(model, states, rng):\ndef get_q_values(model, states, rng):\ndef train(network_def, online_params, target_params, optimizer, optimizer_state,\n states, actions, next_states, rewards, terminals, loss_weights,\n support, cumulative_gamma, double_dqn, distributional, mse_loss, rng):\n def q_online(state, key):\n def q_target(state, key):\n def loss_fn(params, target, loss_multipliers):\n def q_online(state, key):\ndef target_output(model, target_network, next_states, rewards, terminals,\n support, cumulative_gamma, double_dqn, distributional, rng):\n def __init__(self,\n num_actions,\n noisy=False,\n dueling=True,\n double_dqn=True,\n distributional=True,\n mse_loss=False,\n num_updates_per_train_step=1,\n network=networks.FullRainbowNetwork,\n rep_network=networks.NatureDQNNetwork,\n num_atoms=51,\n vmax=10.,\n vmin=None,\n epsilon_fn=dqn_agent.linearly_decaying_epsilon,\n replay_scheme='prioritized',\n summary_writer=None,\n seed=None,\n preprocess_fn=None,\n num_options=0,\n option_prob=0.0,\n option_duration=10,\n rep_dim=10,\n log_transform=True):\n def get_rep(params, state):\n def neg_loss_fn(phi_u, phi_v):\n def train_rep(rep_params, optimizer, optimizer_state, states):\n def loss_fn(params):\n def rep_online(state):\n def _build_networks_and_optimizer(self):\n def _build_replay_buffer(self):\n def _training_step_update(self):\n def get_loss_weights(self,):\n def _sync_option_weights(self, option):\n def _rep_sample_from_replay_buffer(self,):\n def _store_transition(self,\n last_observation,\n action,\n reward,\n is_terminal,\n *args,\n priority=None,\n episode_end=False):\n def _train_step(self):\n def begin_episode(self, observation):\n def step(self, reward, observation):"
},
{
"identifier": "implicit_quantile_agent",
"path": "dopamine/jax/agents/implicit_quantile/implicit_quantile_agent.py",
"snippet": "def target_quantile_values(network_def, online_params, target_params,\n next_states, rewards, terminals,\n num_tau_prime_samples, num_quantile_samples,\n cumulative_gamma, double_dqn, rng):\ndef train(network_def, online_params, target_params, optimizer, optimizer_state,\n states, actions, next_states, rewards, terminals, num_tau_samples,\n num_tau_prime_samples, num_quantile_samples, cumulative_gamma,\n double_dqn, kappa, rng):\n def loss_fn(params, rng_input, target_quantile_vals):\n def online(state, key):\ndef select_action(network_def, params, state, rng, num_quantile_samples,\n num_actions, eval_mode, epsilon_eval, epsilon_train,\n epsilon_decay_period, training_steps, min_replay_history,\n epsilon_fn):\n def __init__(self,\n num_actions,\n observation_shape=dqn_agent.NATURE_DQN_OBSERVATION_SHAPE,\n observation_dtype=dqn_agent.NATURE_DQN_DTYPE,\n stack_size=dqn_agent.NATURE_DQN_STACK_SIZE,\n network=networks.ImplicitQuantileNetwork,\n kappa=1.0,\n num_tau_samples=32,\n num_tau_prime_samples=32,\n num_quantile_samples=32,\n quantile_embedding_dim=64,\n double_dqn=False,\n gamma=0.99,\n update_horizon=1,\n min_replay_history=20000,\n update_period=4,\n target_update_period=8000,\n epsilon_fn=dqn_agent.linearly_decaying_epsilon,\n epsilon_train=0.01,\n epsilon_eval=0.001,\n epsilon_decay_period=250000,\n replay_scheme='prioritized',\n optimizer='adam',\n summary_writer=None,\n summary_writing_frequency=500,\n seed=None):\n def _build_networks_and_optimizer(self):\n def begin_episode(self, observation):\n def step(self, reward, observation):\n def _train_step(self):\nclass JaxImplicitQuantileAgent(dqn_agent.JaxDQNAgent):"
},
{
"identifier": "quantile_agent",
"path": "dopamine/jax/agents/quantile/quantile_agent.py",
"snippet": "def target_distribution(target_network, next_states, rewards, terminals,\n cumulative_gamma):\ndef train(network_def, online_params, target_params, optimizer, optimizer_state,\n states, actions, next_states, rewards, terminals, kappa, num_atoms,\n cumulative_gamma):\n def loss_fn(params, target):\n def q_online(state):\n def q_target(state):\n def __init__(self,\n num_actions,\n observation_shape=dqn_agent.NATURE_DQN_OBSERVATION_SHAPE,\n observation_dtype=dqn_agent.NATURE_DQN_DTYPE,\n stack_size=dqn_agent.NATURE_DQN_STACK_SIZE,\n network=networks.QuantileNetwork,\n kappa=1.0,\n num_atoms=200,\n gamma=0.99,\n update_horizon=1,\n min_replay_history=50000,\n update_period=4,\n target_update_period=10000,\n epsilon_fn=dqn_agent.linearly_decaying_epsilon,\n epsilon_train=0.1,\n epsilon_eval=0.05,\n epsilon_decay_period=1000000,\n replay_scheme='prioritized',\n optimizer='adam',\n summary_writer=None,\n summary_writing_frequency=500,\n seed=None,\n allow_partial_reload=False):\n def _build_networks_and_optimizer(self):\n def _build_replay_buffer(self):\n def _train_step(self):\nclass JaxQuantileAgent(dqn_agent.JaxDQNAgent):"
},
{
"identifier": "rainbow_agent",
"path": "dopamine/jax/agents/rainbow/rainbow_agent.py",
"snippet": "def train(network_def, online_params, target_params, optimizer, optimizer_state,\n states, actions, next_states, rewards, terminals, loss_weights,\n support, cumulative_gamma):\n def loss_fn(params, target, loss_multipliers):\n def q_online(state):\n def q_target(state):\ndef target_distribution(target_network, next_states, rewards, terminals,\n support, cumulative_gamma):\ndef select_action(network_def, params, state, rng, num_actions, eval_mode,\n epsilon_eval, epsilon_train, epsilon_decay_period,\n training_steps, min_replay_history, epsilon_fn, support):\n def __init__(self,\n num_actions,\n observation_shape=dqn_agent.NATURE_DQN_OBSERVATION_SHAPE,\n observation_dtype=dqn_agent.NATURE_DQN_DTYPE,\n stack_size=dqn_agent.NATURE_DQN_STACK_SIZE,\n network=networks.RainbowNetwork,\n num_atoms=51,\n vmin=None,\n vmax=10.,\n gamma=0.99,\n update_horizon=1,\n min_replay_history=20000,\n update_period=4,\n target_update_period=8000,\n epsilon_fn=dqn_agent.linearly_decaying_epsilon,\n epsilon_train=0.01,\n epsilon_eval=0.001,\n epsilon_decay_period=250000,\n replay_scheme='prioritized',\n optimizer='adam',\n seed=None,\n summary_writer=None,\n summary_writing_frequency=500,\n allow_partial_reload=False):\n def _build_networks_and_optimizer(self):\n def _build_replay_buffer(self):\n def begin_episode(self, observation):\n def step(self, reward, observation):\n def _train_step(self):\ndef project_distribution(supports, weights, target_support):\nclass JaxRainbowAgent(dqn_agent.JaxDQNAgent):"
},
{
"identifier": "collector_dispatcher",
"path": "dopamine/metrics/collector_dispatcher.py",
"snippet": "AVAILABLE_COLLECTORS = {\n 'console': console_collector.ConsoleCollector,\n 'pickle': pickle_collector.PickleCollector,\n 'tensorboard': tensorboard_collector.TensorboardCollector,\n}\ndef add_collector(name: str, constructor: CollectorConstructorType) -> None:\n def __init__(\n self,\n base_dir: Optional[str],\n # TODO(psc): Consider using sets instead.\n collectors: Sequence[str] = ('console', 'pickle', 'tensorboard')):\n def write(\n self,\n statistics: Sequence[statistics_instance.StatisticsInstance],\n collector_allowlist: Sequence[str] = ()) -> None:\n def flush(self) -> None:\n def close(self) -> None:\nclass CollectorDispatcher(object):"
},
{
"identifier": "statistics_instance",
"path": "dopamine/metrics/statistics_instance.py",
"snippet": "class StatisticsInstance:"
}
] | import os
import sys
import time
import gin.tf
import numpy as np
import tensorflow as tf
from absl import logging
from dopamine.agents.dqn import dqn_agent
from dopamine.agents.implicit_quantile import implicit_quantile_agent
from dopamine.agents.rainbow import rainbow_agent
from dopamine.discrete_domains import atari_lib
from dopamine.discrete_domains import checkpointer
from dopamine.discrete_domains import iteration_statistics
from dopamine.discrete_domains import logger
from dopamine.jax.agents.dqn import dqn_agent as jax_dqn_agent
from dopamine.jax.agents.full_rainbow import full_rainbow_agent
from dopamine.jax.agents.full_rainbow import full_rainbow_dceo
from dopamine.jax.agents.implicit_quantile import implicit_quantile_agent as jax_implicit_quantile_agent
from dopamine.jax.agents.quantile import quantile_agent as jax_quantile_agent
from dopamine.jax.agents.rainbow import rainbow_agent as jax_rainbow_agent
from dopamine.metrics import collector_dispatcher
from dopamine.metrics import statistics_instance | 7,802 | self._end_episode(reward, is_terminal)
action = self._agent.begin_episode(observation)
else:
action = self._agent.step(reward, observation)
self._end_episode(reward, is_terminal)
return step_number, total_reward
def _run_one_phase(self, min_steps, statistics, run_mode_str):
"""Runs the agent/environment loop until a desired number of steps.
We follow the Machado et al., 2017 convention of running full episodes,
and terminating once we've run a minimum number of steps.
Args:
min_steps: int, minimum number of steps to generate in this phase.
statistics: `IterationStatistics` object which records the experimental
results.
run_mode_str: str, describes the run mode for this agent.
Returns:
Tuple containing the number of steps taken in this phase (int), the sum of
returns (float), and the number of episodes performed (int).
"""
step_count = 0
num_episodes = 0
sum_returns = 0.
while step_count < min_steps:
episode_length, episode_return = self._run_one_episode()
statistics.append({
'{}_episode_lengths'.format(run_mode_str): episode_length,
'{}_episode_returns'.format(run_mode_str): episode_return
})
step_count += episode_length
sum_returns += episode_return
num_episodes += 1
if self._fine_grained_print_to_console:
# We use sys.stdout.write instead of logging so as to flush frequently
# without generating a line break.
sys.stdout.write('Steps executed: {} '.format(step_count) +
'Episode length: {} '.format(episode_length) +
'Return: {}\r'.format(episode_return))
sys.stdout.flush()
return step_count, sum_returns, num_episodes
def _run_train_phase(self, statistics):
"""Run training phase.
Args:
statistics: `IterationStatistics` object which records the experimental
results. Note - This object is modified by this method.
Returns:
num_episodes: int, The number of episodes run in this phase.
average_reward: float, The average reward generated in this phase.
average_steps_per_second: float, The average number of steps per second.
"""
# Perform the training phase, during which the agent learns.
self._agent.eval_mode = False
start_time = time.time()
number_steps, sum_returns, num_episodes = self._run_one_phase(
self._training_steps, statistics, 'train')
average_return = sum_returns / num_episodes if num_episodes > 0 else 0.0
statistics.append({'train_average_return': average_return})
time_delta = time.time() - start_time
average_steps_per_second = number_steps / time_delta
statistics.append(
{'train_average_steps_per_second': average_steps_per_second})
logging.info('Average undiscounted return per training episode: %.2f',
average_return)
logging.info('Average training steps per second: %.2f',
average_steps_per_second)
return num_episodes, average_return, average_steps_per_second
def _run_eval_phase(self, statistics):
"""Run evaluation phase.
Args:
statistics: `IterationStatistics` object which records the experimental
results. Note - This object is modified by this method.
Returns:
num_episodes: int, The number of episodes run in this phase.
average_reward: float, The average reward generated in this phase.
"""
# Perform the evaluation phase -- no learning.
self._agent.eval_mode = True
_, sum_returns, num_episodes = self._run_one_phase(
self._evaluation_steps, statistics, 'eval')
average_return = sum_returns / num_episodes if num_episodes > 0 else 0.0
logging.info('Average undiscounted return per evaluation episode: %.2f',
average_return)
statistics.append({'eval_average_return': average_return})
return num_episodes, average_return
def _run_one_iteration(self, iteration):
"""Runs one iteration of agent/environment interaction.
An iteration involves running several episodes until a certain number of
steps are obtained. The interleaving of train/eval phases implemented here
    is to match the implementation of (Mnih et al., 2015).
Args:
iteration: int, current iteration number, used as a global_step for saving
Tensorboard summaries.
Returns:
A dict containing summary statistics for this iteration.
"""
statistics = iteration_statistics.IterationStatistics()
logging.info('Starting iteration %d', iteration)
num_episodes_train, average_reward_train, average_steps_per_second = (
self._run_train_phase(statistics))
num_episodes_eval, average_reward_eval = self._run_eval_phase(
statistics)
if self._has_collector_dispatcher:
self._collector_dispatcher.write([
| # coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module defining classes and helper methods for general agents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def load_gin_configs(gin_files, gin_bindings):
"""Loads gin configuration files.
Args:
gin_files: list, of paths to the gin configuration files for this
experiment.
gin_bindings: list, of gin parameter bindings to override the values in
the config files.
"""
gin.parse_config_files_and_bindings(gin_files,
bindings=gin_bindings,
skip_unknown=False)
@gin.configurable
def create_agent(sess, environment, agent_name=None, summary_writer=None,
debug_mode=False):
"""Creates an agent.
Args:
sess: A `tf.compat.v1.Session` object for running associated ops.
environment: A gym environment (e.g. Atari 2600).
agent_name: str, name of the agent to create.
summary_writer: A Tensorflow summary writer to pass to the agent
for in-agent training statistics in Tensorboard.
debug_mode: bool, whether to output Tensorboard summaries. If set to true,
the agent will output in-episode statistics to Tensorboard. Disabled by
default as this results in slower training.
Returns:
agent: An RL agent.
Raises:
ValueError: If `agent_name` is not in supported list.
"""
assert agent_name is not None
if not debug_mode:
summary_writer = None
if agent_name.startswith('dqn'):
return dqn_agent.DQNAgent(sess, num_actions=environment.action_space.n,
summary_writer=summary_writer)
elif agent_name == 'rainbow':
return rainbow_agent.RainbowAgent(
sess, num_actions=environment.action_space.n,
summary_writer=summary_writer)
elif agent_name == 'implicit_quantile':
return implicit_quantile_agent.ImplicitQuantileAgent(
sess, num_actions=environment.action_space.n,
summary_writer=summary_writer)
elif agent_name == 'jax_dqn':
return jax_dqn_agent.JaxDQNAgent(num_actions=environment.action_space.n,
summary_writer=summary_writer)
elif agent_name == 'jax_quantile':
return jax_quantile_agent.JaxQuantileAgent(
num_actions=environment.action_space.n,
summary_writer=summary_writer)
elif agent_name == 'jax_rainbow':
return jax_rainbow_agent.JaxRainbowAgent(
num_actions=environment.action_space.n,
summary_writer=summary_writer)
elif agent_name == 'full_rainbow':
return full_rainbow_agent.JaxFullRainbowAgent(
num_actions=environment.action_space.n,
summary_writer=summary_writer)
elif agent_name == 'full_rainbow_dceo':
return full_rainbow_dceo.JaxFullRainbowAgentDCEO(
num_actions=environment.action_space.n,
summary_writer=summary_writer)
elif agent_name == 'jax_implicit_quantile':
return jax_implicit_quantile_agent.JaxImplicitQuantileAgent(
num_actions=environment.action_space.n,
summary_writer=summary_writer)
else:
raise ValueError('Unknown agent: {}'.format(agent_name))
@gin.configurable
def create_runner(base_dir, schedule='continuous_train_and_eval'):
"""Creates an experiment Runner.
Args:
base_dir: str, base directory for hosting all subdirectories.
schedule: string, which type of Runner to use.
Returns:
runner: A `Runner` like object.
Raises:
ValueError: When an unknown schedule is encountered.
"""
assert base_dir is not None
# Continuously runs training and evaluation until max num_iterations is hit.
if schedule == 'continuous_train_and_eval':
return Runner(base_dir, create_agent)
# Continuously runs training until max num_iterations is hit.
elif schedule == 'continuous_train':
return TrainRunner(base_dir, create_agent)
else:
raise ValueError('Unknown schedule: {}'.format(schedule))
@gin.configurable
class Runner(object):
"""Object that handles running Dopamine experiments.
Here we use the term 'experiment' to mean simulating interactions between the
agent and the environment and reporting some statistics pertaining to these
interactions.
A simple scenario to train a DQN agent is as follows:
```python
import dopamine.discrete_domains.atari_lib
base_dir = '/tmp/simple_example'
def create_agent(sess, environment):
return dqn_agent.DQNAgent(sess, num_actions=environment.action_space.n)
runner = Runner(base_dir, create_agent, atari_lib.create_atari_environment)
runner.run()
```
"""
def __init__(self,
base_dir,
create_agent_fn,
create_environment_fn=atari_lib.create_atari_environment,
checkpoint_file_prefix='ckpt',
logging_file_prefix='log',
log_every_n=1,
num_iterations=200,
training_steps=250000,
evaluation_steps=125000,
max_steps_per_episode=27000,
clip_rewards=True,
use_legacy_logger=True,
fine_grained_print_to_console=True):
"""Initialize the Runner object in charge of running a full experiment.
Args:
base_dir: str, the base directory to host all required sub-directories.
create_agent_fn: A function that takes as args a Tensorflow session and an
environment, and returns an agent.
create_environment_fn: A function which receives a problem name and
creates a Gym environment for that problem (e.g. an Atari 2600 game).
checkpoint_file_prefix: str, the prefix to use for checkpoint files.
logging_file_prefix: str, prefix to use for the log files.
log_every_n: int, the frequency for writing logs.
num_iterations: int, the iteration number threshold (must be greater than
start_iteration).
training_steps: int, the number of training steps to perform.
evaluation_steps: int, the number of evaluation steps to perform.
max_steps_per_episode: int, maximum number of steps after which an episode
terminates.
clip_rewards: bool, whether to clip rewards in [-1, 1].
use_legacy_logger: bool, whether to use the legacy Logger. This will be
deprecated soon, replaced with the new CollectorDispatcher setup.
fine_grained_print_to_console: bool, whether to print fine-grained
progress to console (useful for debugging).
This constructor will take the following actions:
- Initialize an environment.
- Initialize a `tf.compat.v1.Session`.
- Initialize a logger.
- Initialize an agent.
- Reload from the latest checkpoint, if available, and initialize the
Checkpointer object.
"""
assert base_dir is not None
self._legacy_logger_enabled = use_legacy_logger
self._fine_grained_print_to_console_enabled = fine_grained_print_to_console
self._logging_file_prefix = logging_file_prefix
self._log_every_n = log_every_n
self._num_iterations = num_iterations
self._training_steps = training_steps
self._evaluation_steps = evaluation_steps
self._max_steps_per_episode = max_steps_per_episode
self._base_dir = base_dir
self._clip_rewards = clip_rewards
self._create_directories()
self._environment = create_environment_fn()
# The agent is now in charge of setting up the session.
self._sess = None
# We're using a bit of a hack in that we pass in _base_dir instead of an
    # actual SummaryWriter. This is because the agent is now in charge of the
# session, but needs to create the SummaryWriter before creating the ops,
# and in order to do so, it requires the base directory.
self._agent = create_agent_fn(self._sess, self._environment,
summary_writer=self._base_dir)
if hasattr(self._agent, '_sess'):
self._sess = self._agent._sess
self._summary_writer = self._agent.summary_writer
self._initialize_checkpointer_and_maybe_resume(checkpoint_file_prefix)
# Create a collector dispatcher for metrics reporting.
self._collector_dispatcher = collector_dispatcher.CollectorDispatcher(
self._base_dir)
set_collector_dispatcher_fn = getattr(
self._agent, 'set_collector_dispatcher', None)
if callable(set_collector_dispatcher_fn):
set_collector_dispatcher_fn(self._collector_dispatcher)
@property
def _use_legacy_logger(self):
if not hasattr(self, '_legacy_logger_enabled'):
return True
return self._legacy_logger_enabled
@property
def _has_collector_dispatcher(self):
if not hasattr(self, '_collector_dispatcher'):
return False
return True
@property
def _fine_grained_print_to_console(self):
if not hasattr(self, '_fine_grained_print_to_console_enabled'):
return True
return self._fine_grained_print_to_console_enabled
def _create_directories(self):
"""Create necessary sub-directories."""
self._checkpoint_dir = os.path.join(self._base_dir, 'checkpoints')
if self._use_legacy_logger:
logging.warning(
'DEPRECATION WARNING: Logger is being deprecated. '
'Please switch to CollectorDispatcher!')
self._logger = logger.Logger(os.path.join(self._base_dir, 'logs'))
def _initialize_checkpointer_and_maybe_resume(self, checkpoint_file_prefix):
"""Reloads the latest checkpoint if it exists.
This method will first create a `Checkpointer` object and then call
`checkpointer.get_latest_checkpoint_number` to determine if there is a valid
checkpoint in self._checkpoint_dir, and what the largest file number is.
If a valid checkpoint file is found, it will load the bundled data from this
file and will pass it to the agent for it to reload its data.
If the agent is able to successfully unbundle, this method will verify that
    the unbundled data contains the keys 'logs' and 'current_iteration'. It will
then load the `Logger`'s data from the bundle, and will return the iteration
number keyed by 'current_iteration' as one of the return values (along with
the `Checkpointer` object).
Args:
checkpoint_file_prefix: str, the checkpoint file prefix.
Returns:
start_iteration: int, the iteration number to start the experiment from.
experiment_checkpointer: `Checkpointer` object for the experiment.
"""
self._checkpointer = checkpointer.Checkpointer(self._checkpoint_dir,
checkpoint_file_prefix)
self._start_iteration = 0
# Check if checkpoint exists. Note that the existence of checkpoint 0 means
# that we have finished iteration 0 (so we will start from iteration 1).
latest_checkpoint_version = checkpointer.get_latest_checkpoint_number(
self._checkpoint_dir)
if latest_checkpoint_version >= 0:
experiment_data = self._checkpointer.load_checkpoint(
latest_checkpoint_version)
if self._agent.unbundle(
self._checkpoint_dir, latest_checkpoint_version, experiment_data):
if experiment_data is not None:
assert 'logs' in experiment_data
assert 'current_iteration' in experiment_data
if self._use_legacy_logger:
self._logger.data = experiment_data['logs']
self._start_iteration = experiment_data['current_iteration'] + 1
logging.info('Reloaded checkpoint and will start from iteration %d',
self._start_iteration)
def _initialize_episode(self):
"""Initialization for a new episode.
Returns:
action: int, the initial action chosen by the agent.
"""
initial_observation = self._environment.reset()
return self._agent.begin_episode(initial_observation)
def _run_one_step(self, action):
"""Executes a single step in the environment.
Args:
action: int, the action to perform in the environment.
Returns:
The observation, reward, and is_terminal values returned from the
environment.
"""
observation, reward, is_terminal, _ = self._environment.step(action)
return observation, reward, is_terminal
def _end_episode(self, reward, terminal=True):
"""Finalizes an episode run.
Args:
reward: float, the last reward from the environment.
terminal: bool, whether the last state-action led to a terminal state.
"""
if isinstance(self._agent, jax_dqn_agent.JaxDQNAgent):
self._agent.end_episode(reward, terminal)
else:
# TODO(joshgreaves): Add terminal signal to TF dopamine agents
self._agent.end_episode(reward)
def _run_one_episode(self):
"""Executes a full trajectory of the agent interacting with the environment.
Returns:
The number of steps taken and the total reward.
"""
step_number = 0
total_reward = 0.
action = self._initialize_episode()
is_terminal = False
# Keep interacting until we reach a terminal state.
while True:
observation, reward, is_terminal = self._run_one_step(action)
total_reward += reward
step_number += 1
if self._clip_rewards:
# Perform reward clipping.
reward = np.clip(reward, -1, 1)
if (self._environment.game_over or
step_number == self._max_steps_per_episode):
# Stop the run loop once we reach the true end of episode.
break
elif is_terminal:
# If we lose a life but the episode is not over, signal an artificial
# end of episode to the agent.
self._end_episode(reward, is_terminal)
action = self._agent.begin_episode(observation)
else:
action = self._agent.step(reward, observation)
self._end_episode(reward, is_terminal)
return step_number, total_reward
def _run_one_phase(self, min_steps, statistics, run_mode_str):
"""Runs the agent/environment loop until a desired number of steps.
We follow the Machado et al., 2017 convention of running full episodes,
and terminating once we've run a minimum number of steps.
Args:
min_steps: int, minimum number of steps to generate in this phase.
statistics: `IterationStatistics` object which records the experimental
results.
run_mode_str: str, describes the run mode for this agent.
Returns:
Tuple containing the number of steps taken in this phase (int), the sum of
returns (float), and the number of episodes performed (int).
"""
step_count = 0
num_episodes = 0
sum_returns = 0.
while step_count < min_steps:
episode_length, episode_return = self._run_one_episode()
statistics.append({
'{}_episode_lengths'.format(run_mode_str): episode_length,
'{}_episode_returns'.format(run_mode_str): episode_return
})
step_count += episode_length
sum_returns += episode_return
num_episodes += 1
if self._fine_grained_print_to_console:
# We use sys.stdout.write instead of logging so as to flush frequently
# without generating a line break.
sys.stdout.write('Steps executed: {} '.format(step_count) +
'Episode length: {} '.format(episode_length) +
'Return: {}\r'.format(episode_return))
sys.stdout.flush()
return step_count, sum_returns, num_episodes
def _run_train_phase(self, statistics):
"""Run training phase.
Args:
statistics: `IterationStatistics` object which records the experimental
results. Note - This object is modified by this method.
Returns:
num_episodes: int, The number of episodes run in this phase.
average_reward: float, The average reward generated in this phase.
average_steps_per_second: float, The average number of steps per second.
"""
# Perform the training phase, during which the agent learns.
self._agent.eval_mode = False
start_time = time.time()
number_steps, sum_returns, num_episodes = self._run_one_phase(
self._training_steps, statistics, 'train')
average_return = sum_returns / num_episodes if num_episodes > 0 else 0.0
statistics.append({'train_average_return': average_return})
time_delta = time.time() - start_time
average_steps_per_second = number_steps / time_delta
statistics.append(
{'train_average_steps_per_second': average_steps_per_second})
logging.info('Average undiscounted return per training episode: %.2f',
average_return)
logging.info('Average training steps per second: %.2f',
average_steps_per_second)
return num_episodes, average_return, average_steps_per_second
def _run_eval_phase(self, statistics):
"""Run evaluation phase.
Args:
statistics: `IterationStatistics` object which records the experimental
results. Note - This object is modified by this method.
Returns:
num_episodes: int, The number of episodes run in this phase.
average_reward: float, The average reward generated in this phase.
"""
# Perform the evaluation phase -- no learning.
self._agent.eval_mode = True
_, sum_returns, num_episodes = self._run_one_phase(
self._evaluation_steps, statistics, 'eval')
average_return = sum_returns / num_episodes if num_episodes > 0 else 0.0
logging.info('Average undiscounted return per evaluation episode: %.2f',
average_return)
statistics.append({'eval_average_return': average_return})
return num_episodes, average_return
def _run_one_iteration(self, iteration):
"""Runs one iteration of agent/environment interaction.
An iteration involves running several episodes until a certain number of
steps are obtained. The interleaving of train/eval phases implemented here
    is to match the implementation of (Mnih et al., 2015).
Args:
iteration: int, current iteration number, used as a global_step for saving
Tensorboard summaries.
Returns:
A dict containing summary statistics for this iteration.
"""
statistics = iteration_statistics.IterationStatistics()
logging.info('Starting iteration %d', iteration)
num_episodes_train, average_reward_train, average_steps_per_second = (
self._run_train_phase(statistics))
num_episodes_eval, average_reward_eval = self._run_eval_phase(
statistics)
if self._has_collector_dispatcher:
self._collector_dispatcher.write([ | statistics_instance.StatisticsInstance('Train/NumEpisodes', | 14 | 2023-10-15 22:14:16+00:00 | 12k |
BurgerBurgerBurger/AA | run.py | [
{
"identifier": "add_args",
"path": "args.py",
"snippet": "def add_args(parser):\n parser.add_argument(\"--do_train\", action=\"store_true\")\n parser.add_argument(\"--data_dir\", default=\"./dataset/docred\", type=str)\n parser.add_argument(\"--transformer_type\", default=\"bert\", type=str)\n parser.add_argument(\"--model_name_or_path\", default=\"bert-base-cased\", type=str)\n\n parser.add_argument(\"--train_file\", default=\"train_annotated.json\", type=str)\n parser.add_argument(\"--dev_file\", default=\"dev.json\", type=str)\n parser.add_argument(\"--test_file\", default=\"dev.json\", type=str)\n parser.add_argument(\"--pred_file\", default=\"results.json\", type=str)\n parser.add_argument(\"--save_path\", default=\"\", type=str)\n parser.add_argument(\"--load_path\", default=\"\", type=str)\n parser.add_argument(\"--results_path\", default=\"\", type=str)\n parser.add_argument(\"--teacher_sig_path\", default=\"\", type=str)\n parser.add_argument(\"--save_attn\", action=\"store_true\", help=\"Whether store the evidence distribution or not\")\n\n # graph\n parser.add_argument(\"--attn_heads\", default=2, type=int, help=\"Attention heads\")\n parser.add_argument(\"--gcn_layers\", default=2, type=int, help=\"GCN layers\")\n parser.add_argument(\"--iters\", default=2, type=int, help=\"Iteration\")\n parser.add_argument(\"--use_graph\", action=\"store_true\", help=\"Use graph\")\n\n parser.add_argument(\"--config_name\", default=\"\", type=str,\n help=\"Pretrained config name or path if not the same as model_name\")\n parser.add_argument(\"--tokenizer_name\", default=\"\", type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\")\n parser.add_argument(\"--max_seq_length\", default=1024, type=int,\n help=\"The maximum total input sequence length after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\")\n\n parser.add_argument(\"--train_batch_size\", default=4, type=int,\n help=\"Batch size for training.\")\n parser.add_argument(\"--test_batch_size\", default=8, type=int,\n help=\"Batch size for testing.\")\n parser.add_argument(\"--eval_mode\", default=\"single\", type=str,\n choices=[\"single\", \"fushion\"], \n help=\"Single-pass evaluation or evaluation with inference-stage fusion.\")\n parser.add_argument(\"--gradient_accumulation_steps\", default=1, type=int,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument(\"--num_labels\", default=4, type=int,\n help=\"Max number of labels in prediction.\")\n parser.add_argument(\"--max_sent_num\", default=25, type=int,\n help=\"Max number of sentences in each document.\")\n parser.add_argument(\"--evi_thresh\", default=0.2, type=float,\n help=\"Evidence Threshold. \")\n parser.add_argument(\"--evi_lambda\", default=0.1, type=float,\n help=\"Weight of relation-agnostic evidence loss during training. \")\n parser.add_argument(\"--attn_lambda\", default=1.0, type=float,\n help=\"Weight of knowledge distillation loss for attentions during training. 
\")\n parser.add_argument(\"--lr_transformer\", default=5e-5, type=float,\n help=\"The initial learning rate for transformer.\")\n parser.add_argument(\"--lr_added\", default=1e-4, type=float,\n help=\"The initial learning rate for added modules.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-6, type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\n help=\"Max gradient norm.\")\n parser.add_argument(\"--warmup_ratio\", default=0.06, type=float,\n help=\"Warm up ratio for Adam.\")\n parser.add_argument(\"--num_train_epochs\", default=30.0, type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--evaluation_steps\", default=-1, type=int,\n help=\"Number of training steps between evaluations.\")\n parser.add_argument(\"--seed\", type=int, default=66,\n help=\"random seed for initialization\")\n parser.add_argument(\"--num_class\", type=int, default=97,\n help=\"Number of relation types in dataset.\")\n\n return parser"
},
{
"identifier": "DocREModel",
"path": "model.py",
"snippet": "class DocREModel(nn.Module):\n\n def __init__(self, args, config, model, tokenizer,\n emb_size=768, block_size=64, num_labels=-1,\n max_sent_num=25, evi_thresh=0.2):\n super().__init__()\n self.config = config\n self.model = model\n self.tokenizer = tokenizer\n self.hidden_size = config.hidden_size\n\n self.loss_fnt = ATLoss()\n self.loss_fnt_evi = nn.KLDivLoss(reduction=\"batchmean\")\n\n self.head_extractor = nn.Linear(self.hidden_size * 2, emb_size)\n self.tail_extractor = nn.Linear(self.hidden_size * 2, emb_size)\n\n self.use_graph = args.use_graph\n if self.use_graph:\n self.head_extractor = nn.Linear(3 * config.hidden_size, emb_size)\n self.tail_extractor = nn.Linear(3 * config.hidden_size, emb_size)\n self.bilinear = nn.Linear(emb_size * block_size, config.num_labels)\n\n self.emb_size = emb_size\n self.block_size = block_size\n self.num_labels = num_labels\n self.total_labels = config.num_labels\n self.max_sent_num = max_sent_num\n self.evi_thresh = evi_thresh\n\n self.edges = ['self-loop', 'mention-anaphor', 'co-reference', 'inter-entity']\n\n if self.use_graph:\n self.graph_layers = nn.ModuleList(\n AttentionGCNLayer(self.edges, self.hidden_size, nhead=args.attn_heads, iters=args.gcn_layers) for _ in\n range(args.iters))\n\n def encode(self, input_ids, attention_mask):\n config = self.config\n if config.transformer_type == \"bert\":\n start_tokens = [config.cls_token_id]\n end_tokens = [config.sep_token_id]\n elif config.transformer_type == \"roberta\":\n start_tokens = [config.cls_token_id]\n end_tokens = [config.sep_token_id, config.sep_token_id]\n # process long documents.\n sequence_output, attention = process_long_input(self.model, input_ids, attention_mask, start_tokens, end_tokens)\n\n return sequence_output, attention\n\n def get_hrt(self, sequence_output, attention, entity_pos, hts, offset):\n n, h, _, c = attention.size()\n hss, tss, rss = [], [], []\n ht_atts = []\n\n for i in range(len(entity_pos)): # for each batch\n entity_embs, entity_atts = [], []\n\n # obtain entity embedding from mention embeddings.\n for eid, e in enumerate(entity_pos[i]): # for each entity\n if len(e) > 1:\n e_emb, e_att = [], []\n for mid, (start, end) in enumerate(e): # for every mention\n if start + offset < c:\n # In case the entity mention is truncated due to limited max seq length.\n e_emb.append(sequence_output[i, start + offset])\n e_att.append(attention[i, :, start + offset])\n\n if len(e_emb) > 0:\n e_emb = torch.logsumexp(torch.stack(e_emb, dim=0), dim=0)\n e_att = torch.stack(e_att, dim=0).mean(0)\n else:\n e_emb = torch.zeros(self.config.hidden_size).to(sequence_output)\n e_att = torch.zeros(h, c).to(attention)\n else:\n start, end = e[0]\n if start + offset < c:\n e_emb = sequence_output[i, start + offset]\n e_att = attention[i, :, start + offset]\n else:\n e_emb = torch.zeros(self.config.hidden_size).to(sequence_output)\n e_att = torch.zeros(h, c).to(attention)\n\n entity_embs.append(e_emb)\n entity_atts.append(e_att)\n\n entity_embs = torch.stack(entity_embs, dim=0) # [n_e, d]\n entity_atts = torch.stack(entity_atts, dim=0) # [n_e, h, seq_len]\n\n ht_i = torch.LongTensor(hts[i]).to(sequence_output.device)\n\n # obtain subject/object (head/tail) embeddings from entity embeddings.\n hs = torch.index_select(entity_embs, 0, ht_i[:, 0])\n ts = torch.index_select(entity_embs, 0, ht_i[:, 1])\n\n h_att = torch.index_select(entity_atts, 0, ht_i[:, 0])\n t_att = torch.index_select(entity_atts, 0, ht_i[:, 1])\n\n ht_att = (h_att * t_att).mean(1) # average over all heads\n 
ht_att = ht_att / (ht_att.sum(1, keepdim=True) + 1e-30)\n ht_atts.append(ht_att)\n\n # obtain local context embeddings.\n rs = contract(\"ld,rl->rd\", sequence_output[i], ht_att)\n\n hss.append(hs)\n tss.append(ts)\n rss.append(rs)\n\n rels_per_batch = [len(b) for b in hss]\n hss = torch.cat(hss, dim=0) # (num_ent_pairs_all_batches, emb_size)\n tss = torch.cat(tss, dim=0) # (num_ent_pairs_all_batches, emb_size)\n rss = torch.cat(rss, dim=0) # (num_ent_pairs_all_batches, emb_size)\n ht_atts = torch.cat(ht_atts, dim=0) # (num_ent_pairs_all_batches, max_doc_len)\n\n return hss, rss, tss, ht_atts, rels_per_batch\n\n def graph(self, sequence_output, graphs, attention, entity_pos, hts, offset):\n n, h, _, c = attention.size()\n\n max_node = max([graph.shape[0] for graph in graphs])\n graph_fea = torch.zeros(n, max_node, self.config.hidden_size, device=sequence_output.device)\n graph_adj = torch.zeros(n, max_node, max_node, device=sequence_output.device)\n\n for i, graph in enumerate(graphs):\n nodes_num = graph.shape[0]\n graph_adj[i, :nodes_num, :nodes_num] = torch.from_numpy(graph)\n\n for i in range(len(entity_pos)):\n mention_index = 0\n for e in entity_pos[i]:\n for start, end in e:\n if start + offset < c:\n # In case the entity mention is truncated due to limited max seq length.\n graph_fea[i, mention_index, :] = sequence_output[i, start + offset]\n else:\n graph_fea[i, mention_index, :] = torch.zeros(self.config.hidden_size).to(sequence_output)\n mention_index += 1\n\n for graph_layer in self.graph_layers:\n graph_fea, _ = graph_layer(graph_fea, graph_adj)\n\n h_entity, t_entity = [], []\n for i in range(len(entity_pos)):\n entity_embs = []\n mention_index = 0\n for e in entity_pos[i]:\n e_emb = graph_fea[i, mention_index:mention_index + len(e), :]\n mention_index += len(e)\n\n e_emb = torch.logsumexp(e_emb, dim=0) if len(e) > 1 else e_emb.squeeze(0)\n entity_embs.append(e_emb)\n\n entity_embs = torch.stack(entity_embs, dim=0)\n ht_i = torch.LongTensor(hts[i]).to(sequence_output.device)\n hs = torch.index_select(entity_embs, 0, ht_i[:, 0])\n ts = torch.index_select(entity_embs, 0, ht_i[:, 1])\n h_entity.append(hs)\n t_entity.append(ts)\n\n h_entity = torch.cat(h_entity, dim=0)\n t_entity = torch.cat(t_entity, dim=0)\n return h_entity, t_entity\n\n def forward_rel(self, hs, ts, rs, h, t):\n hs = torch.tanh(self.head_extractor(torch.cat([hs, rs, h], dim=-1)))\n ts = torch.tanh(self.tail_extractor(torch.cat([ts, rs, t], dim=-1)))\n # split into several groups.\n b1 = hs.view(-1, self.emb_size // self.block_size, self.block_size)\n b2 = ts.view(-1, self.emb_size // self.block_size, self.block_size)\n\n bl = (b1.unsqueeze(3) * b2.unsqueeze(2)).view(-1, self.emb_size * self.block_size)\n logits = self.bilinear(bl)\n\n return logits\n\n def forward_rel_no_graph(self, hs, ts, rs):\n hs = torch.tanh(self.head_extractor(torch.cat([hs, rs], dim=-1)))\n ts = torch.tanh(self.tail_extractor(torch.cat([ts, rs], dim=-1)))\n # split into several groups.\n b1 = hs.view(-1, self.emb_size // self.block_size, self.block_size)\n b2 = ts.view(-1, self.emb_size // self.block_size, self.block_size)\n\n bl = (b1.unsqueeze(3) * b2.unsqueeze(2)).view(-1, self.emb_size * self.block_size)\n logits = self.bilinear(bl)\n\n return logits\n\n def forward_evi(self, doc_attn, sent_pos, batch_rel, offset):\n max_sent_num = max([len(sent) for sent in sent_pos])\n rel_sent_attn = []\n for i in range(len(sent_pos)): # for each batch\n # the relation ids corresponds to document in batch i is [sum(batch_rel[:i]), 
sum(batch_rel[:i+1]))\n curr_attn = doc_attn[sum(batch_rel[:i]):sum(batch_rel[:i + 1])]\n curr_sent_pos = [torch.arange(s[0], s[1]).to(curr_attn.device) + offset for s in sent_pos[i]] # + offset\n\n curr_attn_per_sent = [curr_attn.index_select(-1, sent) for sent in curr_sent_pos]\n curr_attn_per_sent += [torch.zeros_like(curr_attn_per_sent[0])] * (max_sent_num - len(curr_attn_per_sent))\n sum_attn = torch.stack([attn.sum(dim=-1) for attn in curr_attn_per_sent],\n dim=-1) # sum across those attentions\n rel_sent_attn.append(sum_attn)\n\n s_attn = torch.cat(rel_sent_attn, dim=0)\n return s_attn\n\n def forward(self,\n input_ids=None,\n attention_mask=None,\n labels=None, # relation labels\n entity_pos=None,\n hts=None, # entity pairs\n sent_pos=None,\n sent_labels=None, # evidence labels (0/1)\n teacher_attns=None, # evidence distribution from teacher model\n graph=None,\n tag=\"train\"\n ):\n\n offset = 1 if self.config.transformer_type in [\"bert\", \"roberta\"] else 0\n output = {}\n sequence_output, attention = self.encode(input_ids, attention_mask)\n\n hs, rs, ts, doc_attn, batch_rel = self.get_hrt(sequence_output, attention, entity_pos, hts, offset)\n\n if self.use_graph:\n h, t = self.graph(sequence_output, graph, attention, entity_pos, hts, offset)\n logits = self.forward_rel(hs, ts, rs, h, t)\n else:\n logits = self.forward_rel_no_graph(hs, ts, rs)\n\n output[\"rel_pred\"] = self.loss_fnt.get_label(logits, num_labels=self.num_labels)\n\n if sent_labels is not None: # human-annotated evidence available\n\n s_attn = self.forward_evi(doc_attn, sent_pos, batch_rel, offset)\n output[\"evi_pred\"] = F.pad(s_attn > self.evi_thresh, (0, self.max_sent_num - s_attn.shape[-1]))\n\n if tag in [\"test\", \"dev\"]: # testing\n scores_topk = self.loss_fnt.get_score(logits, self.num_labels)\n output[\"scores\"] = scores_topk[0]\n output[\"topks\"] = scores_topk[1]\n\n if tag == \"infer\": # teacher model inference\n output[\"attns\"] = doc_attn.split(batch_rel)\n\n else: # training\n # relation extraction loss\n loss = self.loss_fnt(logits.float(), labels.float())\n output[\"loss\"] = {\"rel_loss\": loss.to(sequence_output)}\n\n if sent_labels is not None: # supervised training with human evidence\n\n idx_used = torch.nonzero(labels[:, 1:].sum(dim=-1)).view(-1)\n # evidence retrieval loss (kldiv loss)\n s_attn = s_attn[idx_used]\n sent_labels = sent_labels[idx_used]\n norm_s_labels = sent_labels / (sent_labels.sum(dim=-1, keepdim=True) + 1e-30)\n norm_s_labels[norm_s_labels == 0] = 1e-30\n s_attn[s_attn == 0] = 1e-30\n evi_loss = self.loss_fnt_evi(s_attn.log(), norm_s_labels)\n output[\"loss\"][\"evi_loss\"] = evi_loss.to(sequence_output)\n\n elif teacher_attns is not None: # self training with teacher attention\n\n doc_attn[doc_attn == 0] = 1e-30\n teacher_attns[teacher_attns == 0] = 1e-30\n attn_loss = self.loss_fnt_evi(doc_attn.log(), teacher_attns)\n output[\"loss\"][\"attn_loss\"] = attn_loss.to(sequence_output)\n\n return output"
},
{
"identifier": "set_seed",
"path": "utils.py",
"snippet": "def set_seed(args):\n seed = int(args.seed)\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'\n torch.use_deterministic_algorithms(True)"
},
{
"identifier": "collate_fn",
"path": "utils.py",
"snippet": "def collate_fn(batch):\n max_len = max([len(f[\"input_ids\"]) for f in batch])\n max_sent = max([len(f[\"sent_pos\"]) for f in batch])\n input_ids = [f[\"input_ids\"] + [0] * (max_len - len(f[\"input_ids\"])) for f in batch]\n input_mask = [[1.0] * len(f[\"input_ids\"]) + [0.0] * (max_len - len(f[\"input_ids\"])) for f in batch]\n labels = [f[\"labels\"] for f in batch]\n entity_pos = [f[\"entity_pos\"] for f in batch]\n hts = [f[\"hts\"] for f in batch]\n sent_pos = [f[\"sent_pos\"] for f in batch]\n sent_labels = [f[\"sent_labels\"] for f in batch if \"sent_labels\" in f]\n attns = [f[\"attns\"] for f in batch if \"attns\" in f]\n\n input_ids = torch.tensor(input_ids, dtype=torch.long)\n input_mask = torch.tensor(input_mask, dtype=torch.float)\n\n labels = [torch.tensor(label) for label in labels]\n labels = torch.cat(labels, dim=0)\n\n if sent_labels != [] and None not in sent_labels:\n sent_labels_tensor = []\n for sent_label in sent_labels:\n sent_label = np.array(sent_label)\n sent_labels_tensor.append(np.pad(sent_label, ((0, 0), (0, max_sent - sent_label.shape[1]))))\n sent_labels_tensor = torch.from_numpy(np.concatenate(sent_labels_tensor, axis=0))\n else:\n sent_labels_tensor = None\n\n if attns:\n attns = [np.pad(attn, ((0, 0), (0, max_len - attn.shape[1]))) for attn in attns]\n attns = torch.from_numpy(np.concatenate(attns, axis=0))\n else:\n attns = None\n\n graph = [f[\"graph\"] for f in batch]\n\n output = (input_ids, input_mask, labels, entity_pos, hts, sent_pos, sent_labels_tensor, attns, graph)\n\n return output"
},
{
"identifier": "create_directory",
"path": "utils.py",
"snippet": "def create_directory(d):\n if d and not os.path.exists(d):\n os.makedirs(d)\n return d"
},
{
"identifier": "read_docred",
"path": "prepro.py",
"snippet": "def read_docred(file_in,\n tokenizer,\n transformer_type=\"bert\",\n max_seq_length=1024,\n teacher_sig_path=\"\",\n single_results=None):\n\n i_line = 0\n pos_samples = 0\n neg_samples = 0\n features = []\n\n if file_in == \"\":\n return None\n\n with open(file_in, \"r\", encoding='utf-8') as fh:\n data = json.load(fh)\n\n if teacher_sig_path != \"\": # load logits\n basename = os.path.splitext(os.path.basename(file_in))[0]\n attns_file = os.path.join(teacher_sig_path, f\"{basename}.attns\")\n attns = pickle.load(open(attns_file, 'rb'))\n\n if single_results != None:\n # reorder predictions as relations by title\n pred_pos_samples = 0\n pred_neg_samples = 0\n pred_rels = single_results\n title2preds = {}\n for pred_rel in pred_rels:\n if pred_rel[\"title\"] in title2preds:\n title2preds[pred_rel[\"title\"]].append(pred_rel)\n else:\n title2preds[pred_rel[\"title\"]] = [pred_rel]\n\n for doc_id in tqdm(range(len(data)), desc=\"Loading examples\"):\n\n sample = data[doc_id]\n entities = sample['vertexSet']\n entity_start, entity_end = [], []\n # record entities\n for entity in entities:\n for mention in entity:\n sent_id = mention[\"sent_id\"]\n pos = mention[\"pos\"]\n entity_start.append((sent_id, pos[0],))\n entity_end.append((sent_id, pos[1] - 1,))\n\n # add entity markers\n sents, sent_map, sent_pos = add_entity_markers(sample, tokenizer, entity_start, entity_end)\n\n # training triples with positive examples (entity pairs with labels)\n train_triple = {}\n\n if \"labels\" in sample:\n for label in sample['labels']:\n evidence = label['evidence']\n r = int(docred_rel2id[label['r']])\n\n # update training triples\n if (label['h'], label['t']) not in train_triple:\n train_triple[(label['h'], label['t'])] = [\n {'relation': r, 'evidence': evidence}]\n else:\n train_triple[(label['h'], label['t'])].append(\n {'relation': r, 'evidence': evidence})\n\n # get anaphors in the doc\n mentions = set([m['name'] for e in entities for m in e])\n\n potential_mention = get_anaphors(sample['sents'], mentions)\n\n entities.append(potential_mention)\n\n # entity start, end position\n entity_pos = []\n\n for e in entities:\n entity_pos.append([])\n for m in e:\n start = sent_map[m[\"sent_id\"]][m[\"pos\"][0]]\n end = sent_map[m[\"sent_id\"]][m[\"pos\"][1]]\n label = m[\"type\"]\n entity_pos[-1].append((start, end,))\n\n relations, hts, sent_labels = [], [], []\n\n for h, t in train_triple.keys(): # for every entity pair with gold relation\n relation = [0] * len(docred_rel2id)\n sent_evi = [0] * len(sent_pos)\n\n for mention in train_triple[h, t]: # for each relation mention with head h and tail t\n relation[mention[\"relation\"]] = 1\n for i in mention[\"evidence\"]:\n sent_evi[i] += 1\n\n relations.append(relation)\n hts.append([h, t])\n sent_labels.append(sent_evi)\n pos_samples += 1\n\n for h in range(len(entities) - 1):\n for t in range(len(entities) - 1):\n # all entity pairs that do not have relation are treated as negative samples\n if h != t and [h, t] not in hts: # and [t, h] not in hts:\n relation = [1] + [0] * (len(docred_rel2id) - 1)\n sent_evi = [0] * len(sent_pos)\n relations.append(relation)\n\n hts.append([h, t])\n sent_labels.append(sent_evi)\n neg_samples += 1\n\n graph = create_graph(entity_pos)\n\n assert len(relations) == (len(entities) - 1) * (len(entities) - 2)\n assert len(sents) < max_seq_length\n sents = sents[:max_seq_length - 2] # truncate, -2 for [CLS] and [SEP]\n input_ids = tokenizer.convert_tokens_to_ids(sents)\n input_ids = 
tokenizer.build_inputs_with_special_tokens(input_ids)\n\n feature = [{'input_ids': input_ids,\n 'entity_pos': entity_pos if entity_pos[-1] != [] else entity_pos[:-1],\n 'labels': relations,\n 'hts': hts,\n 'sent_pos': sent_pos,\n 'sent_labels': sent_labels,\n 'title': sample['title'],\n 'graph': graph\n }]\n\n if teacher_sig_path != '': # add evidence distributions from the teacher model\n feature[0]['attns'] = attns[doc_id][:, :len(input_ids)]\n\n if single_results is not None: # get pseudo documents from predictions of the single run\n offset = 1 if transformer_type in [\"bert\", \"roberta\"] else 0\n if sample[\"title\"] in title2preds:\n feature, pos_sample, neg_sample, = get_pseudo_features(feature[0], title2preds[sample[\"title\"]],\n entities, sent_map, offset, tokenizer)\n pred_pos_samples += pos_sample\n pred_neg_samples += neg_sample\n\n i_line += len(feature)\n features.extend(feature)\n\n print(\"# of documents {}.\".format(i_line))\n if single_results is not None:\n print(\"# of positive examples {}.\".format(pred_pos_samples))\n print(\"# of negative examples {}.\".format(pred_neg_samples))\n\n else:\n print(\"# of positive examples {}.\".format(pos_samples))\n print(\"# of negative examples {}.\".format(neg_samples))\n\n return features"
},
{
"identifier": "to_official",
"path": "evaluation.py",
"snippet": "def to_official(preds: list, features: list, evi_preds: list = [], scores: list = [], topks: list = []):\n '''\n Convert the predictions to official format for evaluating.\n Input:\n :preds: list of dictionaries, each dictionary entry is a predicted relation triple from the original document. Keys: ['title', 'h_idx', 't_idx', 'r', 'evidence', 'score'].\n :features: list of features within each document. Identical to the lists obtained from pre-processing.\n :evi_preds: list of the evidence prediction corresponding to each relation triple prediction.\n :scores: list of scores of topk relation labels for each entity pair.\n :topks: list of topk relation labels for each entity pair.\n Output:\n :official_res: official results used for evaluation.\n :res: topk results to be dumped into file, which can be further used during fushion.\n '''\n\n h_idx, t_idx, title, sents = [], [], [], []\n\n for f in features:\n if \"entity_map\" in f:\n hts = [[f[\"entity_map\"][ht[0]], f[\"entity_map\"][ht[1]]] for ht in f[\"hts\"]]\n else:\n hts = f[\"hts\"]\n\n h_idx += [ht[0] for ht in hts]\n t_idx += [ht[1] for ht in hts]\n title += [f[\"title\"] for ht in hts]\n sents += [len(f[\"sent_pos\"])] * len(hts)\n\n official_res = []\n res = []\n\n for i in range(preds.shape[0]): # for each entity pair\n if scores != []:\n score = extract_relative_score(scores[i], topks[i])\n pred = topks[i]\n else:\n pred = preds[i]\n pred = np.nonzero(pred)[0].tolist()\n\n for p in pred: # for each predicted relation label (topk)\n curr_result = {\n 'title': title[i],\n 'h_idx': h_idx[i],\n 't_idx': t_idx[i],\n 'r': id2rel[p],\n }\n if evi_preds != []:\n curr_evi = evi_preds[i]\n evis = np.nonzero(curr_evi)[0].tolist()\n curr_result[\"evidence\"] = [evi for evi in evis if evi < sents[i]]\n if scores != []:\n curr_result[\"score\"] = score[np.where(topks[i] == p)].item()\n if p != 0 and p in np.nonzero(preds[i])[0].tolist():\n official_res.append(curr_result)\n res.append(curr_result)\n\n return official_res, res"
},
{
"identifier": "official_evaluate",
"path": "evaluation.py",
"snippet": "def official_evaluate(tmp, path, train_file=\"train_annotated.json\", dev_file=\"dev.json\"):\n '''\n Adapted from the official evaluation code\n '''\n truth_dir = os.path.join(path, 'ref')\n\n if not os.path.exists(truth_dir):\n os.makedirs(truth_dir)\n\n fact_in_train_annotated = gen_train_facts(os.path.join(path, train_file), truth_dir)\n fact_in_train_distant = gen_train_facts(os.path.join(path, \"train_distant.json\"), truth_dir)\n\n truth = json.load(open(os.path.join(path, dev_file)))\n\n std = {}\n tot_evidences = 0\n titleset = set([])\n\n title2vectexSet = {}\n\n for x in truth:\n title = x['title']\n titleset.add(title)\n\n vertexSet = x['vertexSet']\n title2vectexSet[title] = vertexSet\n\n if 'labels' not in x: # official test set from DocRED\n continue\n\n for label in x['labels']:\n r = label['r']\n h_idx = label['h']\n t_idx = label['t']\n std[(title, r, h_idx, t_idx)] = set(label['evidence'])\n tot_evidences += len(label['evidence'])\n\n tot_relations = len(std)\n tmp.sort(key=lambda x: (x['title'], x['h_idx'], x['t_idx'], x['r']))\n submission_answer = [tmp[0]]\n\n for i in range(1, len(tmp)):\n x = tmp[i]\n y = tmp[i - 1]\n if (x['title'], x['h_idx'], x['t_idx'], x['r']) != (y['title'], y['h_idx'], y['t_idx'], y['r']):\n submission_answer.append(tmp[i])\n\n correct_re = 0\n correct_evidence = 0\n pred_evi = 0\n\n correct_in_train_annotated = 0\n correct_in_train_distant = 0\n titleset2 = set([])\n for x in submission_answer:\n title = x['title']\n h_idx = x['h_idx']\n t_idx = x['t_idx']\n r = x['r']\n titleset2.add(title)\n if title not in title2vectexSet:\n continue\n vertexSet = title2vectexSet[title]\n\n if 'evidence' in x: # and (title, h_idx, t_idx) in std:\n evi = set(x['evidence'])\n else:\n evi = set([])\n pred_evi += len(evi)\n\n if (title, r, h_idx, t_idx) in std:\n correct_re += 1\n stdevi = std[(title, r, h_idx, t_idx)]\n correct_evidence += len(stdevi & evi)\n in_train_annotated = in_train_distant = False\n for n1 in vertexSet[h_idx]:\n for n2 in vertexSet[t_idx]:\n if (n1['name'], n2['name'], r) in fact_in_train_annotated:\n in_train_annotated = True\n if (n1['name'], n2['name'], r) in fact_in_train_distant:\n in_train_distant = True\n\n if in_train_annotated:\n correct_in_train_annotated += 1\n if in_train_distant:\n correct_in_train_distant += 1\n\n re_p = 1.0 * correct_re / len(submission_answer)\n re_r = 1.0 * correct_re / tot_relations if tot_relations != 0 else 0\n if re_p + re_r == 0:\n re_f1 = 0\n else:\n re_f1 = 2.0 * re_p * re_r / (re_p + re_r)\n\n evi_p = 1.0 * correct_evidence / pred_evi if pred_evi > 0 else 0\n evi_r = 1.0 * correct_evidence / tot_evidences if tot_evidences > 0 else 0\n\n if evi_p + evi_r == 0:\n evi_f1 = 0\n else:\n evi_f1 = 2.0 * evi_p * evi_r / (evi_p + evi_r)\n\n re_p_ignore_train_annotated = 1.0 * (correct_re - correct_in_train_annotated) / (\n len(submission_answer) - correct_in_train_annotated + 1e-5)\n re_p_ignore_train = 1.0 * (correct_re - correct_in_train_distant) / (\n len(submission_answer) - correct_in_train_distant + 1e-5)\n\n if re_p_ignore_train_annotated + re_r == 0:\n re_f1_ignore_train_annotated = 0\n else:\n re_f1_ignore_train_annotated = 2.0 * re_p_ignore_train_annotated * re_r / (re_p_ignore_train_annotated + re_r)\n\n if re_p_ignore_train + re_r == 0:\n re_f1_ignore_train = 0\n else:\n re_f1_ignore_train = 2.0 * re_p_ignore_train * re_r / (re_p_ignore_train + re_r)\n\n return [re_p, re_r, re_f1], [evi_p, evi_r, evi_f1], \\\n [re_p_ignore_train_annotated, re_r, re_f1_ignore_train_annotated], 
\\\n [re_p_ignore_train, re_r, re_f1_ignore_train]"
},
{
"identifier": "merge_results",
"path": "evaluation.py",
"snippet": "def merge_results(pred: list, pred_pseudo: list, features: list, thresh: float = None):\n '''\n Merge relation predictions from the original document and psuedo documents.\n Input:\n :pred: list of dictionaries, each dictionary entry is a predicted relation triple from the original document. Keys: ['title', 'h_idx', 't_idx', 'r', 'evidence', 'score'].\n :pred_pseudo: list of dictionaries, each dictionary entry is a predicted relation triple from pseudo documents. Keys: ['title', 'h_idx', 't_idx', 'r', 'evidence', 'score'].\n :features: list of features within each document. Identical to the lists obtained from pre-processing.\n :thresh: threshold for selecting predictions.\n Output:\n :merged_res: list of merged relation predictions. Each relation prediction is a dictionay with keys (title, h_idx, t_idx, r).\n :thresh: threshold of selecting relation predictions.\n '''\n\n title2pred = get_title2pred(pred)\n title2pred_pseudo = get_title2pred(pred_pseudo)\n\n title2gt = get_title2gt(features)\n num_gt = sum([len(title2gt[t]) for t in title2gt])\n\n titles = list(title2pred.keys())\n cand = []\n merged_res = []\n correct, num_pred = 0, 0\n\n for t in titles:\n rels = title2pred[t]\n rels_pseudo = title2pred_pseudo[t] if t in title2pred_pseudo else {}\n\n union = set(rels.keys()) | set(rels_pseudo.keys())\n for r in union:\n if r in rels and r in rels_pseudo: # add those into predictions\n if rels[r] > 0 and rels_pseudo[r] > 0:\n merged_res.append({'title': t, 'h_idx': r[0], 't_idx': r[1], 'r': r[2]})\n num_pred += 1\n correct += r in title2gt[t]\n continue\n score = rels[r] + rels_pseudo[r]\n elif r in rels: # -10 for penalty\n score = rels[r] - 10\n elif r in rels_pseudo:\n score = rels_pseudo[r] - 10\n cand.append((r in title2gt[t], score, t, r[0], r[1], r[2]))\n\n if thresh != None:\n sorted_pred = sorted(cand, key=lambda x: x[1], reverse=True)\n last = min(filter(lambda x: x[1] > thresh, sorted_pred))\n until = sorted_pred.index(last)\n cand = sorted_pred[:until + 1]\n merged_res.extend([{'title': r[2], 'h_idx': r[3], 't_idx': r[4], 'r': r[5]} for r in cand])\n return merged_res, thresh\n\n if cand != []:\n thresh, cand = select_thresh(cand, num_gt, correct, num_pred)\n merged_res.extend([{'title': r[2], 'h_idx': r[3], 't_idx': r[4], 'r': r[5]} for r in cand])\n\n return merged_res, thresh"
}
] | import argparse
import os
import numpy as np
import torch
import ujson as json
import pandas as pd
import pickle
from torch.cuda.amp import GradScaler
from torch.utils.data import DataLoader
from transformers import AutoConfig, AutoModel, AutoTokenizer
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from args import add_args
from model import DocREModel
from utils import set_seed, collate_fn, create_directory
from prepro import read_docred
from evaluation import to_official, official_evaluate, merge_results
from tqdm import tqdm | 10,421 |
def load_input(batch, device, tag="dev"):
input = {'input_ids': batch[0].to(device),
'attention_mask': batch[1].to(device),
'labels': batch[2].to(device),
'entity_pos': batch[3],
'hts': batch[4],
'sent_pos': batch[5],
'sent_labels': batch[6].to(device) if (not batch[6] is None) and (batch[7] is None) else None,
'teacher_attns': batch[7].to(device) if not batch[7] is None else None,
'graph': batch[8],
'tag': tag
}
return input
def train(args, model, train_features, dev_features):
def finetune(features, optimizer, num_epoch, num_steps):
best_score = -1
train_dataloader = DataLoader(features, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn,
drop_last=True)
train_iterator = range(int(num_epoch))
total_steps = int(len(train_dataloader) * num_epoch // args.gradient_accumulation_steps)
warmup_steps = int(total_steps * args.warmup_ratio)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
num_training_steps=total_steps)
scaler = GradScaler()
print("Total steps: {}".format(total_steps))
print("Warmup steps: {}".format(warmup_steps))
for epoch in tqdm(train_iterator, desc='Train epoch'):
for step, batch in enumerate(train_dataloader):
model.zero_grad()
optimizer.zero_grad()
model.train()
inputs = load_input(batch, args.device)
outputs = model(**inputs)
loss = [outputs["loss"]["rel_loss"]]
if inputs["sent_labels"] is not None:
loss.append(outputs["loss"]["evi_loss"] * args.evi_lambda)
if inputs["teacher_attns"] is not None:
loss.append(outputs["loss"]["attn_loss"] * args.attn_lambda)
loss = sum(loss) / args.gradient_accumulation_steps
scaler.scale(loss).backward()
if step % args.gradient_accumulation_steps == 0:
if args.max_grad_norm > 0:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
scaler.step(optimizer)
scaler.update()
scheduler.step()
model.zero_grad()
num_steps += 1
if (step + 1) == len(train_dataloader) or (
args.evaluation_steps > 0 and num_steps % args.evaluation_steps == 0 and step % args.gradient_accumulation_steps == 0):
dev_scores, dev_output, official_results, results = evaluate(args, model, dev_features, tag="dev")
print(dev_output)
if dev_scores["dev_F1_ign"] > best_score:
best_score = dev_scores["dev_F1_ign"]
best_offi_results = official_results
best_results = results
best_output = dev_output
ckpt_file = os.path.join(args.save_path, "best.ckpt")
print(f"saving model checkpoint into {ckpt_file} ...")
torch.save(model.state_dict(), ckpt_file)
if epoch == train_iterator[-1]: # last epoch
ckpt_file = os.path.join(args.save_path, "last.ckpt")
print(f"saving model checkpoint into {ckpt_file} ...")
torch.save(model.state_dict(), ckpt_file)
pred_file = os.path.join(args.save_path, args.pred_file)
score_file = os.path.join(args.save_path, "scores.csv")
results_file = os.path.join(args.save_path, f"topk_{args.pred_file}")
dump_to_file(best_offi_results, pred_file, best_output, score_file, best_results, results_file)
return num_steps
new_layer = ["extractor", "bilinear", "graph"]
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], },
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], "lr": args.lr_added},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr_transformer, eps=args.adam_epsilon)
num_steps = 0
|
def load_input(batch, device, tag="dev"):
input = {'input_ids': batch[0].to(device),
'attention_mask': batch[1].to(device),
'labels': batch[2].to(device),
'entity_pos': batch[3],
'hts': batch[4],
'sent_pos': batch[5],
'sent_labels': batch[6].to(device) if (not batch[6] is None) and (batch[7] is None) else None,
'teacher_attns': batch[7].to(device) if not batch[7] is None else None,
'graph': batch[8],
'tag': tag
}
return input
def train(args, model, train_features, dev_features):
def finetune(features, optimizer, num_epoch, num_steps):
best_score = -1
train_dataloader = DataLoader(features, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn,
drop_last=True)
train_iterator = range(int(num_epoch))
total_steps = int(len(train_dataloader) * num_epoch // args.gradient_accumulation_steps)
warmup_steps = int(total_steps * args.warmup_ratio)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
num_training_steps=total_steps)
scaler = GradScaler()
print("Total steps: {}".format(total_steps))
print("Warmup steps: {}".format(warmup_steps))
for epoch in tqdm(train_iterator, desc='Train epoch'):
for step, batch in enumerate(train_dataloader):
model.zero_grad()
optimizer.zero_grad()
model.train()
inputs = load_input(batch, args.device)
outputs = model(**inputs)
loss = [outputs["loss"]["rel_loss"]]
if inputs["sent_labels"] is not None:
loss.append(outputs["loss"]["evi_loss"] * args.evi_lambda)
if inputs["teacher_attns"] is not None:
loss.append(outputs["loss"]["attn_loss"] * args.attn_lambda)
loss = sum(loss) / args.gradient_accumulation_steps
scaler.scale(loss).backward()
if step % args.gradient_accumulation_steps == 0:
if args.max_grad_norm > 0:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
scaler.step(optimizer)
scaler.update()
scheduler.step()
model.zero_grad()
num_steps += 1
if (step + 1) == len(train_dataloader) or (
args.evaluation_steps > 0 and num_steps % args.evaluation_steps == 0 and step % args.gradient_accumulation_steps == 0):
dev_scores, dev_output, official_results, results = evaluate(args, model, dev_features, tag="dev")
print(dev_output)
if dev_scores["dev_F1_ign"] > best_score:
best_score = dev_scores["dev_F1_ign"]
best_offi_results = official_results
best_results = results
best_output = dev_output
ckpt_file = os.path.join(args.save_path, "best.ckpt")
print(f"saving model checkpoint into {ckpt_file} ...")
torch.save(model.state_dict(), ckpt_file)
if epoch == train_iterator[-1]: # last epoch
ckpt_file = os.path.join(args.save_path, "last.ckpt")
print(f"saving model checkpoint into {ckpt_file} ...")
torch.save(model.state_dict(), ckpt_file)
pred_file = os.path.join(args.save_path, args.pred_file)
score_file = os.path.join(args.save_path, "scores.csv")
results_file = os.path.join(args.save_path, f"topk_{args.pred_file}")
dump_to_file(best_offi_results, pred_file, best_output, score_file, best_results, results_file)
return num_steps
new_layer = ["extractor", "bilinear", "graph"]
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], },
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], "lr": args.lr_added},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr_transformer, eps=args.adam_epsilon)
num_steps = 0 | set_seed(args) | 2 | 2023-10-20 05:53:25+00:00 | 12k |
xingchenshanyao/YOLOP-E | lib/dataset/bdd.py | [
{
"identifier": "AutoDriveDataset",
"path": "lib/dataset/AutoDriveDataset.py",
"snippet": "class AutoDriveDataset(Dataset):\n \"\"\"\n A general Dataset for some common function\n \"\"\"\n def __init__(self, cfg, is_train, inputsize=640, transform=None):\n \"\"\"\n initial all the characteristic\n\n Inputs:\n -cfg: configurations\n -is_train(bool): whether train set or not\n -transform: ToTensor and Normalize\n \n Returns:\n None\n \"\"\"\n self.is_train = is_train\n self.cfg = cfg\n self.transform = transform\n self.inputsize = inputsize\n self.Tensor = transforms.ToTensor()\n img_root = Path(cfg.DATASET.DATAROOT)\n label_root = Path(cfg.DATASET.LABELROOT)\n mask_root = Path(cfg.DATASET.MASKROOT)\n lane_root = Path(cfg.DATASET.LANEROOT)\n if is_train:\n indicator = cfg.DATASET.TRAIN_SET\n else:\n indicator = cfg.DATASET.TEST_SET\n self.img_root = img_root / indicator\n self.label_root = label_root / indicator\n self.mask_root = mask_root / indicator\n self.lane_root = lane_root / indicator\n # self.label_list = self.label_root.iterdir()\n self.mask_list = self.mask_root.iterdir()\n\n self.db = []\n\n self.data_format = cfg.DATASET.DATA_FORMAT\n\n self.scale_factor = cfg.DATASET.SCALE_FACTOR\n self.rotation_factor = cfg.DATASET.ROT_FACTOR\n self.flip = cfg.DATASET.FLIP\n self.color_rgb = cfg.DATASET.COLOR_RGB\n\n # self.target_type = cfg.MODEL.TARGET_TYPE\n self.shapes = np.array(cfg.DATASET.ORG_IMG_SIZE)\n \n def _get_db(self):\n \"\"\"\n finished on children Dataset(for dataset which is not in Bdd100k format, rewrite children Dataset)\n \"\"\"\n raise NotImplementedError\n\n def evaluate(self, cfg, preds, output_dir):\n \"\"\"\n finished on children dataset\n \"\"\"\n raise NotImplementedError\n \n def __len__(self,):\n \"\"\"\n number of objects in the dataset\n \"\"\"\n return len(self.db)\n\n def __getitem__(self, idx):\n \"\"\"\n Get input and groud-truth from database & add data augmentation on input\n\n Inputs:\n -idx: the index of image in self.db(database)(list)\n self.db(list) [a,b,c,...]\n a: (dictionary){'image':, 'information':}\n\n Returns:\n -image: transformed image, first passed the data augmentation in __getitem__ function(type:numpy), then apply self.transform\n -target: ground truth(det_gt,seg_gt)\n\n function maybe useful\n cv2.imread\n cv2.cvtColor(data, cv2.COLOR_BGR2RGB)\n cv2.warpAffine\n \"\"\"\n data = self.db[idx]\n data_label = data[\"label\"]\n id_image = int(data[\"image\"].split('/')[-1][:-4]) # 获取图片序号\n img = cv2.imread(data[\"image\"], cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)\n # cv2.imshow(\"img\",img) # 原图像\n # cv2.waitKey(5000)\n \n # print(\"img = zmIceColor(img/255.0)*255\")\n # img = zmIceColor(img/255.0)*255\n # cv2.imshow(\"img\",img/255) # ACE自动色彩均衡快速算法\n # cv2.waitKey(5000)\n\n # Only Mascio Enhancement 数据增强 \n for line in data_label:\n idx_0 = line[0]\n x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720)\n x1, y1, x2, y2 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2)\n random.seed(idx)\n if self.is_train and int(idx_0) == 9 and random.random() > 1: # 只增强Straight or Right Turn Arrow\n # if self.is_train:\n # if True:\n c_y = 10 # 偏移间隙\n c_x = 0\n\n x_c_new = x_c+c_x\n y_c_new = y_c+h_c+c_y\n x1_new, y1_new, x2_new, y2_new = x1+c_x, y1+h_c+c_y, x2+c_x, y2+h_c+c_y\n\n if (x1_new >=0 and x2_new <=1280 and y1_new>=0 and y2_new <=720):\n # 向下重叠一次\n Is_add = True\n for line0 in data_label:\n x1_0, y1_0, x2_0, y2_0 = line0[1]*1280-line0[3]*1280/2, line0[2]*1280-line0[4]*720/2, line0[1]*1280+line0[3]*1280/2, line0[2]*1280+line0[4]*720/2\n if (x1_new>x1_0 and 
y1_new>y1_0 and x1_new<x2_0 and y1_new<y2_0) or (x2_new>x1_0 and y2_new>y1_0 and x2_new<x2_0 and y2_new<y2_0) or (x1_new<x1_0 and y1_new<y1_0 and x2_new>x2_0 and y2_new>y2_0):\n Is_add = False\n break\n if Is_add:\n try:\n cropped_line = [[idx_0, x_c_new, y_c_new, w_c, h_c]]\n data_label = np.append(data_label, cropped_line, axis=0)\n img[int(y1_new):int(y2_new), int(x1_new):int(x2_new)] = img[int(y1):int(y2), int(x1):int(x2)]\n except:\n Is_add = True\n # cv2.imshow(\"img\",img) \n # cv2.waitKey(10000)\n\n # Specific Mascio Enhancement数据增强 \n cropped_path0 = '/home/xingchen/Study/dataset/SDExpressway/traffic_object_cropped/'\n f=open('/home/xingchen/Study/dataset/SDExpressway/traffic_object_cropped.txt','r')\n lines=f.readlines()\n f.close()\n c_c = 10\n p = 0.8 # 数据增强概率\n # Only_day = True\n Only_day = False #只加强白天的图片\n # if self.is_train: # 限定只有训练的时候增强\n # if True:\n if False:\n random.seed(idx)\n if random.random() > p-0.1 : # Straight or Right Turn Arrow增强\n Is_add = True\n if id_image >= 3294 and Only_day: # 只加强白天的图片\n Is_add = False\n cropped_path = cropped_path0+'Straight or Right Turn Arrow/'\n fileList = os.listdir(cropped_path)\n cropped_id = random.randint(0,len(fileList)-1)\n txt_id = int(fileList[cropped_id].split('_')[0])\n txt_line = lines[txt_id-1].split(' ')\n x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5])\n if x1>x2:\n x1,x2 = x2,x1\n if y1>y2:\n y1,y2 = y2,y1\n for line in data_label:\n idx_0 = line[0]\n x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720)\n x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2)\n if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0):\n Is_add = False\n break\n if Is_add:\n try:\n cropped = cv2.imread(cropped_path+fileList[cropped_id])\n img[int(y1):int(y2), int(x1):int(x2)] = cropped\n cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]]\n data_label = np.append(data_label, cropped_line, axis=0)\n except:\n Is_add = True\n\n if random.random() > p-0.1 : # Straight Ahead Arrow增强\n Is_add = True\n if id_image >= 3294 and Only_day: # 只加强白天的图片\n Is_add = False\n cropped_path = cropped_path0+'Straight Ahead Arrow/'\n fileList = os.listdir(cropped_path)\n cropped_id = random.randint(0,len(fileList)-1)\n txt_id = int(fileList[cropped_id].split('_')[0])\n txt_line = lines[txt_id-1].split(' ')\n x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5])\n if x1>x2:\n x1,x2 = x2,x1\n if y1>y2:\n y1,y2 = y2,y1\n for line in data_label:\n idx_0 = line[0]\n x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720)\n x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2)\n if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0):\n Is_add = False\n break\n if Is_add:\n try:\n cropped = cv2.imread(cropped_path+fileList[cropped_id])\n img[int(y1):int(y2), int(x1):int(x2)] = cropped\n cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]]\n data_label = np.append(data_label, cropped_line, axis=0)\n except:\n Is_add = True\n\n if random.random() > p : # Speed Limit Sign增强\n Is_add = True\n if id_image >= 3294 and Only_day: # 只加强白天的图片\n Is_add = False\n 
cropped_path = cropped_path0+'Speed Limit Sign/'\n fileList = os.listdir(cropped_path)\n cropped_id = random.randint(0,len(fileList)-1)\n txt_id = int(fileList[cropped_id].split('_')[0])\n txt_line = lines[txt_id-1].split(' ')\n x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5])\n if x1>x2:\n x1,x2 = x2,x1\n if y1>y2:\n y1,y2 = y2,y1\n for line in data_label:\n idx_0 = line[0]\n x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720)\n x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2)\n if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0):\n Is_add = False\n break\n if Is_add:\n try:\n cropped = cv2.imread(cropped_path+fileList[cropped_id])\n img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped\n cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]]\n data_label = np.append(data_label, cropped_line, axis=0)\n except:\n Is_add = True\n \n\n if random.random() > p : # Emergency Telephone Sign增强\n Is_add = True\n if id_image >= 3294 and Only_day: # 只加强白天的图片\n Is_add = False\n cropped_path = cropped_path0+'Emergency Telephone Sign/'\n fileList = os.listdir(cropped_path)\n cropped_id = random.randint(0,len(fileList)-1)\n txt_id = int(fileList[cropped_id].split('_')[0])\n txt_line = lines[txt_id-1].split(' ')\n x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5])\n if x1>x2:\n x1,x2 = x2,x1\n if y1>y2:\n y1,y2 = y2,y1\n for line in data_label:\n idx_0 = line[0]\n x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720)\n x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2)\n if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0):\n Is_add = False\n break\n if Is_add:\n try:\n cropped = cv2.imread(cropped_path+fileList[cropped_id])\n img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped\n cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]]\n data_label = np.append(data_label, cropped_line, axis=0)\n except:\n Is_add = True\n\n if random.random() > p : # Warning Sign增强\n Is_add = True\n if id_image >= 3294 and Only_day: # 只加强白天的图片\n Is_add = False\n cropped_path = cropped_path0+'Warning Sign/'\n fileList = os.listdir(cropped_path)\n cropped_id = random.randint(0,len(fileList)-1)\n txt_id = int(fileList[cropped_id].split('_')[0])\n txt_line = lines[txt_id-1].split(' ')\n x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5])\n if x1>x2:\n x1,x2 = x2,x1\n if y1>y2:\n y1,y2 = y2,y1\n for line in data_label:\n idx_0 = line[0]\n x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720)\n x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2)\n if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0):\n Is_add = False\n break\n if Is_add:\n try:\n cropped = cv2.imread(cropped_path+fileList[cropped_id])\n img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped\n 
cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]]\n data_label = np.append(data_label, cropped_line, axis=0)\n except:\n Is_add = True\n\n if random.random() > p : # Directional Sign增强\n Is_add = True\n if id_image >= 3294 and Only_day: # 只加强白天的图片\n Is_add = False\n cropped_path = cropped_path0+'Directional Sign/'\n fileList = os.listdir(cropped_path)\n cropped_id = random.randint(0,len(fileList)-1)\n txt_id = int(fileList[cropped_id].split('_')[0])\n txt_line = lines[txt_id-1].split(' ')\n x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5])\n if x1>x2:\n x1,x2 = x2,x1\n if y1>y2:\n y1,y2 = y2,y1\n for line in data_label:\n idx_0 = line[0]\n x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720)\n x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2)\n if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0):\n Is_add = False\n break\n if Is_add:\n try:\n cropped = cv2.imread(cropped_path+fileList[cropped_id])\n img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped\n cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]]\n data_label = np.append(data_label, cropped_line, axis=0)\n except:\n Is_add = True\n\n if random.random() > p : # Pending Sign增强\n Is_add = True\n if id_image >= 3294 and Only_day: # 只加强白天的图片\n Is_add = False\n cropped_path = cropped_path0+'Pending Sign/'\n fileList = os.listdir(cropped_path)\n cropped_id = random.randint(0,len(fileList)-1)\n txt_id = int(fileList[cropped_id].split('_')[0])\n txt_line = lines[txt_id-1].split(' ')\n x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5])\n if x1>x2:\n x1,x2 = x2,x1\n if y1>y2:\n y1,y2 = y2,y1\n for line in data_label:\n idx_0 = line[0]\n x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720)\n x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2)\n if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0):\n Is_add = False\n break\n if Is_add:\n try:\n cropped = cv2.imread(cropped_path+fileList[cropped_id])\n img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped\n cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]]\n data_label = np.append(data_label, cropped_line, axis=0)\n except:\n Is_add = True\n\n if random.random() > p : # Guidance Sign增强\n Is_add = True\n if id_image >= 3294 and Only_day: # 只加强白天的图片\n Is_add = False\n cropped_path = cropped_path0+'Guidance Sign/'\n fileList = os.listdir(cropped_path)\n cropped_id = random.randint(0,len(fileList)-1)\n txt_id = int(fileList[cropped_id].split('_')[0])\n txt_line = lines[txt_id-1].split(' ')\n x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5])\n if x1>x2:\n x1,x2 = x2,x1\n if y1>y2:\n y1,y2 = y2,y1\n for line in data_label:\n idx_0 = line[0]\n x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720)\n x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2)\n if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and 
x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0):\n Is_add = False\n break\n if Is_add:\n try:\n cropped = cv2.imread(cropped_path+fileList[cropped_id])\n img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped\n cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]]\n data_label = np.append(data_label, cropped_line, axis=0)\n except:\n Is_add = True\n\n\n data[\"label\"] = data_label\n # cv2.imshow(\"img\",img) \n # cv2.waitKey(10000)\n\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # cv2.imshow(\"img\",img) # 图像颜色空间转换\n # cv2.waitKey(10000)\n\n # seg_label = cv2.imread(data[\"mask\"], 0)\n if self.cfg.num_seg_class == 3:\n seg_label = cv2.imread(data[\"mask\"])\n else:\n seg_label = cv2.imread(data[\"mask\"], 0)\n lane_label = cv2.imread(data[\"lane\"], 0)\n #print(lane_label.shape)\n # print(seg_label.shape)\n # print(lane_label.shape)\n # print(seg_label.shape)\n resized_shape = self.inputsize\n if isinstance(resized_shape, list):\n resized_shape = max(resized_shape)\n h0, w0 = img.shape[:2] # orig hw\n r = resized_shape / max(h0, w0) # resize image to img_size\n if r != 1: # always resize down, only resize up if training with augmentation\n interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR\n img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)\n # cv2.imshow(\"img\",img) # 图像缩小到640*360\n # cv2.waitKey(10000)\n seg_label = cv2.resize(seg_label, (int(w0 * r), int(h0 * r)), interpolation=interp)\n lane_label = cv2.resize(lane_label, (int(w0 * r), int(h0 * r)), interpolation=interp)\n h, w = img.shape[:2]\n \n (img, seg_label, lane_label), ratio, pad = letterbox((img, seg_label, lane_label), resized_shape, auto=True, scaleup=self.is_train)\n shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling\n # ratio = (w / w0, h / h0)\n # print(resized_shape)\n \n det_label = data[\"label\"]\n labels=[]\n \n if det_label.size > 0:\n # Normalized xywh to pixel xyxy format\n labels = det_label.copy()\n labels[:, 1] = ratio[0] * w * (det_label[:, 1] - det_label[:, 3] / 2) + pad[0] # pad width\n labels[:, 2] = ratio[1] * h * (det_label[:, 2] - det_label[:, 4] / 2) + pad[1] # pad height\n labels[:, 3] = ratio[0] * w * (det_label[:, 1] + det_label[:, 3] / 2) + pad[0]\n labels[:, 4] = ratio[1] * h * (det_label[:, 2] + det_label[:, 4] / 2) + pad[1]\n \n if self.is_train:\n combination = (img, seg_label, lane_label)\n (img, seg_label, lane_label), labels = random_perspective(\n combination=combination,\n targets=labels,\n degrees=self.cfg.DATASET.ROT_FACTOR,\n translate=self.cfg.DATASET.TRANSLATE,\n scale=self.cfg.DATASET.SCALE_FACTOR,\n shear=self.cfg.DATASET.SHEAR\n )\n #print(labels.shape)\n augment_hsv(img, hgain=self.cfg.DATASET.HSV_H, sgain=self.cfg.DATASET.HSV_S, vgain=self.cfg.DATASET.HSV_V)\n # img, seg_label, labels = cutout(combination=combination, labels=labels)\n\n if len(labels):\n # convert xyxy to xywh\n labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])\n\n # Normalize coordinates 0 - 1\n labels[:, [2, 4]] /= img.shape[0] # height\n labels[:, [1, 3]] /= img.shape[1] # width\n\n # if self.is_train:\n # random left-right flip\n lr_flip = True\n if lr_flip and random.random() < 0.5:\n img = np.fliplr(img)\n seg_label = np.fliplr(seg_label)\n lane_label = np.fliplr(lane_label)\n if len(labels):\n labels[:, 1] = 1 - labels[:, 1]\n\n # random up-down flip\n ud_flip = False\n if ud_flip and random.random() < 0.5:\n img = np.flipud(img)\n seg_label = np.filpud(seg_label)\n lane_label 
= np.filpud(lane_label)\n if len(labels):\n labels[:, 2] = 1 - labels[:, 2]\n \n else:\n if len(labels):\n # convert xyxy to xywh\n labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])\n\n # Normalize coordinates 0 - 1\n labels[:, [2, 4]] /= img.shape[0] # height\n labels[:, [1, 3]] /= img.shape[1] # width\n\n labels_out = torch.zeros((len(labels), 6))\n if len(labels):\n labels_out[:, 1:] = torch.from_numpy(labels)\n # Convert\n # img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\n # img = img.transpose(2, 0, 1)\n # cv2.imshow(\"img\", img)\n # cv2.waitKey(10000)\n img = np.ascontiguousarray(img) # 返回一个连续的array\n\n # seg_label = np.ascontiguousarray(seg_label)\n # if idx == 0:\n # print(seg_label[:,:,0])\n\n if self.cfg.num_seg_class == 3:\n _,seg0 = cv2.threshold(seg_label[:,:,0],128,255,cv2.THRESH_BINARY)\n _,seg1 = cv2.threshold(seg_label[:,:,1],1,255,cv2.THRESH_BINARY)\n _,seg2 = cv2.threshold(seg_label[:,:,2],1,255,cv2.THRESH_BINARY)\n else:\n _,seg1 = cv2.threshold(seg_label,1,255,cv2.THRESH_BINARY)\n _,seg2 = cv2.threshold(seg_label,1,255,cv2.THRESH_BINARY_INV)\n _,lane1 = cv2.threshold(lane_label,1,255,cv2.THRESH_BINARY)\n _,lane2 = cv2.threshold(lane_label,1,255,cv2.THRESH_BINARY_INV)\n# _,seg2 = cv2.threshold(seg_label[:,:,2],1,255,cv2.THRESH_BINARY)\n # # seg1[cutout_mask] = 0\n # # seg2[cutout_mask] = 0\n \n # seg_label /= 255\n # seg0 = self.Tensor(seg0)\n if self.cfg.num_seg_class == 3:\n seg0 = self.Tensor(seg0)\n seg1 = self.Tensor(seg1)\n seg2 = self.Tensor(seg2)\n # seg1 = self.Tensor(seg1)\n # seg2 = self.Tensor(seg2)\n lane1 = self.Tensor(lane1)\n lane2 = self.Tensor(lane2)\n\n # seg_label = torch.stack((seg2[0], seg1[0]),0)\n if self.cfg.num_seg_class == 3:\n seg_label = torch.stack((seg0[0],seg1[0],seg2[0]),0)\n else:\n seg_label = torch.stack((seg2[0], seg1[0]),0)\n \n lane_label = torch.stack((lane2[0], lane1[0]),0)\n # _, gt_mask = torch.max(seg_label, 0)\n # _ = show_seg_result(img, gt_mask, idx, 0, save_dir='debug', is_gt=True)\n \n\n target = [labels_out, seg_label, lane_label]\n # cv2.imshow(\"img\", img) # 这里img还是图像\n # cv2.waitKey(10000)\n # print(img)\n img = self.transform(img) # 这里img变成了数组的格式\n # print(img)\n\n return img, target, data[\"image\"], shapes\n\n def select_data(self, db):\n \"\"\"\n You can use this function to filter useless images in the dataset\n\n Inputs:\n -db: (list)database\n\n Returns:\n -db_selected: (list)filtered dataset\n \"\"\"\n db_selected = ...\n return db_selected\n\n @staticmethod\n def collate_fn(batch):\n img, label, paths, shapes= zip(*batch)\n label_det, label_seg, label_lane = [], [], []\n for i, l in enumerate(label):\n l_det, l_seg, l_lane = l\n l_det[:, 0] = i # add target image index for build_targets()\n label_det.append(l_det)\n label_seg.append(l_seg)\n label_lane.append(l_lane)\n return torch.stack(img, 0), [torch.cat(label_det, 0), torch.stack(label_seg, 0), torch.stack(label_lane, 0)], paths, shapes"
},
{
"identifier": "convert",
"path": "lib/dataset/convert.py",
"snippet": "def convert(size, box):\r"
}
] | import numpy as np
import json
from .AutoDriveDataset import AutoDriveDataset
from .convert import convert, id_dict, id_dict_single, id_dict_SDExpressway, id_dict_SDExpressway_single
from tqdm import tqdm | 9,689 |
single_cls = False # just detect vehicle
class BddDataset(AutoDriveDataset):
def __init__(self, cfg, is_train, inputsize, transform=None):
super().__init__(cfg, is_train, inputsize, transform)
        self.db = self._get_db() # load the dataset # self.db = [{'image': '/home/xingchen/Study...3225df.jpg', 'label': array([[0. , ...7547441]]), 'mask': '/home/xingchen/Study...3225df.png', 'lane': '/home/xingchen/Study...3225df.png'}, ...]
self.cfg = cfg
def _get_db(self):
"""
get database from the annotation file
Inputs:
Returns:
gt_db: (list)database [a,b,c,...]
a: (dictionary){'image':, 'information':, ......}
image: image path
            mask: path of the segmentation label
label: [cls_id, center_x//256, center_y//256, w//256, h//256] 256=IMAGE_SIZE
"""
print('building database...')
gt_db = []
height, width = self.shapes
        for mask in tqdm(list(self.mask_list)): # load the dataset images and labels
mask_path = str(mask)
label_path = mask_path.replace(str(self.mask_root), str(self.label_root)).replace(".png", ".json")
image_path = mask_path.replace(str(self.mask_root), str(self.img_root)).replace(".png", ".jpg")
lane_path = mask_path.replace(str(self.mask_root), str(self.lane_root))
with open(label_path, 'r') as f:
label = json.load(f)
# # BDD100k
# data = label['frames'][0]['objects']
# data = self.filter_data(data)
# gt = np.zeros((len(data), 5))
# for idx, obj in enumerate(data):
# category = obj['category']
# if category == "traffic light":
# color = obj['attributes']['trafficLightColor']
# category = "tl_" + color
# if category in id_dict.keys():
# x1 = float(obj['box2d']['x1'])
# y1 = float(obj['box2d']['y1'])
# x2 = float(obj['box2d']['x2'])
# y2 = float(obj['box2d']['y2'])
# cls_id = id_dict[category]
# gt[idx][0] = cls_id
# box = convert((width, height), (x1, x2, y1, y2))
# gt[idx][1:] = list(box)
# SDExpressway
data = label['shapes']
data = self.filter_data(data)
gt = np.zeros((len(data), 5))
for idx, obj in enumerate(data):
                category = obj['label'] # class label
if category in id_dict_SDExpressway.keys():
x1 = float(obj['points'][0][0])
y1 = float(obj['points'][0][1])
x2 = float(obj['points'][1][0])
y2 = float(obj['points'][1][1])
if x1>x2:
x1, x2 = x2, x1
if y1>y2:
y1, y2 = y2, y1
cls_id = id_dict_SDExpressway[category]
# if single_cls: # 20230816
# cls_id=0
gt[idx][0] = cls_id
box = convert((width, height), (x1, x2, y1, y2))
gt[idx][1:] = list(box)
rec = [{
'image': image_path,
'label': gt,
'mask': mask_path,
'lane': lane_path
}]
gt_db += rec
print('database build finish')
return gt_db
    # # BDD100k dataset
# def filter_data(self, data):
# remain = []
# for obj in data:
# if 'box2d' in obj.keys(): # obj.has_key('box2d'):
    #             if single_cls: # only detect vehicles
# if obj['category'] in id_dict_single.keys():
# remain.append(obj)
# else:
# remain.append(obj)
# return remain
    # SDExpressway dataset
def filter_data(self, data):
remain = []
for obj in data:
if 'points' in obj.keys(): # obj.has_key('box2d'):
if single_cls:
|
single_cls = False # just detect vehicle
class BddDataset(AutoDriveDataset):
def __init__(self, cfg, is_train, inputsize, transform=None):
super().__init__(cfg, is_train, inputsize, transform)
        self.db = self._get_db() # load the dataset # self.db = [{'image': '/home/xingchen/Study...3225df.jpg', 'label': array([[0. , ...7547441]]), 'mask': '/home/xingchen/Study...3225df.png', 'lane': '/home/xingchen/Study...3225df.png'}, ...]
self.cfg = cfg
def _get_db(self):
"""
get database from the annotation file
Inputs:
Returns:
gt_db: (list)database [a,b,c,...]
a: (dictionary){'image':, 'information':, ......}
image: image path
            mask: path of the segmentation label
label: [cls_id, center_x//256, center_y//256, w//256, h//256] 256=IMAGE_SIZE
"""
print('building database...')
gt_db = []
height, width = self.shapes
        for mask in tqdm(list(self.mask_list)): # load the dataset images and labels
mask_path = str(mask)
label_path = mask_path.replace(str(self.mask_root), str(self.label_root)).replace(".png", ".json")
image_path = mask_path.replace(str(self.mask_root), str(self.img_root)).replace(".png", ".jpg")
lane_path = mask_path.replace(str(self.mask_root), str(self.lane_root))
with open(label_path, 'r') as f:
label = json.load(f)
# # BDD100k
# data = label['frames'][0]['objects']
# data = self.filter_data(data)
# gt = np.zeros((len(data), 5))
# for idx, obj in enumerate(data):
# category = obj['category']
# if category == "traffic light":
# color = obj['attributes']['trafficLightColor']
# category = "tl_" + color
# if category in id_dict.keys():
# x1 = float(obj['box2d']['x1'])
# y1 = float(obj['box2d']['y1'])
# x2 = float(obj['box2d']['x2'])
# y2 = float(obj['box2d']['y2'])
# cls_id = id_dict[category]
# gt[idx][0] = cls_id
# box = convert((width, height), (x1, x2, y1, y2))
# gt[idx][1:] = list(box)
# SDExpressway
data = label['shapes']
data = self.filter_data(data)
gt = np.zeros((len(data), 5))
for idx, obj in enumerate(data):
                category = obj['label'] # class label
if category in id_dict_SDExpressway.keys():
x1 = float(obj['points'][0][0])
y1 = float(obj['points'][0][1])
x2 = float(obj['points'][1][0])
y2 = float(obj['points'][1][1])
if x1>x2:
x1, x2 = x2, x1
if y1>y2:
y1, y2 = y2, y1
cls_id = id_dict_SDExpressway[category]
# if single_cls: # 20230816
# cls_id=0
gt[idx][0] = cls_id
box = convert((width, height), (x1, x2, y1, y2))
gt[idx][1:] = list(box)
rec = [{
'image': image_path,
'label': gt,
'mask': mask_path,
'lane': lane_path
}]
gt_db += rec
print('database build finish')
return gt_db
    # # BDD100k dataset
# def filter_data(self, data):
# remain = []
# for obj in data:
# if 'box2d' in obj.keys(): # obj.has_key('box2d'):
    #             if single_cls: # only detect vehicles
# if obj['category'] in id_dict_single.keys():
# remain.append(obj)
# else:
# remain.append(obj)
# return remain
    # SDExpressway dataset
def filter_data(self, data):
remain = []
for obj in data:
if 'points' in obj.keys(): # obj.has_key('box2d'):
if single_cls: | if obj['label'] in id_dict_SDExpressway_single.keys(): | 1 | 2023-10-24 02:08:25+00:00 | 12k |