repo_name (string, lengths 7-71) | file_path (string, lengths 5-118) | context (list) | import_statement (string, lengths 45-12.5k) | token_num (int64, 641-99.4k) | cropped_code (string, lengths 44-17k) | all_code (string, lengths 43-754k) | next_line (string, lengths 2-330) | gold_snippet_index (int64, 0-68) | created_at (string, length 25) | level (string, 9 classes) |
---|---|---|---|---|---|---|---|---|---|---|
Secilia-Cxy/UNetTFI | train.py | [
{
"identifier": "recall_precision_f1_acc",
"path": "utils/evaluate.py",
"snippet": "def recall_precision_f1_acc(y=None, y_hat=None, cm=None):\n \"\"\" returns metrics for recall, precision, f1, accuracy\n\n Args:\n y (numpy array): ground truth \n y_hat (numpy array): prediction \n\n Returns:\n recall(float): recall/TPR \n precision(float): precision/PPV\n F1(float): f1-score\n acc(float): accuracy\n csi(float): critical success index\n \"\"\"\n\n # pytorch to numpy\n if cm is None:\n cm = get_confusion_matrix(y, y_hat)\n\n # if len(cm) == 4:\n tn, fp, fn, tp = cm\n recall, precision, F1, acc, csi = torch.tensor(0.), torch.tensor(0.), torch.tensor(0.), torch.tensor(\n 0.), torch.tensor(0.)\n\n if (tp + fn) > 0:\n recall = tp / (tp + fn)\n\n if (tp + fp) > 0:\n precision = tp / (tp + fp)\n\n if (precision + recall) > 0:\n F1 = 2 * (precision * recall) / (precision + recall)\n\n if (tp + fn + fp) > 0:\n csi = tp / (tp + fn + fp)\n\n if (tn + fp + fn + tp) > 0:\n acc = (tn + tp) / (tn + fp + fn + tp)\n\n return recall.item(), precision.item(), F1.item(), acc.item(), csi.item()"
},
{
"identifier": "get_confusion_matrix",
"path": "utils/evaluate.py",
"snippet": "def get_confusion_matrix(y, y_hat):\n \"\"\"get confusion matrix from y_true and y_pred\n\n Args:\n y_true (numpy array): ground truth \n y_pred (numpy array): prediction \n\n Returns:\n confusion matrix\n \"\"\"\n\n unique_mapping = (y * 2 + y_hat).to(torch.long).view(-1)\n cm = [0, 0, 0, 0]\n for i in range(len(cm)):\n cm[i] = (unique_mapping == i).sum()\n\n return cm"
},
{
"identifier": "UNet_Lightning",
"path": "models/unet_lightning_w4c23.py",
"snippet": "class UNet_Lightning(pl.LightningModule):\n def __init__(self, UNet_params: dict, params: dict,\n **kwargs):\n super(UNet_Lightning, self).__init__()\n self.plot_results = get_dict_value(params, 'plot_results', False)\n self.in_channel_to_plot = get_dict_value(params, 'in_channel_to_plot', 7)\n self.in_channels = params['in_channels']\n self.start_filts = params['init_filter_size']\n self.dropout_rate = params['dropout_rate']\n self.multi_output = UNet_params['multi_output']\n self.crop_size = UNet_params['crop_size']\n self.rotation_aug = get_dict_value(UNet_params, 'rotation_aug', False)\n self.repeated_aug = get_dict_value(UNet_params, 'repeated_aug', True)\n if self.rotation_aug:\n self.candidate_rotation = [\n lambda x: torch.flip(x, dims=[-1]),\n lambda x: torch.flip(x, dims=[-2]),\n lambda x: torch.flip(x, dims=[-1, -2]),\n lambda x: torch.rot90(x, k=1, dims=[-1, -2]),\n lambda x: torch.rot90(x, k=-1, dims=[-1, -2]),\n lambda x: torch.rot90(x, k=2, dims=[-1, -2])\n ]\n self.center_output = get_dict_value(UNet_params, 'center_output', 0)\n print(\"use rotation augmentation : \", self.rotation_aug)\n print(\"use center output : \", self.center_output)\n self.out_channels = params['len_seq_predict']\n self.use_embedding = get_dict_value(UNet_params, 'use_embedding', False)\n self.use_time_mix = get_dict_value(UNet_params, 'use_time_mix', False)\n if self.use_embedding:\n self.embedding = nn.Embedding(7 * 2, 252 * 252)\n self.in_channels += 1\n\n backbone = get_dict_value(params, \"backbone\", \"3D_UNET_base\")\n if backbone == \"3D_UNET_base\":\n self.model = Base_UNET3D(in_channels=self.in_channels, start_filts=self.start_filts,\n dropout_rate=self.dropout_rate, out_channels=self.out_channels,\n multi_output=self.multi_output, crop_input=self.crop_size,\n crop_output=self.center_output)\n\n elif params[\"backbone\"] == \"UNET2D\":\n self.model = UNET2D(input_channels=self.in_channels, input_step=4,\n crop_input=self.crop_size, crop_output=self.center_output,\n num_class=6 if self.multi_output else 1, forecast_step=32)\n\n\n else:\n raise NotImplementedError(f\"model {params['backbone']} not implemented\")\n\n self.save_hyperparameters()\n self.params = params\n\n self.val_batch = 0\n\n self.prec = 7\n\n pos_weight = torch.tensor(params['pos_weight'])\n if VERBOSE: print(\"Positive weight:\", pos_weight)\n\n self.loss = params['loss']\n self.thres = None\n self.bs = params['batch_size']\n self.loss_fn = get_lossfx(self.loss, params)\n self.main_metric = {\n 'DiceLoss': 'Dice',\n }[self.loss]\n\n self.relu = nn.ReLU() # None\n t = f\"============== n_workers: {params['n_workers']} | batch_size: {params['batch_size']} \\n\" + \\\n f\"============== loss: {self.loss}\"\n print(t)\n\n def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:\n super().on_load_checkpoint(checkpoint)\n print(\"loaded checkpoints\")\n\n def freeze_backbone(self):\n for name, param in self.model.named_parameters():\n param.requires_grad = False\n if self.use_embedding:\n for name, param in self.embedding.named_parameters():\n param.requires_grad = False\n\n def on_fit_start(self):\n \"\"\" create a placeholder to save the results of the metric per variable \"\"\"\n metric_placeholder = {self.main_metric: -1}\n self.logger.log_hyperparams(self.hparams)\n self.logger.log_metrics(metric_placeholder)\n\n def forward(self, x, metadata=None):\n if self.use_embedding:\n emb_idx = get_emb_idx(metadata, x.device)\n emb = self.embedding(emb_idx)\n emb = emb.reshape(-1, 1, 1, 252, 252)\n emb = 
emb.repeat(x.shape[0] // emb.shape[0], 1, x.shape[2], 1, 1)\n x = torch.cat([x, emb], dim=1)\n x = self.model(x)\n return x\n\n def retrieve_only_valid_pixels(self, x, m):\n \"\"\" we asume 1s in mask are invalid pixels \"\"\"\n ##print(f\"x: {x.shape} | mask: {m.shape}\")\n return x[~m]\n\n def get_target_mask(self, metadata):\n mask = metadata['target']['mask']\n # print(\"mask---->\", mask.shape)\n return mask\n\n def _compute_loss(self, y_hat, y, mask=None):\n loss = self.loss_fn(y_hat, y, mask=mask)\n return loss\n\n def training_step(self, batch, batch_idx, phase='train'):\n x, y, metadata = batch\n mask = self.get_target_mask(metadata)\n if self.use_time_mix:\n alpha = np.random.beta(1, 1)\n x_mix = (1 - alpha) * x[:, :, :-1] + alpha * x[:, :, 1:]\n y_mix = (1 - alpha) * y[:, :, :-1] + alpha * y[:, :, 1:]\n mask_mix = mask[:, :, :-1] | mask[:, :, 1:]\n x = torch.cat([x[:, :, :-1], x[:, :, 1:], x_mix], dim=0)\n y = torch.cat([y[:, :, :-1], y[:, :, 1:], y_mix], dim=0)\n mask = torch.cat([mask[:, :, :-1], mask[:, :, 1:], mask_mix], dim=0)\n else:\n x = torch.cat([x[:, :, :-1], x[:, :, 1:]], dim=0)\n y = torch.cat([y[:, :, :-1], y[:, :, 1:]], dim=0)\n mask = torch.cat([mask[:, :, :-1], mask[:, :, 1:]], dim=0)\n if self.multi_output:\n mask = torch.repeat_interleave(mask, repeats=6, dim=1)\n y = to_one_hot(y)\n\n if self.rotation_aug:\n if self.repeated_aug:\n x = torch.cat([x, torch.flip(x, dims=[-1]), torch.flip(x, dims=[-2]), torch.flip(x, dims=[-2, -1])],\n dim=0)\n y = torch.cat([y, torch.flip(y, dims=[-1]), torch.flip(y, dims=[-2]), torch.flip(y, dims=[-2, -1])],\n dim=0)\n mask = torch.cat(\n [mask, torch.flip(mask, dims=[-1]), torch.flip(mask, dims=[-2]), torch.flip(mask, dims=[-2, -1])],\n dim=0)\n else:\n randi = random.randint(0, len(self.candidate_rotation) - 1)\n x = torch.cat([x, self.candidate_rotation[randi](x)], dim=0)\n y = torch.cat([y, self.candidate_rotation[randi](y)], dim=0)\n mask = torch.cat(\n [mask, self.candidate_rotation[randi](mask)],\n dim=0)\n\n if VERBOSE:\n print('x', x.shape, 'y', y.shape, '----------------- batch')\n\n y_hat = self.forward(x, metadata)\n\n if VERBOSE:\n print('y_hat', y_hat.shape, 'y', y.shape, '----------------- model')\n\n loss = self._compute_loss(y_hat, y, mask=mask)\n\n # LOGGING\n self.log(f'{phase}_loss', loss, batch_size=self.bs, sync_dist=True)\n\n return loss\n\n def validation_step(self, batch, batch_idx, phase='val'):\n x, y, metadata = batch\n x_raw = x.detach().clone()\n mask = self.get_target_mask(metadata)\n\n if VERBOSE:\n print('x', x.shape, 'y', y.shape, '----------------- batch')\n\n if self.multi_output:\n mask = torch.repeat_interleave(mask, repeats=6, dim=1)\n y = to_one_hot(y)\n\n if VERBOSE:\n print('x', x.shape, 'y', y.shape, '----------------- batch')\n\n y_hat = self.forward(x, metadata)\n\n if VERBOSE:\n print('y_hat', y_hat.shape, 'y', y.shape, '----------------- model')\n\n loss = self._compute_loss(y_hat, y, mask=mask)\n\n if mask is not None:\n y_hat[mask] = 0\n y[mask] = 0\n\n cm = combine_metrics(y=y, y_hat=y_hat, mode='val_step', multi_output=self.multi_output)\n\n # LOGGING\n self.log(f'{phase}_loss', loss, batch_size=self.bs, sync_dist=True)\n\n if self.plot_results:\n title = f'batch {self.val_batch}'\n for channel in range(11):\n # self.in_channel_to_plot = channel\n self.plot_batch(x_raw, y, y_hat, metadata, title, 'test', vmax=1., channel=channel)\n self.val_batch += 1\n\n return {'loss': loss.cpu().item(), 'N': x.shape[0], \"cm\": cm}\n\n def validation_epoch_end(self, outputs, phase='val'):\n 
print(\"Validation epoch end average over batches: \",\n [batch['N'] for batch in outputs])\n values = {}\n cm = torch.sum(torch.stack([batch[\"cm\"] for batch in outputs], 0), 0)\n values.update({f\"{k}_epoch\": v for k, v in\n combine_metrics(cm=cm, mode=phase, multi_output=self.multi_output).items()})\n\n values[f\"{phase}_loss_epoch\"] = np.average([batch['loss'] for batch in outputs],\n weights=[batch['N'] for batch in outputs])\n self.log_dict(values, batch_size=self.bs, sync_dist=True)\n self.log(self.main_metric, values[f\"{phase}_loss_epoch\"], batch_size=self.bs, sync_dist=True)\n # print(values)\n\n def test_step(self, batch, batch_idx, phase='test'):\n x, y, metadata = batch\n x_raw = x.detach().clone()\n y_raw = y.detach().clone()\n mask = self.get_target_mask(metadata)\n\n if VERBOSE:\n print('x', x.shape, 'y', y.shape, '----------------- batch')\n\n if self.multi_output:\n mask = torch.repeat_interleave(mask, repeats=6, dim=1)\n y = to_one_hot(y)\n\n if VERBOSE:\n print('x', x.shape, 'y', y.shape, '----------------- batch')\n\n y_hat = self.forward(x, metadata)\n\n if VERBOSE:\n print('y_hat', y_hat.shape, 'y', y.shape, '----------------- model')\n\n loss = self._compute_loss(y_hat, y, mask=mask)\n\n if mask is not None:\n y_hat[mask] = 0\n y[mask] = 0\n\n cm = combine_metrics(y=y, y_hat=y_hat, mode='val_step', multi_output=self.multi_output)\n\n # LOGGING\n self.log(f'{phase}_loss', loss, batch_size=self.bs, sync_dist=True)\n\n if self.plot_results:\n title = f'batch {self.val_batch}'\n self.plot_batch(x_raw, y_raw, y_hat, metadata, title, 'test', vmax=1., channel=self.in_channel_to_plot)\n\n self.val_batch += 1\n\n return {'loss': loss.cpu().item(), 'N': x.shape[0], \"cm\": cm, \"y_hat\": y_hat, \"y\": y}\n\n def plot_batch(self, xs, ys, y_hats, metadata, loss, phase, vmax=0.01, vmin=0, channel=0, mix=False):\n figures = []\n\n # ys = to_number(ys)\n y_hats = to_number(y_hats)\n\n # pytorch to numpy\n xs, y_hats = [o.cpu() for o in [xs, y_hats]]\n xs, y_hats = [np.asarray(o) for o in [xs, y_hats]]\n\n if phase in [\"test\"]:\n ys = ys.cpu()\n ys = np.asarray(ys)\n else:\n ys = y_hats # it's going to be empty - just to make life easier while passing values to other functions\n\n print(f\"\\nplot batch of size {len(xs)}\")\n for i in range(len(xs)):\n print(f\"plot, {i + 1}/{len(xs)}\")\n texts_in = [t[i] for t in metadata['input']['timestamps']]\n # print(texts_in)\n texts_ta = [t[i] for t in metadata['target']['timestamps']]\n # title = self.seq_metrics(ys[i].ravel(), y_hats[i].ravel())\n if VERBOSE:\n print(\"inputs\")\n print(np.shape(xs[i]))\n if (phase == \"test\"):\n print(\"target\")\n print(np.shape(ys[i]))\n print(\"prediction\")\n print(np.shape(y_hats[i]))\n self.collapse_time = True\n\n fig = plot_sequence(xs[i], ys[i], y_hats[i], texts_in, texts_ta,\n self.params, phase, self.collapse_time, vmax=vmax, vmin=vmin,\n channel=channel, title=loss)\n figures.append(fig)\n # save individual image to tensorboard\n # self.logger.experiment.add_figure(f\"preds_{self.trainer.global_step}_{self.val_batch}_{i}\", fig)\n # self.logger.log_image(f\"preds_{self.trainer.global_step}_{self.val_batch}_{i}\", fig)\n # save all figures to disk\n date_time = datetime.datetime.now().strftime(\"%m%d-%H:%M\")\n channel_names = ['IR_016', 'IR_039', 'IR_087', 'IR_097', 'IR_108', 'IR_120', 'IR_134', 'VIS006', 'VIS008',\n 'WV_062', 'WV_073']\n channel_name = channel_names[channel]\n if mix:\n fname = f\"batch_{self.val_batch}_channel_{channel_name}_{date_time}_mix\"\n else:\n fname = 
f\"batch_{self.val_batch}_channel_{channel_name}_{date_time}\"\n dir_path = os.path.join('plots', f\"{self.params['name']}\")\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n f_path = os.path.join(dir_path, fname)\n save_pdf(figures, f_path)\n if (phase == \"test\"):\n print(f'saved figures at: {fname} | {loss}')\n else:\n print(f'saved figures at: {fname}')\n # self.val_batch += 1\n return figures\n\n def predict_step(self, batch, batch_idx, phase='predict'):\n x, y, metadata = batch\n mask = self.get_target_mask(metadata)\n\n if VERBOSE:\n print('x', x.shape, 'y', y.shape, '----------------- batch')\n\n y_hat = self.forward(x, metadata)\n\n if VERBOSE:\n print('y_hat', y_hat.shape, 'y', y.shape, '----------------- model')\n if self.plot_results:\n self.plot_batch(x, y, y_hat, metadata, f'batch: {self.val_batch} | prediction results', phase, vmax=1.)\n return y_hat, y, mask\n\n def configure_optimizers(self):\n print(\"config optimizers\")\n optim_params = self.params[self.params['optim']]\n\n model_parameters = self.set_model_params_optimizer()\n\n if self.params['optim'].lower() == 'adam':\n optimizer = optim.Adam(model_parameters, lr=float(self.params[\"lr\"]), **optim_params)\n elif self.params['optim'].lower() == 'adamw':\n optimizer = optim.AdamW(model_parameters, lr=float(self.params[\"lr\"]), **optim_params)\n elif self.params['optim'].lower() == 'sgd':\n optimizer = optim.SGD(model_parameters, lr=float(self.params[\"lr\"]), **optim_params)\n else:\n raise ValueError(f'No support {self.params.optim} optimizer!')\n\n ## configure scheduler\n lr_params = self.params[self.params['scheduler']]\n\n print(\"Learning rate:\", self.params[\"lr\"],\n \"optimizer: \", self.params[\"optim\"], \"optimier parameters: \", optim_params,\n \"scheduler: \", self.params['scheduler'], \"scheduler paramsters: \", lr_params)\n\n if self.params['scheduler'] == 'exp':\n scheduler = lr_scheduler.ExponentialLR(optimizer, **lr_params)\n return [optimizer], [scheduler]\n elif self.params['scheduler'] == 'cosine':\n scheduler = lr_scheduler.CosineAnnealingLR(optimizer, **lr_params)\n return [optimizer], [scheduler]\n elif self.params['scheduler'] == 'cosinewarm':\n scheduler = lr_scheduler.CosineAnnealingWarmRestarts(optimizer, **lr_params)\n return [optimizer], [scheduler]\n elif self.params['scheduler'] == 'step':\n scheduler = lr_scheduler.StepLR(optimizer, **lr_params)\n return [optimizer], [scheduler]\n elif self.params['scheduler'] == 'multistep':\n scheduler = lr_scheduler.MultiStepLR(optimizer, **lr_params)\n return [optimizer], [scheduler]\n elif self.params['scheduler'] == 'onecycle':\n scheduler = lr_scheduler.OneCycleLR(optimizer, **lr_params)\n return [optimizer], [scheduler]\n elif self.params['scheduler'] == 'reducelr':\n return {\n 'optimizer': optimizer,\n 'lr_scheduler': {\n 'scheduler': lr_scheduler.ReduceLROnPlateau(optimizer, **lr_params),\n 'monitor': self.params['reducelr_monitor'],\n }\n }\n else:\n raise ValueError(f\"No support {self.params['scheduler']} scheduler!\")\n\n def set_model_params_optimizer(self):\n if 'no_bias_decay' in self.params and self.params.get('no_bias_decay'):\n if 'encoder_lr_ratio' in self.params:\n encoder_lr_ratio = self.params.get('encoder_lr_ratio')\n group_decay_encoder, group_no_decay_encoder = group_weight(self.model.down_convs)\n group_decay_decoder, group_no_decay_decoder = group_weight(self.model.up_convs)\n base_lr = self.params['lr']\n params = [{'params': group_decay_decoder},\n {'params': group_no_decay_decoder, 'weight_decay': 
0.0},\n {'params': group_decay_encoder, 'lr': base_lr * encoder_lr_ratio},\n {'params': group_no_decay_encoder, 'lr': base_lr * encoder_lr_ratio, 'weight_decay': 0.0}]\n print(\n f'separately set lr with no_bias_decay for encoder {base_lr} and decoder {base_lr * encoder_lr_ratio}...')\n else:\n group_decay, group_no_decay = group_weight(self.model)\n params = [{'params': group_decay},\n {'params': group_no_decay, 'weight_decay': 0.0}]\n print(f'set params with no_bias_decay...')\n elif 'encoder_lr_ratio' in self.params:\n encoder_lr_ratio = self.params.get('encoder_lr_ratio')\n base_lr = float(self.params['lr'])\n print(encoder_lr_ratio, base_lr)\n print(f'separately set lr for encoder {base_lr} and decoder {base_lr * encoder_lr_ratio}...')\n params = [{'params': self.model.up_convs.parameters()},\n {'params': self.model.reduce_channels.parameters()},\n {'params': self.model.down_convs.parameters(), 'lr': base_lr * encoder_lr_ratio}]\n else:\n params = list(filter(lambda x: x.requires_grad, self.parameters()))\n\n return params\n\n def freeze_model_params(self):\n if 'freeze_encoder' in self.params and self.params.get('freeze_encoder'):\n print('freezing the parameters of encoder...')\n for name, param in self.model.down_convs.named_parameters():\n param.requires_grad = False\n\n if 'freeze_decoder' in self.params and self.params.get('freeze_decoder'):\n print('freezing the parameters of decoder...')\n for name, param in self.model.down_convs.named_parameters():\n param.requires_grad = False\n\n if 'freeze_output' in self.params and self.params.get('freeze_output'):\n print('freezing the parameters of final output...')\n for name, param in self.model.reduce_channels.named_parameters():\n param.requires_grad = False"
},
{
"identifier": "load_config",
"path": "utils/data_utils.py",
"snippet": "def load_config(config_path):\n \"\"\"Load confgiuration file\n\n Args:\n config_path (String): path to configuration file\n\n Returns:\n dict: configuration file\n \"\"\"\n with open(config_path) as file:\n config = yaml.safe_load(file)\n return config"
},
{
"identifier": "get_cuda_memory_usage",
"path": "utils/data_utils.py",
"snippet": "def get_cuda_memory_usage(gpus):\n \"\"\"Get the GPU memory usage\n\n Args:\n gpus (list): list of GPUs\n \"\"\"\n for gpu in gpus:\n r = torch.cuda.memory_reserved(gpu)\n a = torch.cuda.memory_allocated(gpu)\n f = r - a # free inside reserved\n print(\"GPU\", gpu, \"CUDA memory reserved:\", r, \"allocated:\", a, \"free:\", f)"
},
{
"identifier": "tensor_to_submission_file",
"path": "utils/data_utils.py",
"snippet": "def tensor_to_submission_file(predictions, predict_params):\n \"\"\"saves prediction tesnor to submission .h5 file\n\n Args:\n predictions (numpy array): data cube of predictions\n predict_params (dict): dictionary of parameters for prediction\n \"\"\"\n\n path = os.path.join(predict_params[\"submission_out_dir\"],\n str(predict_params[\"year_to_predict\"]))\n if not os.path.exists(path):\n os.makedirs(path)\n\n submission_file_name = predict_params[\"region_to_predict\"] + \".pred.h5\"\n submission_path = os.path.join(path, submission_file_name)\n h5f = h5py.File(submission_path, \"w\")\n h5f.create_dataset(\"submission\", data=predictions.squeeze())\n h5f.close()"
},
{
"identifier": "get_dict_value",
"path": "utils/data_utils.py",
"snippet": "def get_dict_value(dic, value, default):\n if value in dic:\n return dic[value]\n else:\n return default"
},
{
"identifier": "RainData",
"path": "utils/w4c_dataloader.py",
"snippet": "class RainData(Dataset):\n def __init__(\n self,\n data_split,\n project_root=\"\",\n data_root=\"\",\n input_product=\"REFL-BT\",\n compute_seq=True,\n output_product=\"RATE\",\n sat_bands=[],\n preprocess_OPERA=None,\n size_target_center=None,\n full_opera_context=None,\n preprocess_HRIT=None,\n path_to_sample_ids=\"\",\n len_seq_in=4,\n len_seq_predict=32,\n regions=[\"boxi_0015\"],\n regions_def={},\n generate_samples=False,\n latlon_path=\"\",\n altitudes_path=\"\",\n splits_path=None,\n swap_time_ch=False,\n years=None,\n **kwargs\n ):\n start = timer()\n # Data Dimensions\n self.len_seq_in = len_seq_in\n self.len_seq_predict = len_seq_predict\n self.channel_dim = 1 # where to concat channels in structure\n\n # type of data & processing variables\n self.sat_bands = sat_bands\n self.regions = regions\n self.input_product = input_product\n self.output_product = output_product\n self.preprocess_target = preprocess_OPERA\n self.size_target_center = size_target_center\n self.full_opera_context = full_opera_context\n self.crop = int(\n (self.full_opera_context - self.size_target_center) / 2\n ) # calculate centre of image to begin crop\n self.preprocess_input = preprocess_HRIT\n self.path_to_sample_ids = path_to_sample_ids\n self.regions_def = regions_def\n self.generate_samples = generate_samples\n self.path_to_sample_ids = path_to_sample_ids\n self.swap_time_ch = swap_time_ch\n self.years = years\n\n # data splits to load (training/validation/test)\n self.root = project_root\n self.data_root = data_root\n self.data_split = data_split\n self.splits_df = load_timestamps(splits_path)\n # prepare all elements to load - sample idx will use the object 'self.idx'\n self.idxs = load_sample_ids(\n self.data_split,\n self.splits_df,\n self.len_seq_in,\n self.len_seq_predict,\n self.regions,\n self.generate_samples,\n self.years,\n self.path_to_sample_ids\n )\n\n # LOAD DATASET\n self.in_ds = load_dataset(\n self.data_root, self.data_split, self.regions, years, self.input_product\n )\n if self.data_split not in [\"test\", \"heldout\"]:\n self.out_ds = load_dataset(\n self.data_root, self.data_split, self.regions, years, self.output_product\n )\n else:\n self.out_ds = []\n\n def __len__(self):\n \"\"\"total number of samples (sequences of in:4-out:1 in our case) to train\"\"\"\n # print(len(self.idxs), \"-------------------\", self.data_split)\n return len(self.idxs)\n\n def load_in(self, in_seq, seq_r, metadata, loaded_input=False):\n in0 = time.time()\n input_data, in_masks, year = get_sequence(\n in_seq,\n self.data_root,\n self.data_split,\n seq_r,\n self.input_product,\n self.sat_bands,\n self.preprocess_input,\n self.swap_time_ch,\n self.in_ds,\n )\n metadata[\"year\"] = year\n if VERBOSE:\n print(np.shape(input_data), time.time() - in0, \"in sequence time\")\n return input_data, metadata\n\n def load_out(self, out_seq, seq_r, metadata):\n t1 = time.time()\n # GROUND TRUTH (OUTPUT)\n if self.data_split not in [\"test\", \"heldout\"]:\n output_data, out_masks, year = get_sequence(\n out_seq,\n self.data_root,\n self.data_split,\n seq_r,\n self.output_product,\n [],\n self.preprocess_target,\n self.swap_time_ch,\n self.out_ds,\n )\n # collapse time to channels\n metadata[\"target\"][\"mask\"] = out_masks\n else: # Just return [] if its test/heldout data\n output_data = np.array([])\n if VERBOSE:\n print(time.time() - t1, \"out sequence\")\n return output_data, metadata\n\n def load_in_out(self, in_seq, out_seq=None, seq_r=None):\n metadata = {\n \"input\": {\"mask\": [], 
\"timestamps\": in_seq},\n \"target\": {\"mask\": [], \"timestamps\": out_seq},\n }\n\n t0 = time.time()\n input_data, metadata = self.load_in(in_seq, seq_r, metadata)\n output_data, metadata = self.load_out(out_seq, seq_r, metadata)\n metadata[\"region\"] = seq_r\n if VERBOSE:\n print(time.time() - t0, \"seconds\")\n return input_data, output_data, metadata\n\n def __getitem__(self, idx):\n \"\"\"load 1 sequence (1 sample)\"\"\"\n in_seq = self.idxs[idx][0]\n out_seq = self.idxs[idx][1]\n seq_r = self.idxs[idx][2]\n# # print(\"=== DEBUG in_seq: \",in_seq, file=sys.stderr);\n# print(\"=== DEBUG in_seq: \",in_seq);\n return self.load_in_out(in_seq, out_seq, seq_r)"
},
{
"identifier": "to_one_hot",
"path": "utils/evaluate.py",
"snippet": "def to_one_hot(arr, thresholds=None):\n if thresholds is None:\n thresholds = [0.2, 1, 5, 10, 15]\n num_classes = len(thresholds) + 1\n one_hot = torch.zeros((arr.shape[0], num_classes) + arr.shape[2:], dtype=torch.float32, device=arr.device)\n\n for i, threshold in enumerate(thresholds):\n if i == 0:\n one_hot[:, i] = (arr < threshold).squeeze(1)\n else:\n one_hot[:, i] = ((arr >= thresholds[i - 1]) & (arr < threshold)).squeeze(1)\n\n one_hot[:, -1] = (arr >= thresholds[-1]).squeeze(1)\n return one_hot"
}
] | import argparse
import copy
import numpy as np
import pytorch_lightning as pl
import datetime
import os
import torch
import torch.nn.functional as F
import wandb
from pytorch_lightning.callbacks import ModelCheckpoint, ModelSummary
from pytorch_lightning.plugins import DDPPlugin
from torch.utils.data import DataLoader, ConcatDataset
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from utils.evaluate import recall_precision_f1_acc, get_confusion_matrix
from models.unet_lightning_w4c23 import UNet_Lightning as UNetModel
from utils.data_utils import load_config
from utils.data_utils import get_cuda_memory_usage
from utils.data_utils import tensor_to_submission_file
from utils.data_utils import get_dict_value
from utils.w4c_dataloader import RainData
from utils.evaluate import to_one_hot | 9,492 | def val_dataloader(self):
return self.__load_dataloader(self.val_ds, shuffle=False, pin=True)
def test_dataloader(self):
return self.__load_dataloader(self.test_ds, shuffle=False, pin=True)
def load_model(Model, params, checkpoint_path='') -> pl.LightningModule:
""" loads a model from a checkpoint or from scratch if checkpoint_path='' """
p = {**params['experiment'], **params['dataset'], **params['train']}
if checkpoint_path == '':
print('-> Modelling from scratch! (no checkpoint loaded)')
model = Model(params['model'], p)
else:
print(f'-> Loading model checkpoint: {checkpoint_path}')
model = Model.load_from_checkpoint(checkpoint_path, UNet_params=params['model'], params=p)
return model
def get_trainer(gpus, params, mode):
""" get the trainer, modify here its options:
- save_top_k
"""
max_epochs = params['train']['max_epochs']
# max_epochs = 1
print("Trainig for", max_epochs, "epochs")
checkpoint_callback = ModelCheckpoint(monitor='val_loss_epoch', save_top_k=90, save_last=True,
filename='{epoch:02d}-{val_loss_epoch:.6f}')
parallel_training = None
ddpplugin = None
if gpus[0] == -1:
gpus = None
elif len(gpus) > 1:
parallel_training = 'ddp'
## ddpplugin = DDPPlugin(find_unused_parameters=True)
print(f"====== process started on the following GPUs: {gpus} ======")
date_time = datetime.datetime.now().strftime("%m%d-%H:%M")
version = params['experiment']['name']
version = version + '_' + date_time
# SET LOGGER
# if params['experiment']['logging']:
# tb_logger = pl_loggers.TensorBoardLogger(save_dir=params['experiment']['experiment_folder'],name=params['experiment']['sub_folder'], version=version, log_graph=True)
# else:
# tb_logger = False
if params['experiment']['logging'] and mode != "predict" and mode != "val":
# Create a WandbLogger instead of TensorBoardLogger
wandb_logger = WandbLogger(
project='w4c23',
save_dir=params['experiment']['experiment_folder'],
name=params['experiment']['sub_folder'],
)
else:
wandb_logger = False
if mode == "predict" or mode == "val" or len(gpus) <= 1:
strategy = None
else:
strategy = "ddp"
if params['train']['early_stopping']:
early_stop_callback = EarlyStopping(monitor="val_loss_epoch",
patience=params['train']['patience'],
mode="min")
callback_funcs = [checkpoint_callback, ModelSummary(max_depth=2), early_stop_callback]
else:
callback_funcs = [checkpoint_callback, ModelSummary(max_depth=2)]
trainer = pl.Trainer(devices=gpus, max_epochs=max_epochs,
gradient_clip_val=params['model']['gradient_clip_val'],
gradient_clip_algorithm=params['model']['gradient_clip_algorithm'],
accelerator="gpu",
callbacks=callback_funcs, logger=wandb_logger,
# profiler='simple',
# fast_dev_run=3,
# log_every_n_steps=1,
precision=params['experiment']['precision'],
strategy=strategy
)
return trainer
def to_number(y_hat, nums=None, thres=None):
if nums is None:
nums = torch.tensor([0, 0.6, 3, 7.5, 12.5, 16]).reshape(1, 6, 1, 1, 1).to(y_hat.device)
num_classes = 6
y_hat = F.softmax(y_hat, dim=1)
if thres is not None:
y_sum = 1 - torch.cumsum(y_hat, dim=1)
y_hat = torch.argmax((y_sum < torch.tensor(thres + [2], device=y_sum.device).reshape(1, 6, 1, 1, 1)).long(),
dim=1)
else:
y_hat = torch.argmax(y_hat, dim=1)
y_hat = F.one_hot(y_hat, num_classes=num_classes).permute(0, 4, 1, 2, 3)
ret = torch.sum(y_hat * nums, axis=1, keepdim=True)
return y_hat, ret
def do_predict(trainer, model, predict_params, test_data):
ret = 0
test_batch = trainer.predict(model, dataloaders=test_data)
scores = torch.cat([b[0] for b in test_batch])
_, scores = to_number(scores)
tensor_to_submission_file(scores, predict_params)
return ret
def do_test(trainer, model, test_data):
scores = trainer.test(model, dataloaders=test_data)
def do_val(trainer, model, test_data):
scores = trainer.validate(model, dataloaders=test_data)
def train(params, gpus, mode, checkpoint_path, model=UNetModel, tune=True):
""" main training/evaluation method
"""
# ------------
# model & data
# ------------
| # Weather4cast 2023 Starter Kit
#
# This Starter Kit builds on and extends the Weather4cast 2022 Starter Kit,
# the original license for which is included below.
#
# In line with the provisions of this license, all changes and additional
# code are also released unde the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# Weather4cast 2022 Starter Kit
#
# Copyright (C) 2022
# Institute of Advanced Research in Artificial Intelligence (IARAI)
# This file is part of the Weather4cast 2022 Starter Kit.
#
# The Weather4cast 2022 Starter Kit is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# The Weather4cast 2022 Starter Kit is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Contributors: Aleksandra Gruca, Pedro Herruzo, David Kreil, Stephen Moran
class DataModule(pl.LightningDataModule):
""" Class to handle training/validation splits in a single object
"""
def __init__(self, params, training_params, mode):
super().__init__()
self.params = params
self.training_params = training_params
concat_train_val = get_dict_value(training_params, 'concat_train_val', False)
print("----------------------- concat_train_val: ", concat_train_val)
if mode in ['train']:
print("Loading TRAINING/VALIDATION dataset -- as test")
if concat_train_val:
self.val_ds = RainData('validation', **self.params)
self.train_ds = ConcatDataset([RainData('training', **self.params), self.val_ds])
else:
self.train_ds = RainData('training', **self.params)
self.val_ds = RainData('validation', **self.params)
print(f"Training dataset size: {len(self.train_ds)}")
if mode in ['val']:
print("Loading VALIDATION dataset -- as test")
self.val_ds = RainData('validation', **self.params)
if mode in ['predict']:
print("Loading PREDICTION/TEST dataset -- as test")
self.test_ds = RainData('test', **self.params)
def __load_dataloader(self, dataset, shuffle=True, pin=True):
dl = DataLoader(dataset,
batch_size=self.training_params['batch_size'],
num_workers=self.training_params['n_workers'],
shuffle=shuffle,
pin_memory=pin, prefetch_factor=2,
persistent_workers=False)
return dl
def train_dataloader(self):
return self.__load_dataloader(self.train_ds, shuffle=True, pin=True)
def val_dataloader(self):
return self.__load_dataloader(self.val_ds, shuffle=False, pin=True)
def test_dataloader(self):
return self.__load_dataloader(self.test_ds, shuffle=False, pin=True)
def load_model(Model, params, checkpoint_path='') -> pl.LightningModule:
""" loads a model from a checkpoint or from scratch if checkpoint_path='' """
p = {**params['experiment'], **params['dataset'], **params['train']}
if checkpoint_path == '':
print('-> Modelling from scratch! (no checkpoint loaded)')
model = Model(params['model'], p)
else:
print(f'-> Loading model checkpoint: {checkpoint_path}')
model = Model.load_from_checkpoint(checkpoint_path, UNet_params=params['model'], params=p)
return model
def get_trainer(gpus, params, mode):
""" get the trainer, modify here its options:
- save_top_k
"""
max_epochs = params['train']['max_epochs']
# max_epochs = 1
print("Trainig for", max_epochs, "epochs")
checkpoint_callback = ModelCheckpoint(monitor='val_loss_epoch', save_top_k=90, save_last=True,
filename='{epoch:02d}-{val_loss_epoch:.6f}')
parallel_training = None
ddpplugin = None
if gpus[0] == -1:
gpus = None
elif len(gpus) > 1:
parallel_training = 'ddp'
## ddpplugin = DDPPlugin(find_unused_parameters=True)
print(f"====== process started on the following GPUs: {gpus} ======")
date_time = datetime.datetime.now().strftime("%m%d-%H:%M")
version = params['experiment']['name']
version = version + '_' + date_time
# SET LOGGER
# if params['experiment']['logging']:
# tb_logger = pl_loggers.TensorBoardLogger(save_dir=params['experiment']['experiment_folder'],name=params['experiment']['sub_folder'], version=version, log_graph=True)
# else:
# tb_logger = False
if params['experiment']['logging'] and mode != "predict" and mode != "val":
# Create a WandbLogger instead of TensorBoardLogger
wandb_logger = WandbLogger(
project='w4c23',
save_dir=params['experiment']['experiment_folder'],
name=params['experiment']['sub_folder'],
)
else:
wandb_logger = False
if mode == "predict" or mode == "val" or len(gpus) <= 1:
strategy = None
else:
strategy = "ddp"
if params['train']['early_stopping']:
early_stop_callback = EarlyStopping(monitor="val_loss_epoch",
patience=params['train']['patience'],
mode="min")
callback_funcs = [checkpoint_callback, ModelSummary(max_depth=2), early_stop_callback]
else:
callback_funcs = [checkpoint_callback, ModelSummary(max_depth=2)]
trainer = pl.Trainer(devices=gpus, max_epochs=max_epochs,
gradient_clip_val=params['model']['gradient_clip_val'],
gradient_clip_algorithm=params['model']['gradient_clip_algorithm'],
accelerator="gpu",
callbacks=callback_funcs, logger=wandb_logger,
# profiler='simple',
# fast_dev_run=3,
# log_every_n_steps=1,
precision=params['experiment']['precision'],
strategy=strategy
)
return trainer
def to_number(y_hat, nums=None, thres=None):
if nums is None:
nums = torch.tensor([0, 0.6, 3, 7.5, 12.5, 16]).reshape(1, 6, 1, 1, 1).to(y_hat.device)
num_classes = 6
y_hat = F.softmax(y_hat, dim=1)
if thres is not None:
y_sum = 1 - torch.cumsum(y_hat, dim=1)
y_hat = torch.argmax((y_sum < torch.tensor(thres + [2], device=y_sum.device).reshape(1, 6, 1, 1, 1)).long(),
dim=1)
else:
y_hat = torch.argmax(y_hat, dim=1)
y_hat = F.one_hot(y_hat, num_classes=num_classes).permute(0, 4, 1, 2, 3)
ret = torch.sum(y_hat * nums, axis=1, keepdim=True)
return y_hat, ret
def do_predict(trainer, model, predict_params, test_data):
ret = 0
test_batch = trainer.predict(model, dataloaders=test_data)
scores = torch.cat([b[0] for b in test_batch])
_, scores = to_number(scores)
tensor_to_submission_file(scores, predict_params)
return ret
def do_test(trainer, model, test_data):
scores = trainer.test(model, dataloaders=test_data)
def do_val(trainer, model, test_data):
scores = trainer.validate(model, dataloaders=test_data)
def train(params, gpus, mode, checkpoint_path, model=UNetModel, tune=True):
""" main training/evaluation method
"""
# ------------
# model & data
# ------------ | get_cuda_memory_usage(gpus) | 4 | 2023-11-30 06:12:26+00:00 | 12k |
opisaac9001/TTS-With-ooba-and-voice | TTS/tts/layers/delightful_tts/acoustic_model.py | [
{
"identifier": "Conformer",
"path": "TTS/tts/layers/delightful_tts/conformer.py",
"snippet": "class Conformer(nn.Module):\n def __init__(\n self,\n dim: int,\n n_layers: int,\n n_heads: int,\n speaker_embedding_dim: int,\n p_dropout: float,\n kernel_size_conv_mod: int,\n lrelu_slope: float,\n ):\n \"\"\"\n A Transformer variant that integrates both CNNs and Transformers components.\n Conformer proposes a novel combination of self-attention and convolution, in which self-attention\n learns the global interaction while the convolutions efficiently capture the local correlations.\n\n Args:\n dim (int): Number of the dimensions for the model.\n n_layers (int): Number of model layers.\n n_heads (int): The number of attention heads.\n speaker_embedding_dim (int): Number of speaker embedding dimensions.\n p_dropout (float): Probabilty of dropout.\n kernel_size_conv_mod (int): Size of kernels for convolution modules.\n\n Inputs: inputs, mask\n - **inputs** (batch, time, dim): Tensor containing input vector\n - **encoding** (batch, time, dim): Positional embedding tensor\n - **mask** (batch, 1, time2) or (batch, time1, time2): Tensor containing indices to be masked\n Returns:\n - **outputs** (batch, time, dim): Tensor produced by Conformer Encoder.\n \"\"\"\n super().__init__()\n d_k = d_v = dim // n_heads\n self.layer_stack = nn.ModuleList(\n [\n ConformerBlock(\n dim,\n n_heads,\n d_k,\n d_v,\n kernel_size_conv_mod=kernel_size_conv_mod,\n dropout=p_dropout,\n speaker_embedding_dim=speaker_embedding_dim,\n lrelu_slope=lrelu_slope,\n )\n for _ in range(n_layers)\n ]\n )\n\n def forward(\n self,\n x: torch.Tensor,\n mask: torch.Tensor,\n speaker_embedding: torch.Tensor,\n encoding: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n Shapes:\n - x: :math:`[B, T_src, C]`\n - mask: :math: `[B]`\n - speaker_embedding: :math: `[B, C]`\n - encoding: :math: `[B, T_max2, C]`\n \"\"\"\n\n attn_mask = mask.view((mask.shape[0], 1, 1, mask.shape[1]))\n for enc_layer in self.layer_stack:\n x = enc_layer(\n x,\n mask=mask,\n slf_attn_mask=attn_mask,\n speaker_embedding=speaker_embedding,\n encoding=encoding,\n )\n return x"
},
{
"identifier": "PhonemeLevelProsodyEncoder",
"path": "TTS/tts/layers/delightful_tts/encoders.py",
"snippet": "class PhonemeLevelProsodyEncoder(nn.Module):\n def __init__(\n self,\n num_mels: int,\n ref_enc_filters: List[Union[int, int, int, int, int, int]],\n ref_enc_size: int,\n ref_enc_strides: List[Union[int, int, int, int, int]],\n ref_enc_gru_size: int,\n dropout: float,\n n_hidden: int,\n n_heads: int,\n bottleneck_size_p: int,\n ):\n super().__init__()\n\n self.E = n_hidden\n self.d_q = self.d_k = n_hidden\n bottleneck_size = bottleneck_size_p\n\n self.encoder = ReferenceEncoder(\n ref_enc_filters=ref_enc_filters,\n ref_enc_gru_size=ref_enc_gru_size,\n ref_enc_size=ref_enc_size,\n ref_enc_strides=ref_enc_strides,\n num_mels=num_mels,\n )\n self.encoder_prj = nn.Linear(ref_enc_gru_size, n_hidden)\n self.attention = ConformerMultiHeadedSelfAttention(\n d_model=n_hidden,\n num_heads=n_heads,\n dropout_p=dropout,\n )\n self.encoder_bottleneck = nn.Linear(n_hidden, bottleneck_size)\n\n def forward(\n self,\n x: torch.Tensor,\n src_mask: torch.Tensor,\n mels: torch.Tensor,\n mel_lens: torch.Tensor,\n encoding: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n x --- [N, seq_len, encoder_embedding_dim]\n mels --- [N, Ty/r, n_mels*r], r=1\n out --- [N, seq_len, bottleneck_size]\n attn --- [N, seq_len, ref_len], Ty/r = ref_len\n \"\"\"\n embedded_prosody, _, mel_masks = self.encoder(mels, mel_lens)\n\n # Bottleneck\n embedded_prosody = self.encoder_prj(embedded_prosody)\n\n attn_mask = mel_masks.view((mel_masks.shape[0], 1, 1, -1))\n x, _ = self.attention(\n query=x,\n key=embedded_prosody,\n value=embedded_prosody,\n mask=attn_mask,\n encoding=encoding,\n )\n x = self.encoder_bottleneck(x)\n x = x.masked_fill(src_mask.unsqueeze(-1), 0.0)\n return x"
},
{
"identifier": "UtteranceLevelProsodyEncoder",
"path": "TTS/tts/layers/delightful_tts/encoders.py",
"snippet": "class UtteranceLevelProsodyEncoder(nn.Module):\n def __init__(\n self,\n num_mels: int,\n ref_enc_filters: List[Union[int, int, int, int, int, int]],\n ref_enc_size: int,\n ref_enc_strides: List[Union[int, int, int, int, int]],\n ref_enc_gru_size: int,\n dropout: float,\n n_hidden: int,\n bottleneck_size_u: int,\n token_num: int,\n ):\n \"\"\"\n Encoder to extract prosody from utterance. it is made up of a reference encoder\n with a couple of linear layers and style token layer with dropout.\n\n Args:\n num_mels (int): Number of mel frames to produce.\n ref_enc_filters (list[int]): List of channel sizes for ref encoder layers.\n ref_enc_size (int): Size of the kernel for the ref encoder conv layers.\n ref_enc_strides (List[int]): List of strides to use for teh ref encoder conv layers.\n ref_enc_gru_size (int): Number of hidden features for the gated recurrent unit.\n dropout (float): Probability of dropout.\n n_hidden (int): Size of hidden layers.\n bottleneck_size_u (int): Size of the bottle neck layer.\n\n Inputs: inputs, mask\n - **inputs** (batch, dim, time): Tensor containing mel vector\n - **lengths** (batch): Tensor containing the mel lengths.\n Returns:\n - **outputs** (batch, 1, dim): Tensor produced by Utterance Level Prosody Encoder.\n \"\"\"\n super().__init__()\n\n self.E = n_hidden\n self.d_q = self.d_k = n_hidden\n bottleneck_size = bottleneck_size_u\n\n self.encoder = ReferenceEncoder(\n ref_enc_filters=ref_enc_filters,\n ref_enc_gru_size=ref_enc_gru_size,\n ref_enc_size=ref_enc_size,\n ref_enc_strides=ref_enc_strides,\n num_mels=num_mels,\n )\n self.encoder_prj = nn.Linear(ref_enc_gru_size, self.E // 2)\n self.stl = STL(n_hidden=n_hidden, token_num=token_num)\n self.encoder_bottleneck = nn.Linear(self.E, bottleneck_size)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, mels: torch.Tensor, mel_lens: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Shapes:\n mels: :math: `[B, C, T]`\n mel_lens: :math: `[B]`\n\n out --- [N, seq_len, E]\n \"\"\"\n _, embedded_prosody, _ = self.encoder(mels, mel_lens)\n\n # Bottleneck\n embedded_prosody = self.encoder_prj(embedded_prosody)\n\n # Style Token\n out = self.encoder_bottleneck(self.stl(embedded_prosody))\n out = self.dropout(out)\n\n out = out.view((-1, 1, out.shape[3]))\n return out"
},
{
"identifier": "get_mask_from_lengths",
"path": "TTS/tts/layers/delightful_tts/encoders.py",
"snippet": "def get_mask_from_lengths(lengths: torch.Tensor) -> torch.Tensor:\n batch_size = lengths.shape[0]\n max_len = torch.max(lengths).item()\n ids = torch.arange(0, max_len, device=lengths.device).unsqueeze(0).expand(batch_size, -1)\n mask = ids >= lengths.unsqueeze(1).expand(-1, max_len)\n return mask"
},
{
"identifier": "EnergyAdaptor",
"path": "TTS/tts/layers/delightful_tts/energy_adaptor.py",
"snippet": "class EnergyAdaptor(nn.Module): # pylint: disable=abstract-method\n \"\"\"Variance Adaptor with an added 1D conv layer. Used to\n get energy embeddings.\n\n Args:\n channels_in (int): Number of in channels for conv layers.\n channels_out (int): Number of out channels.\n kernel_size (int): Size the kernel for the conv layers.\n dropout (float): Probability of dropout.\n lrelu_slope (float): Slope for the leaky relu.\n emb_kernel_size (int): Size the kernel for the pitch embedding.\n\n Inputs: inputs, mask\n - **inputs** (batch, time1, dim): Tensor containing input vector\n - **target** (batch, 1, time2): Tensor containing the energy target\n - **dr** (batch, time1): Tensor containing aligner durations vector\n - **mask** (batch, time1): Tensor containing indices to be masked\n Returns:\n - **energy prediction** (batch, 1, time1): Tensor produced by energy predictor\n - **energy embedding** (batch, channels, time1): Tensor produced energy adaptor\n - **average energy target(train only)** (batch, 1, time1): Tensor produced after averaging over durations\n\n \"\"\"\n\n def __init__(\n self,\n channels_in: int,\n channels_hidden: int,\n channels_out: int,\n kernel_size: int,\n dropout: float,\n lrelu_slope: float,\n emb_kernel_size: int,\n ):\n super().__init__()\n self.energy_predictor = VariancePredictor(\n channels_in=channels_in,\n channels=channels_hidden,\n channels_out=channels_out,\n kernel_size=kernel_size,\n p_dropout=dropout,\n lrelu_slope=lrelu_slope,\n )\n self.energy_emb = nn.Conv1d(\n 1,\n channels_hidden,\n kernel_size=emb_kernel_size,\n padding=int((emb_kernel_size - 1) / 2),\n )\n\n def get_energy_embedding_train(\n self, x: torch.Tensor, target: torch.Tensor, dr: torch.IntTensor, mask: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Shapes:\n x: :math: `[B, T_src, C]`\n target: :math: `[B, 1, T_max2]`\n dr: :math: `[B, T_src]`\n mask: :math: `[B, T_src]`\n \"\"\"\n energy_pred = self.energy_predictor(x, mask)\n energy_pred.unsqueeze_(1)\n avg_energy_target = average_over_durations(target, dr)\n energy_emb = self.energy_emb(avg_energy_target)\n return energy_pred, avg_energy_target, energy_emb\n\n def get_energy_embedding(self, x: torch.Tensor, mask: torch.Tensor, energy_transform: Callable) -> torch.Tensor:\n energy_pred = self.energy_predictor(x, mask)\n energy_pred.unsqueeze_(1)\n if energy_transform is not None:\n energy_pred = energy_transform(energy_pred, (~mask).sum(dim=(1, 2)), self.pitch_mean, self.pitch_std)\n energy_emb_pred = self.energy_emb(energy_pred)\n return energy_emb_pred, energy_pred"
},
{
"identifier": "EmbeddingPadded",
"path": "TTS/tts/layers/delightful_tts/networks.py",
"snippet": "class EmbeddingPadded(nn.Module):\n def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):\n super().__init__()\n padding_mult = torch.ones((num_embeddings, 1), dtype=torch.int64)\n padding_mult[padding_idx] = 0\n self.register_buffer(\"padding_mult\", padding_mult)\n self.embeddings = nn.parameter.Parameter(initialize_embeddings((num_embeddings, embedding_dim)))\n\n def forward(self, idx: torch.Tensor) -> torch.Tensor:\n embeddings_zeroed = self.embeddings * self.padding_mult\n x = F.embedding(idx, embeddings_zeroed)\n return x"
},
{
"identifier": "positional_encoding",
"path": "TTS/tts/layers/delightful_tts/networks.py",
"snippet": "def positional_encoding(d_model: int, length: int, device: torch.device) -> torch.Tensor:\n pe = torch.zeros(length, d_model, device=device)\n position = torch.arange(0, length, dtype=torch.float, device=device).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2, device=device).float() * -(math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0)\n return pe"
},
{
"identifier": "PhonemeProsodyPredictor",
"path": "TTS/tts/layers/delightful_tts/phoneme_prosody_predictor.py",
"snippet": "class PhonemeProsodyPredictor(nn.Module):\n \"\"\"Non-parallel Prosody Predictor inspired by: https://arxiv.org/pdf/2102.00851.pdf\n It consists of 2 layers of 1D convolutions each followed by a relu activation, layer norm\n and dropout, then finally a linear layer.\n\n Args:\n hidden_size (int): Size of hidden channels.\n kernel_size (int): Kernel size for the conv layers.\n dropout: (float): Probability of dropout.\n bottleneck_size (int): bottleneck size for last linear layer.\n lrelu_slope (float): Slope of the leaky relu.\n \"\"\"\n\n def __init__(\n self,\n hidden_size: int,\n kernel_size: int,\n dropout: float,\n bottleneck_size: int,\n lrelu_slope: float,\n ):\n super().__init__()\n self.d_model = hidden_size\n self.layers = nn.ModuleList(\n [\n ConvTransposed(\n self.d_model,\n self.d_model,\n kernel_size=kernel_size,\n padding=(kernel_size - 1) // 2,\n ),\n nn.LeakyReLU(lrelu_slope),\n nn.LayerNorm(self.d_model),\n nn.Dropout(dropout),\n ConvTransposed(\n self.d_model,\n self.d_model,\n kernel_size=kernel_size,\n padding=(kernel_size - 1) // 2,\n ),\n nn.LeakyReLU(lrelu_slope),\n nn.LayerNorm(self.d_model),\n nn.Dropout(dropout),\n ]\n )\n self.predictor_bottleneck = nn.Linear(self.d_model, bottleneck_size)\n\n def forward(self, x: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Shapes:\n x: :math: `[B, T, D]`\n mask: :math: `[B, T]`\n \"\"\"\n mask = mask.unsqueeze(2)\n for layer in self.layers:\n x = layer(x)\n x = x.masked_fill(mask, 0.0)\n x = self.predictor_bottleneck(x)\n return x"
},
{
"identifier": "PitchAdaptor",
"path": "TTS/tts/layers/delightful_tts/pitch_adaptor.py",
"snippet": "class PitchAdaptor(nn.Module): # pylint: disable=abstract-method\n \"\"\"Module to get pitch embeddings via pitch predictor\n\n Args:\n n_input (int): Number of pitch predictor input channels.\n n_hidden (int): Number of pitch predictor hidden channels.\n n_out (int): Number of pitch predictor out channels.\n kernel size (int): Size of the kernel for conv layers.\n emb_kernel_size (int): Size the kernel for the pitch embedding.\n p_dropout (float): Probability of dropout.\n lrelu_slope (float): Slope for the leaky relu.\n\n Inputs: inputs, mask\n - **inputs** (batch, time1, dim): Tensor containing input vector\n - **target** (batch, 1, time2): Tensor containing the pitch target\n - **dr** (batch, time1): Tensor containing aligner durations vector\n - **mask** (batch, time1): Tensor containing indices to be masked\n Returns:\n - **pitch prediction** (batch, 1, time1): Tensor produced by pitch predictor\n - **pitch embedding** (batch, channels, time1): Tensor produced pitch pitch adaptor\n - **average pitch target(train only)** (batch, 1, time1): Tensor produced after averaging over durations\n \"\"\"\n\n def __init__(\n self,\n n_input: int,\n n_hidden: int,\n n_out: int,\n kernel_size: int,\n emb_kernel_size: int,\n p_dropout: float,\n lrelu_slope: float,\n ):\n super().__init__()\n self.pitch_predictor = VariancePredictor(\n channels_in=n_input,\n channels=n_hidden,\n channels_out=n_out,\n kernel_size=kernel_size,\n p_dropout=p_dropout,\n lrelu_slope=lrelu_slope,\n )\n self.pitch_emb = nn.Conv1d(\n 1,\n n_input,\n kernel_size=emb_kernel_size,\n padding=int((emb_kernel_size - 1) / 2),\n )\n\n def get_pitch_embedding_train(\n self, x: torch.Tensor, target: torch.Tensor, dr: torch.IntTensor, mask: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Shapes:\n x: :math: `[B, T_src, C]`\n target: :math: `[B, 1, T_max2]`\n dr: :math: `[B, T_src]`\n mask: :math: `[B, T_src]`\n \"\"\"\n pitch_pred = self.pitch_predictor(x, mask) # [B, T_src, C_hidden], [B, T_src] --> [B, T_src]\n pitch_pred.unsqueeze_(1) # --> [B, 1, T_src]\n avg_pitch_target = average_over_durations(target, dr) # [B, 1, T_mel], [B, T_src] --> [B, 1, T_src]\n pitch_emb = self.pitch_emb(avg_pitch_target) # [B, 1, T_src] --> [B, C_hidden, T_src]\n return pitch_pred, avg_pitch_target, pitch_emb\n\n def get_pitch_embedding(\n self,\n x: torch.Tensor,\n mask: torch.Tensor,\n pitch_transform: Callable,\n pitch_mean: torch.Tensor,\n pitch_std: torch.Tensor,\n ) -> torch.Tensor:\n pitch_pred = self.pitch_predictor(x, mask)\n if pitch_transform is not None:\n pitch_pred = pitch_transform(pitch_pred, (~mask).sum(), pitch_mean, pitch_std)\n pitch_pred.unsqueeze_(1)\n pitch_emb_pred = self.pitch_emb(pitch_pred)\n return pitch_emb_pred, pitch_pred"
},
{
"identifier": "VariancePredictor",
"path": "TTS/tts/layers/delightful_tts/variance_predictor.py",
"snippet": "class VariancePredictor(nn.Module):\n \"\"\"\n Network is 2-layer 1D convolutions with leaky relu activation and then\n followed by layer normalization then a dropout layer and finally an\n extra linear layer to project the hidden states into the output sequence.\n\n Args:\n channels_in (int): Number of in channels for conv layers.\n channels_out (int): Number of out channels for the last linear layer.\n kernel_size (int): Size the kernel for the conv layers.\n p_dropout (float): Probability of dropout.\n lrelu_slope (float): Slope for the leaky relu.\n\n Inputs: inputs, mask\n - **inputs** (batch, time, dim): Tensor containing input vector\n - **mask** (batch, time): Tensor containing indices to be masked\n Returns:\n - **outputs** (batch, time): Tensor produced by last linear layer.\n \"\"\"\n\n def __init__(\n self, channels_in: int, channels: int, channels_out: int, kernel_size: int, p_dropout: float, lrelu_slope: float\n ):\n super().__init__()\n\n self.layers = nn.ModuleList(\n [\n ConvTransposed(\n channels_in,\n channels,\n kernel_size=kernel_size,\n padding=(kernel_size - 1) // 2,\n ),\n nn.LeakyReLU(lrelu_slope),\n nn.LayerNorm(channels),\n nn.Dropout(p_dropout),\n ConvTransposed(\n channels,\n channels,\n kernel_size=kernel_size,\n padding=(kernel_size - 1) // 2,\n ),\n nn.LeakyReLU(lrelu_slope),\n nn.LayerNorm(channels),\n nn.Dropout(p_dropout),\n ]\n )\n\n self.linear_layer = nn.Linear(channels, channels_out)\n\n def forward(self, x: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Shapes:\n x: :math: `[B, T_src, C]`\n mask: :math: `[B, T_src]`\n \"\"\"\n for layer in self.layers:\n x = layer(x)\n x = self.linear_layer(x)\n x = x.squeeze(-1)\n x = x.masked_fill(mask, 0.0)\n return x"
},
{
"identifier": "AlignmentNetwork",
"path": "TTS/tts/layers/generic/aligner.py",
"snippet": "class AlignmentNetwork(torch.nn.Module):\n \"\"\"Aligner Network for learning alignment between the input text and the model output with Gaussian Attention.\n\n ::\n\n query -> conv1d -> relu -> conv1d -> relu -> conv1d -> L2_dist -> softmax -> alignment\n key -> conv1d -> relu -> conv1d -----------------------^\n\n Args:\n in_query_channels (int): Number of channels in the query network. Defaults to 80.\n in_key_channels (int): Number of channels in the key network. Defaults to 512.\n attn_channels (int): Number of inner channels in the attention layers. Defaults to 80.\n temperature (float): Temperature for the softmax. Defaults to 0.0005.\n \"\"\"\n\n def __init__(\n self,\n in_query_channels=80,\n in_key_channels=512,\n attn_channels=80,\n temperature=0.0005,\n ):\n super().__init__()\n self.temperature = temperature\n self.softmax = torch.nn.Softmax(dim=3)\n self.log_softmax = torch.nn.LogSoftmax(dim=3)\n\n self.key_layer = nn.Sequential(\n nn.Conv1d(\n in_key_channels,\n in_key_channels * 2,\n kernel_size=3,\n padding=1,\n bias=True,\n ),\n torch.nn.ReLU(),\n nn.Conv1d(in_key_channels * 2, attn_channels, kernel_size=1, padding=0, bias=True),\n )\n\n self.query_layer = nn.Sequential(\n nn.Conv1d(\n in_query_channels,\n in_query_channels * 2,\n kernel_size=3,\n padding=1,\n bias=True,\n ),\n torch.nn.ReLU(),\n nn.Conv1d(in_query_channels * 2, in_query_channels, kernel_size=1, padding=0, bias=True),\n torch.nn.ReLU(),\n nn.Conv1d(in_query_channels, attn_channels, kernel_size=1, padding=0, bias=True),\n )\n\n self.init_layers()\n\n def init_layers(self):\n torch.nn.init.xavier_uniform_(self.key_layer[0].weight, gain=torch.nn.init.calculate_gain(\"relu\"))\n torch.nn.init.xavier_uniform_(self.key_layer[2].weight, gain=torch.nn.init.calculate_gain(\"linear\"))\n torch.nn.init.xavier_uniform_(self.query_layer[0].weight, gain=torch.nn.init.calculate_gain(\"relu\"))\n torch.nn.init.xavier_uniform_(self.query_layer[2].weight, gain=torch.nn.init.calculate_gain(\"linear\"))\n torch.nn.init.xavier_uniform_(self.query_layer[4].weight, gain=torch.nn.init.calculate_gain(\"linear\"))\n\n def forward(\n self, queries: torch.tensor, keys: torch.tensor, mask: torch.tensor = None, attn_prior: torch.tensor = None\n ) -> Tuple[torch.tensor, torch.tensor]:\n \"\"\"Forward pass of the aligner encoder.\n Shapes:\n - queries: :math:`[B, C, T_de]`\n - keys: :math:`[B, C_emb, T_en]`\n - mask: :math:`[B, T_de]`\n Output:\n attn (torch.tensor): :math:`[B, 1, T_en, T_de]` soft attention mask.\n attn_logp (torch.tensor): :math:`[ßB, 1, T_en , T_de]` log probabilities.\n \"\"\"\n key_out = self.key_layer(keys)\n query_out = self.query_layer(queries)\n attn_factor = (query_out[:, :, :, None] - key_out[:, :, None]) ** 2\n attn_logp = -self.temperature * attn_factor.sum(1, keepdim=True)\n if attn_prior is not None:\n attn_logp = self.log_softmax(attn_logp) + torch.log(attn_prior[:, None] + 1e-8)\n\n if mask is not None:\n attn_logp.data.masked_fill_(~mask.bool().unsqueeze(2), -float(\"inf\"))\n\n attn = self.softmax(attn_logp)\n return attn, attn_logp"
},
{
"identifier": "generate_path",
"path": "TTS/tts/utils/helpers.py",
"snippet": "def generate_path(duration, mask):\n \"\"\"\n Shapes:\n - duration: :math:`[B, T_en]`\n - mask: :math:'[B, T_en, T_de]`\n - path: :math:`[B, T_en, T_de]`\n \"\"\"\n b, t_x, t_y = mask.shape\n cum_duration = torch.cumsum(duration, 1)\n\n cum_duration_flat = cum_duration.view(b * t_x)\n path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)\n path = path.view(b, t_x, t_y)\n path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]\n path = path * mask\n return path"
},
{
"identifier": "maximum_path",
"path": "TTS/tts/utils/helpers.py",
"snippet": "def maximum_path(value, mask):\n if CYTHON:\n return maximum_path_cython(value, mask)\n return maximum_path_numpy(value, mask)"
},
{
"identifier": "sequence_mask",
"path": "TTS/tts/utils/helpers.py",
"snippet": "def sequence_mask(sequence_length, max_len=None):\n \"\"\"Create a sequence mask for filtering padding in a sequence tensor.\n\n Args:\n sequence_length (torch.tensor): Sequence lengths.\n max_len (int, Optional): Maximum sequence length. Defaults to None.\n\n Shapes:\n - mask: :math:`[B, T_max]`\n \"\"\"\n if max_len is None:\n max_len = sequence_length.max()\n seq_range = torch.arange(max_len, dtype=sequence_length.dtype, device=sequence_length.device)\n # B x T_max\n return seq_range.unsqueeze(0) < sequence_length.unsqueeze(1)"
}
] | from typing import Callable, Dict, Tuple
from coqpit import Coqpit
from torch import nn
from TTS.tts.layers.delightful_tts.conformer import Conformer
from TTS.tts.layers.delightful_tts.encoders import (
PhonemeLevelProsodyEncoder,
UtteranceLevelProsodyEncoder,
get_mask_from_lengths,
)
from TTS.tts.layers.delightful_tts.energy_adaptor import EnergyAdaptor
from TTS.tts.layers.delightful_tts.networks import EmbeddingPadded, positional_encoding
from TTS.tts.layers.delightful_tts.phoneme_prosody_predictor import PhonemeProsodyPredictor
from TTS.tts.layers.delightful_tts.pitch_adaptor import PitchAdaptor
from TTS.tts.layers.delightful_tts.variance_predictor import VariancePredictor
from TTS.tts.layers.generic.aligner import AlignmentNetwork
from TTS.tts.utils.helpers import generate_path, maximum_path, sequence_mask
import torch
import torch.nn.functional as F | 7,469 | ### credit: https://github.com/dunky11/voicesmith
class AcousticModel(torch.nn.Module):
def __init__(
self,
args: "ModelArgs",
tokenizer: "TTSTokenizer" = None,
speaker_manager: "SpeakerManager" = None,
):
super().__init__()
self.args = args
self.tokenizer = tokenizer
self.speaker_manager = speaker_manager
self.init_multispeaker(args)
# self.set_embedding_dims()
self.length_scale = (
float(self.args.length_scale) if isinstance(self.args.length_scale, int) else self.args.length_scale
)
self.emb_dim = args.n_hidden_conformer_encoder
self.encoder = Conformer(
dim=self.args.n_hidden_conformer_encoder,
n_layers=self.args.n_layers_conformer_encoder,
n_heads=self.args.n_heads_conformer_encoder,
speaker_embedding_dim=self.embedded_speaker_dim,
p_dropout=self.args.dropout_conformer_encoder,
kernel_size_conv_mod=self.args.kernel_size_conv_mod_conformer_encoder,
lrelu_slope=self.args.lrelu_slope,
)
self.pitch_adaptor = PitchAdaptor(
n_input=self.args.n_hidden_conformer_encoder,
n_hidden=self.args.n_hidden_variance_adaptor,
n_out=1,
kernel_size=self.args.kernel_size_variance_adaptor,
emb_kernel_size=self.args.emb_kernel_size_variance_adaptor,
p_dropout=self.args.dropout_variance_adaptor,
lrelu_slope=self.args.lrelu_slope,
)
self.energy_adaptor = EnergyAdaptor(
channels_in=self.args.n_hidden_conformer_encoder,
channels_hidden=self.args.n_hidden_variance_adaptor,
channels_out=1,
kernel_size=self.args.kernel_size_variance_adaptor,
emb_kernel_size=self.args.emb_kernel_size_variance_adaptor,
dropout=self.args.dropout_variance_adaptor,
lrelu_slope=self.args.lrelu_slope,
)
self.aligner = AlignmentNetwork(
in_query_channels=self.args.out_channels,
in_key_channels=self.args.n_hidden_conformer_encoder,
)
self.duration_predictor = VariancePredictor(
channels_in=self.args.n_hidden_conformer_encoder,
channels=self.args.n_hidden_variance_adaptor,
channels_out=1,
kernel_size=self.args.kernel_size_variance_adaptor,
p_dropout=self.args.dropout_variance_adaptor,
lrelu_slope=self.args.lrelu_slope,
)
self.utterance_prosody_encoder = UtteranceLevelProsodyEncoder(
num_mels=self.args.num_mels,
ref_enc_filters=self.args.ref_enc_filters_reference_encoder,
ref_enc_size=self.args.ref_enc_size_reference_encoder,
ref_enc_gru_size=self.args.ref_enc_gru_size_reference_encoder,
ref_enc_strides=self.args.ref_enc_strides_reference_encoder,
n_hidden=self.args.n_hidden_conformer_encoder,
dropout=self.args.dropout_conformer_encoder,
bottleneck_size_u=self.args.bottleneck_size_u_reference_encoder,
token_num=self.args.token_num_reference_encoder,
)
| ### credit: https://github.com/dunky11/voicesmith
class AcousticModel(torch.nn.Module):
def __init__(
self,
args: "ModelArgs",
tokenizer: "TTSTokenizer" = None,
speaker_manager: "SpeakerManager" = None,
):
super().__init__()
self.args = args
self.tokenizer = tokenizer
self.speaker_manager = speaker_manager
self.init_multispeaker(args)
# self.set_embedding_dims()
self.length_scale = (
float(self.args.length_scale) if isinstance(self.args.length_scale, int) else self.args.length_scale
)
self.emb_dim = args.n_hidden_conformer_encoder
self.encoder = Conformer(
dim=self.args.n_hidden_conformer_encoder,
n_layers=self.args.n_layers_conformer_encoder,
n_heads=self.args.n_heads_conformer_encoder,
speaker_embedding_dim=self.embedded_speaker_dim,
p_dropout=self.args.dropout_conformer_encoder,
kernel_size_conv_mod=self.args.kernel_size_conv_mod_conformer_encoder,
lrelu_slope=self.args.lrelu_slope,
)
self.pitch_adaptor = PitchAdaptor(
n_input=self.args.n_hidden_conformer_encoder,
n_hidden=self.args.n_hidden_variance_adaptor,
n_out=1,
kernel_size=self.args.kernel_size_variance_adaptor,
emb_kernel_size=self.args.emb_kernel_size_variance_adaptor,
p_dropout=self.args.dropout_variance_adaptor,
lrelu_slope=self.args.lrelu_slope,
)
self.energy_adaptor = EnergyAdaptor(
channels_in=self.args.n_hidden_conformer_encoder,
channels_hidden=self.args.n_hidden_variance_adaptor,
channels_out=1,
kernel_size=self.args.kernel_size_variance_adaptor,
emb_kernel_size=self.args.emb_kernel_size_variance_adaptor,
dropout=self.args.dropout_variance_adaptor,
lrelu_slope=self.args.lrelu_slope,
)
self.aligner = AlignmentNetwork(
in_query_channels=self.args.out_channels,
in_key_channels=self.args.n_hidden_conformer_encoder,
)
self.duration_predictor = VariancePredictor(
channels_in=self.args.n_hidden_conformer_encoder,
channels=self.args.n_hidden_variance_adaptor,
channels_out=1,
kernel_size=self.args.kernel_size_variance_adaptor,
p_dropout=self.args.dropout_variance_adaptor,
lrelu_slope=self.args.lrelu_slope,
)
self.utterance_prosody_encoder = UtteranceLevelProsodyEncoder(
num_mels=self.args.num_mels,
ref_enc_filters=self.args.ref_enc_filters_reference_encoder,
ref_enc_size=self.args.ref_enc_size_reference_encoder,
ref_enc_gru_size=self.args.ref_enc_gru_size_reference_encoder,
ref_enc_strides=self.args.ref_enc_strides_reference_encoder,
n_hidden=self.args.n_hidden_conformer_encoder,
dropout=self.args.dropout_conformer_encoder,
bottleneck_size_u=self.args.bottleneck_size_u_reference_encoder,
token_num=self.args.token_num_reference_encoder,
)
| self.utterance_prosody_predictor = PhonemeProsodyPredictor( | 7 | 2023-11-29 08:15:06+00:00 | 12k |
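A minimal, hypothetical shape check for the AlignmentNetwork snippet listed in this row's context (it assumes the TTS package referenced in the row's imports is installed; batch and sequence sizes are arbitrary and not taken from the data):

# Illustrative only -- not part of the dataset row.
import torch
from TTS.tts.layers.generic.aligner import AlignmentNetwork  # import path as listed in the context above

aligner = AlignmentNetwork(in_query_channels=80, in_key_channels=512, attn_channels=80)
queries = torch.randn(2, 80, 120)   # [B, C, T_de]: decoder-side features (e.g. mel frames)
keys = torch.randn(2, 512, 40)      # [B, C_emb, T_en]: encoder-side text embeddings

# With mask and attn_prior left as None, forward() returns the soft attention map and its log-probabilities.
attn, attn_logp = aligner(queries, keys)
print(attn.shape)  # e.g. torch.Size([2, 1, 120, 40]) -- one soft alignment per batch element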
magic-research/magic-animate | magicanimate/models/unet.py | [
{
"identifier": "CrossAttnDownBlock3D",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n output_states = ()\n\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, 
encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "CrossAttnUpBlock3D",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n res_hidden_states_tuple,\n temb=None,\n encoder_hidden_states=None,\n upsample_size=None,\n attention_mask=None,\n ):\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n 
encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "DownBlock3D",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None):\n output_states = ()\n\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n\n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "UNetMidBlock3DCrossAttn",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n\n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n ]\n attentions = []\n motion_modules = []\n\n for _ in range(num_layers):\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n in_channels // attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=in_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet, motion_module in zip(self.attentions, self.resnets[1:], self.motion_modules):\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n hidden_states = resnet(hidden_states, temb)\n\n return hidden_states"
},
{
"identifier": "UpBlock3D",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n\n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, encoder_hidden_states=None,):\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "get_down_block",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n \n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n):\n down_block_type = down_block_type[7:] if down_block_type.startswith(\"UNetRes\") else down_block_type\n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")"
},
{
"identifier": "get_up_block",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n):\n up_block_type = up_block_type[7:] if up_block_type.startswith(\"UNetRes\") else up_block_type\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{up_block_type} does not exist.\")"
},
{
"identifier": "InflatedConv3d",
"path": "magicanimate/models/resnet.py",
"snippet": "class InflatedConv3d(nn.Conv2d):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x"
}
] | from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from .unet_3d_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
)
from .resnet import InflatedConv3d
from diffusers.utils import WEIGHTS_NAME
import os
import json
import pdb
import torch
import torch.nn as nn
import torch.utils.checkpoint | 9,109 | # up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_attention_head_dim = list(reversed(attention_head_dim))
only_cross_attention = list(reversed(only_cross_attention))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
res = 2 ** (3 - i)
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=reversed_attention_head_dim[i],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and (res in motion_module_resolutions),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
self.conv_act = nn.SiLU()
self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
                `"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
        # Any child that exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def _set_gradient_checkpointing(self, module, value=False):
| # *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************
# Adapted from https://github.com/guoyww/AnimateDiff
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
mid_block_type: str = "UNetMidBlock3DCrossAttn",
up_block_types: Tuple[str] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D"
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: int = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
# Additional
use_motion_module = False,
motion_module_resolutions = ( 1,2,4,8 ),
motion_module_mid_block = False,
motion_module_decoder_only = False,
motion_module_type = None,
motion_module_kwargs = {},
unet_use_cross_frame_attention = None,
unet_use_temporal_attention = None,
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
# input
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
else:
self.class_embedding = None
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
only_cross_attention = [only_cross_attention] * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.down_blocks.append(down_block)
# mid
if mid_block_type == "UNetMidBlock3DCrossAttn":
self.mid_block = UNetMidBlock3DCrossAttn(
in_channels=block_out_channels[-1],
temb_channels=time_embed_dim,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
output_scale_factor=mid_block_scale_factor,
resnet_time_scale_shift=resnet_time_scale_shift,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[-1],
resnet_groups=norm_num_groups,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and motion_module_mid_block,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
else:
raise ValueError(f"unknown mid_block_type : {mid_block_type}")
# count how many layers upsample the videos
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_attention_head_dim = list(reversed(attention_head_dim))
only_cross_attention = list(reversed(only_cross_attention))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
res = 2 ** (3 - i)
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=reversed_attention_head_dim[i],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and (res in motion_module_resolutions),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
self.conv_act = nn.SiLU()
self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
                `"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
        # Any child that exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def _set_gradient_checkpointing(self, module, value=False): | if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)): | 2 | 2023-11-21 08:33:54+00:00 | 12k |
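A small, hypothetical shape sketch for the InflatedConv3d helper from this row's context, which the UNet above uses for its conv_in and conv_out layers (it assumes the magicanimate package referenced in the row's imports is importable; sizes are arbitrary):

# Illustrative only -- not part of the dataset row.
import torch
from magicanimate.models.resnet import InflatedConv3d  # import path as listed in the context above

conv_in = InflatedConv3d(4, 320, kernel_size=3, padding=(1, 1))
latents = torch.randn(2, 4, 16, 32, 32)  # [B, C, F, H, W] video latents
out = conv_in(latents)                   # frames are folded into the batch, run through the 2D conv, then unfolded
print(out.shape)                         # torch.Size([2, 320, 16, 32, 32])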
luciddreamer-cvlab/LucidDreamer | scene/dataset_readers.py | [
{
"identifier": "BasicPointCloud",
"path": "scene/gaussian_model.py",
"snippet": "class GaussianModel:\n def setup_functions(self):\n def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):\n def __init__(self, sh_degree : int):\n def capture(self):\n def restore(self, model_args, training_args):\n def get_scaling(self):\n def get_rotation(self):\n def get_xyz(self):\n def get_features(self):\n def get_opacity(self):\n def get_covariance(self, scaling_modifier = 1):\n def oneupSHdegree(self):\n def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):\n def training_setup(self, training_args):\n def update_learning_rate(self, iteration):\n def construct_list_of_attributes(self):\n def save_ply(self, filepath):\n def reset_opacity(self):\n def load_ply(self, path):\n def replace_tensor_to_optimizer(self, tensor, name):\n def _prune_optimizer(self, mask):\n def prune_points(self, mask):\n def cat_tensors_to_optimizer(self, tensors_dict):\n def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):\n def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):\n def densify_and_clone(self, grads, grad_threshold, scene_extent):\n def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):\n def add_densification_stats(self, viewspace_point_tensor, update_filter):\n L = build_scaling_rotation(scaling_modifier * scaling, rotation)"
},
{
"identifier": "MiniCam",
"path": "scene/cameras.py",
"snippet": "class MiniCam:\n def __init__(self, width, height, fovy, fovx, znear, zfar, world_view_transform, full_proj_transform):\n self.image_width = width\n self.image_height = height \n self.FoVy = fovy\n self.FoVx = fovx\n self.znear = znear\n self.zfar = zfar\n self.world_view_transform = world_view_transform\n self.full_proj_transform = full_proj_transform\n view_inv = torch.inverse(self.world_view_transform)\n self.camera_center = view_inv[3][:3]"
},
{
"identifier": "Camera",
"path": "scene/cameras.py",
"snippet": "class Camera(nn.Module):\n def __init__(self, colmap_id, R, T, FoVx, FoVy, image, gt_alpha_mask,\n image_name, uid,\n trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device = \"cuda\"\n ):\n super(Camera, self).__init__()\n\n self.uid = uid\n self.colmap_id = colmap_id\n self.R = R\n self.T = T\n self.FoVx = FoVx\n self.FoVy = FoVy\n self.image_name = image_name\n\n try:\n self.data_device = torch.device(data_device)\n except Exception as e:\n print(e)\n print(f\"[Warning] Custom device {data_device} failed, fallback to default cuda device\" )\n self.data_device = torch.device(\"cuda\")\n\n self.original_image = image.clamp(0.0, 1.0).to(self.data_device)\n self.canny_mask = image2canny(self.original_image.permute(1,2,0), 50, 150, isEdge1=False).detach().to(self.data_device)\n self.image_width = self.original_image.shape[2]\n self.image_height = self.original_image.shape[1]\n\n if gt_alpha_mask is not None:\n self.original_image *= gt_alpha_mask.to(self.data_device)\n else:\n self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device)\n\n self.zfar = 100.0\n self.znear = 0.01\n\n self.trans = trans\n self.scale = scale\n\n self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda()\n self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy).transpose(0,1).cuda()\n self.full_proj_transform = (self.world_view_transform.unsqueeze(0).bmm(self.projection_matrix.unsqueeze(0))).squeeze(0)\n self.camera_center = self.world_view_transform.inverse()[3, :3]"
},
{
"identifier": "read_extrinsics_text",
"path": "scene/colmap_loader.py",
"snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n image_id = int(elems[0])\n qvec = np.array(tuple(map(float, elems[1:5])))\n tvec = np.array(tuple(map(float, elems[5:8])))\n camera_id = int(elems[8])\n image_name = elems[9]\n elems = fid.readline().split()\n xys = np.column_stack([tuple(map(float, elems[0::3])),\n tuple(map(float, elems[1::3]))])\n point3D_ids = np.array(tuple(map(int, elems[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images"
},
{
"identifier": "read_intrinsics_text",
"path": "scene/colmap_loader.py",
"snippet": "def read_intrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n cameras = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n camera_id = int(elems[0])\n model = elems[1]\n assert model == \"PINHOLE\", \"While the loader support other types, the rest of the code assumes PINHOLE\"\n width = int(elems[2])\n height = int(elems[3])\n params = np.array(tuple(map(float, elems[4:])))\n cameras[camera_id] = Camera(id=camera_id, model=model,\n width=width, height=height,\n params=params)\n return cameras"
},
{
"identifier": "qvec2rotmat",
"path": "scene/colmap_loader.py",
"snippet": "def qvec2rotmat(qvec):\n return np.array([\n [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,\n 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],\n 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],\n [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],\n 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,\n 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],\n [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],\n 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],\n 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])"
},
{
"identifier": "read_extrinsics_binary",
"path": "scene/colmap_loader.py",
"snippet": "def read_extrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadImagesBinary(const std::string& path)\n void Reconstruction::WriteImagesBinary(const std::string& path)\n \"\"\"\n images = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_reg_images = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_reg_images):\n binary_image_properties = read_next_bytes(\n fid, num_bytes=64, format_char_sequence=\"idddddddi\")\n image_id = binary_image_properties[0]\n qvec = np.array(binary_image_properties[1:5])\n tvec = np.array(binary_image_properties[5:8])\n camera_id = binary_image_properties[8]\n image_name = \"\"\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n while current_char != b\"\\x00\": # look for the ASCII 0 entry\n image_name += current_char.decode(\"utf-8\")\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n num_points2D = read_next_bytes(fid, num_bytes=8,\n format_char_sequence=\"Q\")[0]\n x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,\n format_char_sequence=\"ddq\"*num_points2D)\n xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),\n tuple(map(float, x_y_id_s[1::3]))])\n point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images"
},
{
"identifier": "read_intrinsics_binary",
"path": "scene/colmap_loader.py",
"snippet": "def read_intrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::WriteCamerasBinary(const std::string& path)\n void Reconstruction::ReadCamerasBinary(const std::string& path)\n \"\"\"\n cameras = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_cameras = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_cameras):\n camera_properties = read_next_bytes(\n fid, num_bytes=24, format_char_sequence=\"iiQQ\")\n camera_id = camera_properties[0]\n model_id = camera_properties[1]\n model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name\n width = camera_properties[2]\n height = camera_properties[3]\n num_params = CAMERA_MODEL_IDS[model_id].num_params\n params = read_next_bytes(fid, num_bytes=8*num_params,\n format_char_sequence=\"d\"*num_params)\n cameras[camera_id] = Camera(id=camera_id,\n model=model_name,\n width=width,\n height=height,\n params=np.array(params))\n assert len(cameras) == num_cameras\n return cameras"
},
{
"identifier": "read_points3D_binary",
"path": "scene/colmap_loader.py",
"snippet": "def read_points3D_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DBinary(const std::string& path)\n void Reconstruction::WritePoints3DBinary(const std::string& path)\n \"\"\"\n\n\n with open(path_to_model_file, \"rb\") as fid:\n num_points = read_next_bytes(fid, 8, \"Q\")[0]\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n\n for p_id in range(num_points):\n binary_point_line_properties = read_next_bytes(\n fid, num_bytes=43, format_char_sequence=\"QdddBBBd\")\n xyz = np.array(binary_point_line_properties[1:4])\n rgb = np.array(binary_point_line_properties[4:7])\n error = np.array(binary_point_line_properties[7])\n track_length = read_next_bytes(\n fid, num_bytes=8, format_char_sequence=\"Q\")[0]\n track_elems = read_next_bytes(\n fid, num_bytes=8*track_length,\n format_char_sequence=\"ii\"*track_length)\n xyzs[p_id] = xyz\n rgbs[p_id] = rgb\n errors[p_id] = error\n return xyzs, rgbs, errors"
},
{
"identifier": "read_points3D_text",
"path": "scene/colmap_loader.py",
"snippet": "def read_points3D_text(path):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DText(const std::string& path)\n void Reconstruction::WritePoints3DText(const std::string& path)\n \"\"\"\n xyzs = None\n rgbs = None\n errors = None\n num_points = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n num_points += 1\n\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n count = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n xyz = np.array(tuple(map(float, elems[1:4])))\n rgb = np.array(tuple(map(int, elems[4:7])))\n error = np.array(float(elems[7]))\n xyzs[count] = xyz\n rgbs[count] = rgb\n errors[count] = error\n count += 1\n\n return xyzs, rgbs, errors"
},
{
"identifier": "getWorld2View2",
"path": "utils/graphics.py",
"snippet": "def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):\n Rt = np.zeros((4, 4))\n Rt[:3, :3] = R.transpose()\n Rt[:3, 3] = t\n Rt[3, 3] = 1.0\n\n C2W = np.linalg.inv(Rt)\n cam_center = C2W[:3, 3]\n cam_center = (cam_center + translate) * scale\n C2W[:3, 3] = cam_center\n Rt = np.linalg.inv(C2W)\n return np.float32(Rt)"
},
{
"identifier": "focal2fov",
"path": "utils/graphics.py",
"snippet": "def focal2fov(focal, pixels):\n return 2*math.atan(pixels/(2*focal))"
},
{
"identifier": "fov2focal",
"path": "utils/graphics.py",
"snippet": "def fov2focal(fov, pixels):\n return pixels / (2 * math.tan(fov / 2))"
},
{
"identifier": "getProjectionMatrix",
"path": "utils/graphics.py",
"snippet": "def getProjectionMatrix(znear, zfar, fovX, fovY):\n tanHalfFovY = math.tan((fovY / 2))\n tanHalfFovX = math.tan((fovX / 2))\n\n top = tanHalfFovY * znear\n bottom = -top\n right = tanHalfFovX * znear\n left = -right\n\n P = torch.zeros(4, 4)\n\n z_sign = 1.0\n\n P[0, 0] = 2.0 * znear / (right - left)\n P[1, 1] = 2.0 * znear / (top - bottom)\n P[0, 2] = (right + left) / (right - left)\n P[1, 2] = (top + bottom) / (top - bottom)\n P[3, 2] = z_sign\n P[2, 2] = z_sign * zfar / (zfar - znear)\n P[2, 3] = -(zfar * znear) / (zfar - znear)\n return P"
},
{
"identifier": "get_camerapaths",
"path": "utils/trajectory.py",
"snippet": "def get_camerapaths():\n preset_json = {}\n for cam_path in [\"back_and_forth\", \"llff\", \"headbanging\"]:\n if cam_path == 'back_and_forth':\n render_poses = generate_seed_back()\n elif cam_path == 'llff':\n render_poses = generate_seed_llff(5, 400, round=4, d=2)\n elif cam_path == 'headbanging':\n render_poses = generate_seed_headbanging(maxdeg=15, nviews_per_round=180, round=2, fullround=0)\n else:\n raise(\"Unknown pass\")\n \n yz_reverse = np.array([[1,0,0], [0,-1,0], [0,0,-1]])\n blender_train_json = {\"frames\": []}\n for render_pose in render_poses:\n curr_frame = {}\n ### Transform world to pixel\n Rw2i = render_pose[:3,:3]\n Tw2i = render_pose[:3,3:4]\n\n # Transfrom cam2 to world + change sign of yz axis\n Ri2w = np.matmul(yz_reverse, Rw2i).T\n Ti2w = -np.matmul(Ri2w, np.matmul(yz_reverse, Tw2i))\n Pc2w = np.concatenate((Ri2w, Ti2w), axis=1)\n Pc2w = np.concatenate((Pc2w, np.array([0,0,0,1]).reshape((1,4))), axis=0)\n\n curr_frame[\"transform_matrix\"] = Pc2w.tolist()\n blender_train_json[\"frames\"].append(curr_frame)\n\n preset_json[cam_path] = blender_train_json\n\n return preset_json"
},
{
"identifier": "SH2RGB",
"path": "utils/sh.py",
"snippet": "def SH2RGB(sh):\n return sh * C0 + 0.5"
}
] | import os
import sys
import json
import imageio
import torch
import numpy as np
from typing import NamedTuple
from pathlib import Path
from PIL import Image
from plyfile import PlyData, PlyElement
from scene.gaussian_model import BasicPointCloud
from scene.cameras import MiniCam, Camera
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics import getWorld2View2, focal2fov, fov2focal
from utils.graphics import getProjectionMatrix
from utils.trajectory import get_camerapaths
from utils.sh import SH2RGB | 8,039 |
cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path='None', image_name='None', width=image.size[1], height=image.size[0]))
return cam_infos
def readNerfSyntheticInfo(path, white_background, eval, preset=None, extension=".png"):
print("Reading Training Transforms")
train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension)
print("Reading Test Transforms")
test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension)
if preset:
preset_cam_infos = readCamerasFromPreset('/home/chung/workspace/gaussian-splatting/poses_supplementary', f"{preset}.json")
else:
preset_cam_infos = None
if not eval:
train_cam_infos.extend(test_cam_infos)
test_cam_infos = []
nerf_normalization = getNerfppNorm(train_cam_infos)
ply_path = os.path.join(path, "points3d.ply")
if not os.path.exists(ply_path):
# Since this data set has no colmap data, we start with random points
num_pts = 100_000
print(f"Generating random point cloud ({num_pts})...")
# We create random points inside the bounds of the synthetic Blender scenes
xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3
shs = np.random.random((num_pts, 3)) / 255.0
pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3)))
storePly(ply_path, xyz, SH2RGB(shs) * 255)
try:
pcd = fetchPly(ply_path)
except:
pcd = None
scene_info = SceneInfo(point_cloud=pcd,
train_cameras=train_cam_infos,
test_cameras=test_cam_infos,
preset_cameras=preset_cam_infos,
nerf_normalization=nerf_normalization,
ply_path=ply_path)
return scene_info
def loadCamerasFromData(traindata, white_background):
cameras = []
fovx = traindata["camera_angle_x"]
frames = traindata["frames"]
for idx, frame in enumerate(frames):
# NeRF 'transform_matrix' is a camera-to-world transform
c2w = np.array(frame["transform_matrix"])
# change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
c2w[:3, 1:3] *= -1
# get the world-to-camera transform and set R, T
w2c = np.linalg.inv(c2w)
R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code
T = w2c[:3, 3]
image = frame["image"] if "image" in frame else None
im_data = np.array(image.convert("RGBA"))
bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0])
norm_data = im_data / 255.0
arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4])
image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB")
loaded_mask = np.ones_like(norm_data[:, :, 3:4])
fovy = focal2fov(fov2focal(fovx, image.size[1]), image.size[0])
FovY = fovy
FovX = fovx
image = torch.Tensor(arr).permute(2,0,1)
loaded_mask = None #torch.Tensor(loaded_mask).permute(2,0,1)
        ### TODO: needs to be converted to torch
cameras.append(Camera(colmap_id=idx, R=R, T=T, FoVx=FovX, FoVy=FovY, image=image,
gt_alpha_mask=loaded_mask, image_name='', uid=idx, data_device='cuda'))
return cameras
def loadCameraPreset(traindata, presetdata):
cam_infos = {}
## camera setting (for H, W and focal)
fovx = traindata["camera_angle_x"] * 1.2
W, H = traindata["frames"][0]["image"].size
# W, H = traindata["W"], traindata["H"]
for camkey in presetdata:
cam_infos[camkey] = []
for idx, frame in enumerate(presetdata[camkey]["frames"]):
# NeRF 'transform_matrix' is a camera-to-world transform
c2w = np.array(frame["transform_matrix"])
# change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
c2w[:3, 1:3] *= -1
# get the world-to-camera transform and set R, T
w2c = np.linalg.inv(c2w)
R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code
T = w2c[:3, 3]
fovy = focal2fov(fov2focal(fovx, W), H)
FovY = fovy
FovX = fovx
znear, zfar = 0.01, 100
world_view_transform = torch.tensor(getWorld2View2(R, T, np.array([0.0, 0.0, 0.0]), 1.0)).transpose(0, 1).cuda()
projection_matrix = getProjectionMatrix(znear=znear, zfar=zfar, fovX=FovX, fovY=FovY).transpose(0,1).cuda()
full_proj_transform = (world_view_transform.unsqueeze(0).bmm(projection_matrix.unsqueeze(0))).squeeze(0)
| #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#
class CameraInfo(NamedTuple):
uid: int
R: np.array
T: np.array
FovY: np.array
FovX: np.array
image: np.array
image_path: str
image_name: str
width: int
height: int
class SceneInfo(NamedTuple):
point_cloud: BasicPointCloud
train_cameras: list
test_cameras: list
preset_cameras: list
nerf_normalization: dict
ply_path: str
def getNerfppNorm(cam_info):
def get_center_and_diag(cam_centers):
cam_centers = np.hstack(cam_centers)
avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
center = avg_cam_center
dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
diagonal = np.max(dist)
return center.flatten(), diagonal
cam_centers = []
for cam in cam_info:
W2C = getWorld2View2(cam.R, cam.T)
C2W = np.linalg.inv(W2C)
cam_centers.append(C2W[:3, 3:4])
center, diagonal = get_center_and_diag(cam_centers)
radius = diagonal * 1.1
translate = -center
return {"translate": translate, "radius": radius}
def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
cam_infos = []
for idx, key in enumerate(cam_extrinsics):
sys.stdout.write('\r')
# the exact output you're looking for:
sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
sys.stdout.flush()
extr = cam_extrinsics[key]
intr = cam_intrinsics[extr.camera_id]
height = intr.height
width = intr.width
uid = intr.id
R = np.transpose(qvec2rotmat(extr.qvec))
T = np.array(extr.tvec)
if intr.model=="SIMPLE_PINHOLE":
focal_length_x = intr.params[0]
FovY = focal2fov(focal_length_x, height)
FovX = focal2fov(focal_length_x, width)
elif intr.model=="PINHOLE":
focal_length_x = intr.params[0]
focal_length_y = intr.params[1]
FovY = focal2fov(focal_length_y, height)
FovX = focal2fov(focal_length_x, width)
else:
assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
image_path = os.path.join(images_folder, os.path.basename(extr.name))
image_name = os.path.basename(image_path).split(".")[0]
image = Image.open(image_path)
cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name, width=width, height=height)
cam_infos.append(cam_info)
sys.stdout.write('\n')
return cam_infos
def fetchPly(path):
plydata = PlyData.read(path)
vertices = plydata['vertex']
idx = np.random.choice(len(vertices['x']),size=(min(len(vertices['x']), 100_000),),replace=False)
positions = np.vstack([vertices['x'][idx], vertices['y'][idx], vertices['z'][idx]]).T if 'x' in vertices else None
colors = np.vstack([vertices['red'][idx], vertices['green'][idx], vertices['blue'][idx]]).T / 255.0 if 'red' in vertices else None
normals = np.vstack([vertices['nx'][idx], vertices['ny'][idx], vertices['nz'][idx]]).T if 'nx' in vertices else None
return BasicPointCloud(points=positions, colors=colors, normals=normals)
def storePly(path, xyz, rgb):
# Define the dtype for the structured array
dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
normals = np.zeros_like(xyz)
elements = np.empty(xyz.shape[0], dtype=dtype)
attributes = np.concatenate((xyz, normals, rgb), axis=1)
elements[:] = list(map(tuple, attributes))
# Create the PlyData object and write to file
vertex_element = PlyElement.describe(elements, 'vertex')
ply_data = PlyData([vertex_element])
ply_data.write(path)
def readColmapSceneInfo(path, images, eval, preset=None, llffhold=8):
try:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
except:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)
reading_dir = "images" if images == None else images
cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir))
cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name)
if eval:
# train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0]
# test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0]
train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % 5 == 2 or idx % 5 == 0]
test_cam_infos = [c for idx, c in enumerate(cam_infos) if not (idx % 5 == 2 or idx % 5 == 0)]
else:
train_cam_infos = cam_infos
test_cam_infos = []
nerf_normalization = getNerfppNorm(train_cam_infos)
ply_path = os.path.join(path, "sparse/0/points3D.ply")
bin_path = os.path.join(path, "sparse/0/points3D.bin")
txt_path = os.path.join(path, "sparse/0/points3D.txt")
if not os.path.exists(ply_path):
print("Converting point3d.bin to .ply, will happen only the first time you open the scene.")
try:
xyz, rgb, _ = read_points3D_binary(bin_path)
except:
xyz, rgb, _ = read_points3D_text(txt_path)
storePly(ply_path, xyz, rgb)
try:
pcd = fetchPly(ply_path)
except:
pcd = None
if preset:
preset_cam_infos = readCamerasFromPreset('/home/chung/workspace/gaussian-splatting/poses_supplementary', f"{preset}.json")
else:
preset_cam_infos = None
scene_info = SceneInfo(point_cloud=pcd,
train_cameras=train_cam_infos,
test_cameras=test_cam_infos,
preset_cameras=preset_cam_infos,
nerf_normalization=nerf_normalization,
ply_path=ply_path)
return scene_info
def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"):
cam_infos = []
with open(os.path.join(path, transformsfile)) as json_file:
contents = json.load(json_file)
fovx = contents["camera_angle_x"]
frames = contents["frames"]
for idx, frame in enumerate(frames):
cam_name = os.path.join(path, frame["file_path"] + extension)
# NeRF 'transform_matrix' is a camera-to-world transform
c2w = np.array(frame["transform_matrix"])
# change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
c2w[:3, 1:3] *= -1
# get the world-to-camera transform and set R, T
w2c = np.linalg.inv(c2w)
R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code
T = w2c[:3, 3]
image_path = os.path.join(path, cam_name)
image_name = Path(cam_name).stem
image = Image.open(image_path)
# if os.path.exists(os.path.join(path, frame["file_path"].replace("/train/", "/depths_train/")+'.npy')):
# depth = np.load(os.path.join(path, frame["file_path"].replace("/train/", "/depths_train/")+'.npy'))
# if os.path.exists(os.path.join(path, frame["file_path"].replace("/train/", "/masks_train/")+'.png')):
# mask = imageio.v3.imread(os.path.join(path, frame["file_path"].replace("/train/", "/masks_train/")+'.png'))[:,:,0]/255.
# else:
# mask = np.ones_like(depth)
# final_depth = depth*mask
# else:
# final_depth = None
im_data = np.array(image.convert("RGBA"))
bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0])
norm_data = im_data / 255.0
arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4])
image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB")
fovy = focal2fov(fov2focal(fovx, image.size[1]), image.size[0])
FovY = fovy
FovX = fovx
cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name, width=image.size[0], height=image.size[1]))
return cam_infos
def readCamerasFromPreset(path, transformsfile):
cam_infos = []
with open(os.path.join(path, transformsfile)) as json_file:
contents = json.load(json_file)
FOV = contents["camera_angle_x"]*1.2
frames = contents["frames"]
for idx, frame in enumerate(frames):
# NeRF 'transform_matrix' is a camera-to-world transform
c2w = np.array(frame["transform_matrix"])
# change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
c2w[:3, 1:3] *= -1
# get the world-to-camera transform and set R, T
w2c = np.linalg.inv(np.concatenate((c2w, np.array([0,0,0,1]).reshape(1,4)), axis=0))
R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code
T = w2c[:3, 3]
# R = c2w[:3,:3]
# T = - np.transpose(R).dot(c2w[:3,3])
image = Image.fromarray(np.zeros((512,512)), "RGB")
FovY = focal2fov(fov2focal(FOV, 512), image.size[0])
FovX = focal2fov(fov2focal(FOV, 512), image.size[1])
# FovX, FovY = contents["camera_angle_x"], contents["camera_angle_x"]
cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path='None', image_name='None', width=image.size[1], height=image.size[0]))
return cam_infos
def readNerfSyntheticInfo(path, white_background, eval, preset=None, extension=".png"):
print("Reading Training Transforms")
train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension)
print("Reading Test Transforms")
test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension)
if preset:
preset_cam_infos = readCamerasFromPreset('/home/chung/workspace/gaussian-splatting/poses_supplementary', f"{preset}.json")
else:
preset_cam_infos = None
if not eval:
train_cam_infos.extend(test_cam_infos)
test_cam_infos = []
nerf_normalization = getNerfppNorm(train_cam_infos)
ply_path = os.path.join(path, "points3d.ply")
if not os.path.exists(ply_path):
# Since this data set has no colmap data, we start with random points
num_pts = 100_000
print(f"Generating random point cloud ({num_pts})...")
# We create random points inside the bounds of the synthetic Blender scenes
xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3
shs = np.random.random((num_pts, 3)) / 255.0
pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3)))
storePly(ply_path, xyz, SH2RGB(shs) * 255)
try:
pcd = fetchPly(ply_path)
except:
pcd = None
scene_info = SceneInfo(point_cloud=pcd,
train_cameras=train_cam_infos,
test_cameras=test_cam_infos,
preset_cameras=preset_cam_infos,
nerf_normalization=nerf_normalization,
ply_path=ply_path)
return scene_info
def loadCamerasFromData(traindata, white_background):
cameras = []
fovx = traindata["camera_angle_x"]
frames = traindata["frames"]
for idx, frame in enumerate(frames):
# NeRF 'transform_matrix' is a camera-to-world transform
c2w = np.array(frame["transform_matrix"])
# change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
c2w[:3, 1:3] *= -1
# get the world-to-camera transform and set R, T
w2c = np.linalg.inv(c2w)
R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code
T = w2c[:3, 3]
image = frame["image"] if "image" in frame else None
im_data = np.array(image.convert("RGBA"))
bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0])
norm_data = im_data / 255.0
arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4])
image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB")
loaded_mask = np.ones_like(norm_data[:, :, 3:4])
fovy = focal2fov(fov2focal(fovx, image.size[1]), image.size[0])
FovY = fovy
FovX = fovx
image = torch.Tensor(arr).permute(2,0,1)
loaded_mask = None #torch.Tensor(loaded_mask).permute(2,0,1)
        ### TODO: needs to be converted to torch
cameras.append(Camera(colmap_id=idx, R=R, T=T, FoVx=FovX, FoVy=FovY, image=image,
gt_alpha_mask=loaded_mask, image_name='', uid=idx, data_device='cuda'))
return cameras
def loadCameraPreset(traindata, presetdata):
cam_infos = {}
## camera setting (for H, W and focal)
fovx = traindata["camera_angle_x"] * 1.2
W, H = traindata["frames"][0]["image"].size
# W, H = traindata["W"], traindata["H"]
for camkey in presetdata:
cam_infos[camkey] = []
for idx, frame in enumerate(presetdata[camkey]["frames"]):
# NeRF 'transform_matrix' is a camera-to-world transform
c2w = np.array(frame["transform_matrix"])
# change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
c2w[:3, 1:3] *= -1
# get the world-to-camera transform and set R, T
w2c = np.linalg.inv(c2w)
R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code
T = w2c[:3, 3]
fovy = focal2fov(fov2focal(fovx, W), H)
FovY = fovy
FovX = fovx
znear, zfar = 0.01, 100
world_view_transform = torch.tensor(getWorld2View2(R, T, np.array([0.0, 0.0, 0.0]), 1.0)).transpose(0, 1).cuda()
projection_matrix = getProjectionMatrix(znear=znear, zfar=zfar, fovX=FovX, fovY=FovY).transpose(0,1).cuda()
full_proj_transform = (world_view_transform.unsqueeze(0).bmm(projection_matrix.unsqueeze(0))).squeeze(0)
| cam_infos[camkey].append(MiniCam(width=W, height=H, fovy=FovY, fovx=FovX, znear=znear, zfar=zfar, | 1 | 2023-11-22 06:54:32+00:00 | 12k |
AILab-CVC/UniRepLKNet | Video/run_class_finetuning.py | [
{
"identifier": "LayerDecayValueAssigner",
"path": "optim_factory.py",
"snippet": "class LayerDecayValueAssigner(object):\n def __init__(self, values):\n self.values = values\n\n def get_scale(self, layer_id):\n return self.values[layer_id]\n\n def get_layer_id(self, var_name):\n return get_num_layer_for_convnext(var_name)"
},
{
"identifier": "create_optimizer",
"path": "optim_factory.py",
"snippet": "def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None):\n opt_lower = args.opt.lower()\n weight_decay = args.weight_decay\n # if weight_decay and filter_bias_and_bn:\n if filter_bias_and_bn:\n skip = {}\n if skip_list is not None:\n skip = skip_list\n elif hasattr(model, 'no_weight_decay'):\n skip = model.no_weight_decay()\n parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale)\n weight_decay = 0.\n else:\n parameters = model.parameters()\n\n if 'fused' in opt_lower:\n assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'\n\n opt_args = dict(lr=args.lr, weight_decay=weight_decay)\n if hasattr(args, 'opt_eps') and args.opt_eps is not None:\n opt_args['eps'] = args.opt_eps\n if hasattr(args, 'opt_betas') and args.opt_betas is not None:\n opt_args['betas'] = args.opt_betas\n\n opt_split = opt_lower.split('_')\n opt_lower = opt_split[-1]\n if opt_lower == 'sgd' or opt_lower == 'nesterov':\n opt_args.pop('eps', None)\n optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'momentum':\n opt_args.pop('eps', None)\n optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)\n elif opt_lower == 'adam':\n optimizer = optim.Adam(parameters, **opt_args)\n elif opt_lower == 'adamw':\n optimizer = optim.AdamW(parameters, **opt_args)\n elif opt_lower == 'nadam':\n optimizer = Nadam(parameters, **opt_args)\n elif opt_lower == 'radam':\n optimizer = RAdam(parameters, **opt_args)\n elif opt_lower == 'adamp':\n optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)\n elif opt_lower == 'sgdp':\n optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'adadelta':\n optimizer = optim.Adadelta(parameters, **opt_args)\n elif opt_lower == 'adafactor':\n if not args.lr:\n opt_args['lr'] = None\n optimizer = Adafactor(parameters, **opt_args)\n elif opt_lower == 'adahessian':\n optimizer = Adahessian(parameters, **opt_args)\n elif opt_lower == 'rmsprop':\n optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)\n elif opt_lower == 'rmsproptf':\n optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)\n elif opt_lower == 'fusedsgd':\n opt_args.pop('eps', None)\n optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'fusedmomentum':\n opt_args.pop('eps', None)\n optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)\n elif opt_lower == 'fusedadam':\n optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)\n elif opt_lower == 'fusedadamw':\n optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)\n elif opt_lower == 'fusedlamb':\n optimizer = FusedLAMB(parameters, **opt_args)\n elif opt_lower == 'fusednovograd':\n opt_args.setdefault('betas', (0.95, 0.98))\n optimizer = FusedNovoGrad(parameters, **opt_args)\n else:\n assert False and \"Invalid optimizer\"\n\n if len(opt_split) > 1:\n if opt_split[0] == 'lookahead':\n optimizer = Lookahead(optimizer)\n\n return optimizer"
},
{
"identifier": "get_parameter_groups",
"path": "optim_factory.py",
"snippet": "def get_parameter_groups(model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None):\n parameter_group_names = {}\n parameter_group_vars = {}\n\n for name, param in model.named_parameters():\n if not param.requires_grad:\n continue # frozen weights\n if len(param.shape) == 1 or name.endswith(\".bias\") or name in skip_list:\n group_name = \"no_decay\"\n this_weight_decay = 0.\n else:\n group_name = \"decay\"\n this_weight_decay = weight_decay\n if get_num_layer is not None:\n layer_id = get_num_layer(name)\n group_name = \"layer_%d_%s\" % (layer_id, group_name)\n else:\n layer_id = None\n\n if group_name not in parameter_group_names:\n if get_layer_scale is not None:\n scale = get_layer_scale(layer_id)\n else:\n scale = 1.\n\n parameter_group_names[group_name] = {\n \"weight_decay\": this_weight_decay,\n \"params\": [],\n \"lr_scale\": scale\n }\n parameter_group_vars[group_name] = {\n \"weight_decay\": this_weight_decay,\n \"params\": [],\n \"lr_scale\": scale\n }\n\n parameter_group_vars[group_name][\"params\"].append(param)\n parameter_group_names[group_name][\"params\"].append(name)\n print(\"Param groups = %s\" % json.dumps(parameter_group_names, indent=2))\n return list(parameter_group_vars.values())"
},
{
"identifier": "NativeScalerWithGradNormCount",
"path": "utils.py",
"snippet": "class NativeScalerWithGradNormCount:\n state_dict_key = \"amp_scaler\"\n\n def __init__(self):\n self._scaler = torch.cuda.amp.GradScaler()\n\n def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):\n self._scaler.scale(loss).backward(create_graph=create_graph)\n if update_grad:\n if clip_grad is not None:\n assert parameters is not None\n self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place\n norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)\n else:\n self._scaler.unscale_(optimizer)\n norm = get_grad_norm_(parameters)\n self._scaler.step(optimizer)\n self._scaler.update()\n else:\n norm = None\n return norm\n\n def state_dict(self):\n return self._scaler.state_dict()\n\n def load_state_dict(self, state_dict):\n self._scaler.load_state_dict(state_dict)"
},
{
"identifier": "multiple_samples_collate",
"path": "utils.py",
"snippet": "class SmoothedValue(object):\nclass MetricLogger(object):\nclass TensorboardLogger(object):\nclass WandbLogger(object):\nclass NativeScalerWithGradNormCount:\n def __init__(self, window_size=20, fmt=None):\n def update(self, value, n=1):\n def synchronize_between_processes(self):\n def median(self):\n def avg(self):\n def global_avg(self):\n def max(self):\n def value(self):\n def __str__(self):\n def __init__(self, delimiter=\"\\t\"):\n def update(self, **kwargs):\n def __getattr__(self, attr):\n def __str__(self):\n def synchronize_between_processes(self):\n def add_meter(self, name, meter):\n def log_every(self, iterable, print_freq, header=None):\n def __init__(self, log_dir):\n def set_step(self, step=None):\n def update(self, head='scalar', step=None, **kwargs):\n def flush(self):\n def __init__(self, args):\n def log_epoch_metrics(self, metrics, commit=True):\n def log_checkpoints(self):\n def set_steps(self):\ndef setup_for_distributed(is_master):\n def print(*args, **kwargs):\ndef is_dist_avail_and_initialized():\ndef get_world_size():\ndef get_rank():\ndef is_main_process():\ndef save_on_master(*args, **kwargs):\ndef init_distributed_mode(args):\ndef load_state_dict(model, state_dict, prefix='', ignore_missing=\"relative_position_index\"):\n def load(module, prefix=''):\n def __init__(self):\n def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):\n def state_dict(self):\n def load_state_dict(self, state_dict):\ndef get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:\ndef cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0,\n start_warmup_value=0, warmup_steps=-1):\ndef save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):\ndef auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):\n MB = 1024.0 * 1024.0"
},
{
"identifier": "UniRepLKNet",
"path": "unireplknet.py",
"snippet": "class UniRepLKNet(nn.Module):\n r\"\"\" UniRepLKNet\n A PyTorch impl of UniRepLKNet\n\n Args:\n in_chans (int): Number of input image channels. Default: 3\n num_classes (int): Number of classes for classification head. Default: 1000\n depths (tuple(int)): Number of blocks at each stage. Default: (3, 3, 27, 3)\n dims (int): Feature dimension at each stage. Default: (96, 192, 384, 768)\n drop_path_rate (float): Stochastic depth rate. Default: 0.\n layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.\n head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.\n kernel_sizes (tuple(tuple(int))): Kernel size for each block. None means using the default settings. Default: None.\n deploy (bool): deploy = True means using the inference structure. Default: False\n with_cp (bool): with_cp = True means using torch.utils.checkpoint to save GPU memory. Default: False\n init_cfg (dict): weights to load. The easiest way to use UniRepLKNet with for OpenMMLab family. Default: None\n attempt_use_lk_impl (bool): try to load the efficient iGEMM large-kernel impl. Setting it to False disabling the iGEMM impl. Default: True\n use_sync_bn (bool): use_sync_bn = True means using sync BN. Use it if your batch size is small. Default: False\n \"\"\"\n def __init__(self,\n in_chans=3,\n num_classes=1000,\n depths=(3, 3, 27, 3),\n dims=(96, 192, 384, 768),\n drop_path_rate=0.,\n layer_scale_init_value=1e-6,\n head_init_scale=1.,\n kernel_sizes=None,\n deploy=False,\n with_cp=False,\n init_cfg=None,\n attempt_use_lk_impl=True,\n use_sync_bn=False,\n **kwargs\n ):\n super().__init__()\n\n depths = tuple(depths)\n if kernel_sizes is None:\n if depths in default_depths_to_kernel_sizes:\n print('=========== use default kernel size ')\n kernel_sizes = default_depths_to_kernel_sizes[depths]\n else:\n raise ValueError('no default kernel size settings for the given depths, '\n 'please specify kernel sizes for each block, e.g., '\n '((3, 3), (13, 13), (13, 13, 13, 13, 13, 13), (13, 13))')\n print(kernel_sizes)\n for i in range(4):\n assert len(kernel_sizes[i]) == depths[i], 'kernel sizes do not match the depths'\n\n self.with_cp = with_cp\n\n dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]\n print('=========== drop path rates: ', dp_rates)\n\n self.downsample_layers = nn.ModuleList()\n self.downsample_layers.append(nn.Sequential(\n nn.Conv2d(in_chans, dims[0] // 2, kernel_size=3, stride=2, padding=1),\n LayerNorm(dims[0] // 2, eps=1e-6, data_format=\"channels_first\"),\n nn.GELU(),\n nn.Conv2d(dims[0] // 2, dims[0], kernel_size=3, stride=2, padding=1),\n LayerNorm(dims[0], eps=1e-6, data_format=\"channels_first\")))\n\n for i in range(3):\n self.downsample_layers.append(nn.Sequential(\n nn.Conv2d(dims[i], dims[i + 1], kernel_size=3, stride=2, padding=1),\n LayerNorm(dims[i + 1], eps=1e-6, data_format=\"channels_first\")))\n\n self.stages = nn.ModuleList()\n\n cur = 0\n for i in range(4):\n main_stage = nn.Sequential(\n *[UniRepLKNetBlock(dim=dims[i], kernel_size=kernel_sizes[i][j], drop_path=dp_rates[cur + j],\n layer_scale_init_value=layer_scale_init_value, deploy=deploy,\n attempt_use_lk_impl=attempt_use_lk_impl,\n with_cp=with_cp, use_sync_bn=use_sync_bn) for j in\n range(depths[i])])\n self.stages.append(main_stage)\n cur += depths[i]\n\n last_channels = dims[-1]\n\n self.for_pretrain = init_cfg is None\n self.for_downstream = not self.for_pretrain # there may be some other scenarios\n if self.for_downstream:\n assert num_classes 
is None\n\n if self.for_pretrain:\n self.init_cfg = None\n self.norm = nn.LayerNorm(last_channels, eps=1e-6) # final norm layer\n self.head = nn.Linear(last_channels, num_classes)\n self.apply(self._init_weights)\n self.head.weight.data.mul_(head_init_scale)\n self.head.bias.data.mul_(head_init_scale)\n self.output_mode = 'logits'\n else:\n self.init_cfg = init_cfg # OpenMMLab style init\n self.init_weights()\n self.output_mode = 'features'\n norm_layer = partial(LayerNorm, eps=1e-6, data_format=\"channels_first\")\n for i_layer in range(4):\n layer = norm_layer(dims[i_layer])\n layer_name = f'norm{i_layer}'\n self.add_module(layer_name, layer)\n\n\n # load pretrained backbone weights in the OpenMMLab style\n def init_weights(self):\n\n def load_state_dict(module, state_dict, strict=False, logger=None):\n unexpected_keys = []\n own_state = module.state_dict()\n for name, param in state_dict.items():\n if name not in own_state:\n unexpected_keys.append(name)\n continue\n if isinstance(param, torch.nn.Parameter):\n # backwards compatibility for serialized parameters\n param = param.data\n try:\n own_state[name].copy_(param)\n except Exception:\n raise RuntimeError(\n 'While copying the parameter named {}, '\n 'whose dimensions in the model are {} and '\n 'whose dimensions in the checkpoint are {}.'.format(\n name, own_state[name].size(), param.size()))\n missing_keys = set(own_state.keys()) - set(state_dict.keys())\n\n err_msg = []\n if unexpected_keys:\n err_msg.append('unexpected key in source state_dict: {}\\n'.format(', '.join(unexpected_keys)))\n if missing_keys:\n err_msg.append('missing keys in source state_dict: {}\\n'.format(', '.join(missing_keys)))\n err_msg = '\\n'.join(err_msg)\n if err_msg:\n if strict:\n raise RuntimeError(err_msg)\n elif logger is not None:\n logger.warn(err_msg)\n else:\n print(err_msg)\n\n logger = get_root_logger()\n assert self.init_cfg is not None\n ckpt_path = self.init_cfg['checkpoint']\n if ckpt_path is None:\n print('================ Note: init_cfg is provided but I got no init ckpt path, so skip initialization')\n else:\n ckpt = _load_checkpoint(ckpt_path, logger=logger, map_location='cpu')\n if 'state_dict' in ckpt:\n _state_dict = ckpt['state_dict']\n elif 'model' in ckpt:\n _state_dict = ckpt['model']\n else:\n _state_dict = ckpt\n\n load_state_dict(self, _state_dict, strict=False, logger=logger)\n\n\n def _init_weights(self, m):\n if isinstance(m, (nn.Conv2d, nn.Linear)):\n trunc_normal_(m.weight, std=.02)\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n if self.output_mode == 'logits':\n for stage_idx in range(4):\n x = self.downsample_layers[stage_idx](x)\n x = self.stages[stage_idx](x)\n x = self.norm(x.mean([-2, -1]))\n x = self.head(x)\n return x\n elif self.output_mode == 'features':\n outs = []\n for stage_idx in range(4):\n x = self.downsample_layers[stage_idx](x)\n x = self.stages[stage_idx](x)\n outs.append(self.__getattr__(f'norm{stage_idx}')(x))\n return outs\n else:\n raise ValueError('Defined new output mode?')\n\n def reparameterize_unireplknet(self):\n for m in self.modules():\n if hasattr(m, 'reparameterize'):\n m.reparameterize()"
}
] | import argparse
import datetime
import json
import os
import random
import time
import deepspeed
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import models # noqa: F401
import utils
from collections import OrderedDict
from functools import partial
from pathlib import Path
from timm.data.mixup import Mixup
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.models import create_model
from timm.utils import ModelEma
from dataset import build_dataset
from engine_for_finetuning import (
final_test,
merge,
train_one_epoch,
validation_one_epoch,
)
from optim_factory import (
LayerDecayValueAssigner,
create_optimizer,
get_parameter_groups,
)
from utils import NativeScalerWithGradNormCount as NativeScaler
from utils import multiple_samples_collate
from unireplknet import UniRepLKNet | 7,622 | 0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens,
size=(new_size, new_size),
mode='bicubic',
align_corners=False)
# BT, C, H, W -> BT, H, W, C -> B, T, H, W, C
pos_tokens = pos_tokens.permute(0, 2, 3, 1).reshape(
-1, args.num_frames // model.patch_embed.tubelet_size,
new_size, new_size, embedding_size)
pos_tokens = pos_tokens.flatten(1, 3) # B, L, C
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
elif args.input_size != 224:
pos_tokens = model.pos_embed
org_num_frames = 16
T = org_num_frames // args.tubelet_size
P = int((pos_tokens.shape[1] // T)**0.5)
C = pos_tokens.shape[2]
new_P = args.input_size // patch_size[0]
# B, L, C -> BT, H, W, C -> BT, C, H, W
pos_tokens = pos_tokens.reshape(-1, T, P, P, C)
pos_tokens = pos_tokens.reshape(-1, P, P, C).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens,
size=(new_P, new_P),
mode='bicubic',
align_corners=False)
# BT, C, H, W -> BT, H, W, C -> B, T, H, W, C
pos_tokens = pos_tokens.permute(0, 2, 3,
1).reshape(-1, T, new_P, new_P, C)
pos_tokens = pos_tokens.flatten(1, 3) # B, L, C
model.pos_embed = pos_tokens # update
if args.num_frames != 16:
org_num_frames = 16
T = org_num_frames // args.tubelet_size
pos_tokens = model.pos_embed
new_T = args.num_frames // args.tubelet_size
P = int((pos_tokens.shape[1] // T)**0.5)
C = pos_tokens.shape[2]
pos_tokens = pos_tokens.reshape(-1, T, P, P, C)
pos_tokens = pos_tokens.permute(0, 2, 3, 4,
1).reshape(-1, C, T) # BHW,C,T
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=new_T, mode='linear')
pos_tokens = pos_tokens.reshape(1, P, P, C,
new_T).permute(0, 4, 1, 2, 3)
pos_tokens = pos_tokens.flatten(1, 3)
model.pos_embed = pos_tokens # update
utils.load_state_dict(
model, checkpoint_model, prefix=args.model_prefix)
default_kernel_sizes = [[3, 3, 3], [13, 13, 13], [13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3], [11, 11, 11]]
model = UniRepLKNet(num_classes=400, depths=[3, 3, 27, 3], dims=[128,256,512,1024], drop_path_rate=0.4,
kernel_sizes=default_kernel_sizes,
custom_set='nolk', disable_iGEMM=True)
model.to(device)
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
print("Using EMA with decay = %.8f" % args.model_ema_decay)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters()
if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params:', n_parameters)
total_batch_size = args.batch_size * args.update_freq * num_tasks
num_training_steps_per_epoch = len(dataset_train) // total_batch_size
args.lr = args.lr * total_batch_size / 256
#########scale the lr#############
args.min_lr = args.min_lr * total_batch_size / 256
args.warmup_lr = args.warmup_lr * total_batch_size / 256
#########scale the lr#############
print("LR = %.8f" % args.lr)
print("Batch size = %d" % total_batch_size)
print("Update frequent = %d" % args.update_freq)
print("Number of training examples = %d" % len(dataset_train))
print("Number of training training per epoch = %d" %
num_training_steps_per_epoch)
# num_layers = model_without_ddp.get_num_layers()
# num_layers = 13
# if args.layer_decay < 1.0:
# assigner = LayerDecayValueAssigner(
# list(args.layer_decay**(num_layers + 1 - i)
# for i in range(num_layers + 2)))
# else:
# assigner = None
# if assigner is not None:
# print("Assigned values = %s" % str(assigner.values))
# if args.layer_decay < 1.0 or args.layer_decay > 1.0:
# num_layers = 12
# # set lower learning rate for lower-level layers.
# # follow the implementation in the code of ConvNeXt and BeiT
# assigner = RepLKNetLayerDecayValueAssigner(list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)))
# else:
assigner = None
if assigner is None and (args.layer_decay < 1.0 or args.layer_decay > 1.0):
num_layers = 12 # convnext layers divided into 12 parts, each with a different decayed lr value.
assigner = LayerDecayValueAssigner(list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)))
else:
assigner = None
if assigner is not None:
print("Assigned values = %s" % str(assigner.values))
| # --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
# NOTE: Do not comment `import models`, it is used to register models
def get_args():
parser = argparse.ArgumentParser(
'VideoMAE fine-tuning and evaluation script for action classification',
add_help=False)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--epochs', default=30, type=int)
parser.add_argument('--update_freq', default=1, type=int)
parser.add_argument('--save_ckpt_freq', default=100, type=int)
# Model parameters
parser.add_argument(
'--model',
default='vit_base_patch16_224',
type=str,
metavar='MODEL',
help='Name of model to train')
parser.add_argument('--tubelet_size', type=int, default=2)
parser.add_argument(
'--input_size', default=224, type=int, help='images input size')
parser.add_argument(
'--with_checkpoint', action='store_true', default=False)
parser.add_argument(
'--drop',
type=float,
default=0.0,
metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument(
'--attn_drop_rate',
type=float,
default=0.0,
metavar='PCT',
help='Attention dropout rate (default: 0.)')
parser.add_argument(
'--drop_path',
type=float,
default=0.1,
metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument(
'--head_drop_rate',
type=float,
default=0.0,
metavar='PCT',
help='cls head dropout rate (default: 0.)')
parser.add_argument(
'--disable_eval_during_finetuning', action='store_true', default=False)
parser.add_argument('--model_ema', action='store_true', default=False)
parser.add_argument(
'--model_ema_decay', type=float, default=0.9999, help='')
parser.add_argument(
'--model_ema_force_cpu', action='store_true', default=False, help='')
# Optimizer parameters
parser.add_argument(
'--opt',
default='adamw',
type=str,
metavar='OPTIMIZER',
help='Optimizer (default: "adamw"')
parser.add_argument(
'--opt_eps',
default=1e-8,
type=float,
metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument(
'--opt_betas',
default=None,
type=float,
nargs='+',
metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument(
'--clip_grad',
type=float,
default=None,
metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument(
'--momentum',
type=float,
default=0.9,
metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument(
'--weight_decay',
type=float,
default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument(
'--weight_decay_end',
type=float,
default=None,
help="""Final value of the
weight decay. We use a cosine schedule for WD and using a larger decay by
the end of training improves performance for ViTs.""")
parser.add_argument(
'--lr',
type=float,
default=1e-3,
metavar='LR',
help='learning rate (default: 1e-3)')
parser.add_argument('--layer_decay', type=float, default=0.75)
parser.add_argument(
'--warmup_lr',
type=float,
default=1e-8,
metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument(
'--min_lr',
type=float,
default=1e-6,
metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument(
'--warmup_epochs',
type=int,
default=5,
metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument(
'--warmup_steps',
type=int,
default=-1,
metavar='N',
help='num of steps to warmup LR, will overload warmup_epochs if set > 0'
)
# Augmentation parameters
parser.add_argument(
'--color_jitter',
type=float,
default=0.4,
metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument(
'--num_sample', type=int, default=2, help='Repeated_aug (default: 2)')
parser.add_argument(
'--aa',
type=str,
default='rand-m7-n4-mstd0.5-inc1',
metavar='NAME',
help=
        'Use AutoAugment policy. "v0" or "original". (default: rand-m7-n4-mstd0.5-inc1)'
),
parser.add_argument(
'--smoothing',
type=float,
default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument(
'--train_interpolation',
type=str,
default='bicubic',
help=
'Training interpolation (random, bilinear, bicubic default: "bicubic")'
)
# Evaluation parameters
parser.add_argument('--crop_pct', type=float, default=None)
parser.add_argument('--short_side_size', type=int, default=224)
parser.add_argument('--test_num_segment', type=int, default=10)
parser.add_argument('--test_num_crop', type=int, default=3)
# * Random Erase params
parser.add_argument(
'--reprob',
type=float,
default=0.25,
metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument(
'--remode',
type=str,
default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument(
'--recount',
type=int,
default=1,
help='Random erase count (default: 1)')
parser.add_argument(
'--resplit',
action='store_true',
default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument(
'--mixup',
type=float,
default=0.8,
help='mixup alpha, mixup enabled if > 0.')
parser.add_argument(
'--cutmix',
type=float,
default=1.0,
help='cutmix alpha, cutmix enabled if > 0.')
parser.add_argument(
'--cutmix_minmax',
type=float,
nargs='+',
default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set')
parser.add_argument(
'--mixup_prob',
type=float,
default=1.0,
help=
'Probability of performing mixup or cutmix when either/both is enabled'
)
parser.add_argument(
'--mixup_switch_prob',
type=float,
default=0.5,
help=
'Probability of switching to cutmix when both mixup and cutmix enabled'
)
parser.add_argument(
'--mixup_mode',
type=str,
default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"'
)
# * Finetuning params
parser.add_argument(
'--finetune', default='', help='finetune from checkpoint')
parser.add_argument('--model_key', default='model|module', type=str)
parser.add_argument('--model_prefix', default='', type=str)
parser.add_argument('--init_scale', default=0.001, type=float)
parser.add_argument('--use_mean_pooling', action='store_true')
parser.set_defaults(use_mean_pooling=True)
parser.add_argument(
'--use_cls', action='store_false', dest='use_mean_pooling')
# Dataset parameters
parser.add_argument(
'--data_path',
default='/your/data/path/',
type=str,
help='dataset path')
parser.add_argument(
'--data_root', default='', type=str, help='dataset path root')
parser.add_argument(
'--eval_data_path',
default=None,
type=str,
help='dataset path for evaluation')
parser.add_argument(
'--nb_classes',
default=400,
type=int,
help='number of the classification types')
parser.add_argument(
'--imagenet_default_mean_and_std', default=True, action='store_true')
parser.add_argument('--num_segments', type=int, default=1)
parser.add_argument('--num_frames', type=int, default=16)
parser.add_argument('--sampling_rate', type=int, default=4)
parser.add_argument('--sparse_sample', default=False, action='store_true')
parser.add_argument(
'--data_set',
default='Kinetics-400',
choices=[
'Kinetics-400', 'Kinetics-600', 'Kinetics-700', 'SSV2', 'UCF101',
'HMDB51', 'Diving48', 'Kinetics-710', 'MIT'
],
type=str,
help='dataset')
parser.add_argument(
'--fname_tmpl',
default='img_{:05}.jpg',
type=str,
help='filename_tmpl for rawframe dataset')
parser.add_argument(
'--start_idx',
default=1,
type=int,
        help='start_idx for rawframe dataset')
parser.add_argument(
'--output_dir',
default='',
help='path where to save, empty for no saving')
parser.add_argument(
'--log_dir', default=None, help='path where to tensorboard log')
parser.add_argument(
'--device',
default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--auto_resume', action='store_true')
parser.add_argument(
'--no_auto_resume', action='store_false', dest='auto_resume')
parser.set_defaults(auto_resume=True)
parser.add_argument('--save_ckpt', action='store_true')
parser.add_argument(
'--no_save_ckpt', action='store_false', dest='save_ckpt')
parser.set_defaults(save_ckpt=True)
parser.add_argument(
'--start_epoch', default=0, type=int, metavar='N', help='start epoch')
parser.add_argument(
'--eval', action='store_true', help='Perform evaluation only')
parser.add_argument(
'--validation', action='store_true', help='Perform validation only')
parser.add_argument(
'--dist_eval',
action='store_true',
default=False,
help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument(
'--pin_mem',
action='store_true',
help=
'Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.'
)
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument(
'--world_size',
default=1,
type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument(
'--dist_url',
default='env://',
help='url used to set up distributed training')
parser.add_argument(
'--enable_deepspeed', action='store_true', default=False)
known_args, _ = parser.parse_known_args()
if known_args.enable_deepspeed:
parser = deepspeed.add_config_arguments(parser)
ds_init = deepspeed.initialize
else:
ds_init = None
return parser.parse_args(), ds_init
def main(args, ds_init):
utils.init_distributed_mode(args)
if ds_init is not None:
utils.create_ds_config(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
cudnn.benchmark = True
dataset_train, args.nb_classes = build_dataset(
is_train=True, test_mode=False, args=args)
if args.disable_eval_during_finetuning:
dataset_val = None
else:
dataset_val, _ = build_dataset(
is_train=False, test_mode=False, args=args)
dataset_test, _ = build_dataset(is_train=False, test_mode=True, args=args)
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True)
print("Sampler_train = %s" % str(sampler_train))
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print(
'Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val,
num_replicas=num_tasks,
rank=global_rank,
shuffle=False)
sampler_test = torch.utils.data.DistributedSampler(
dataset_test,
num_replicas=num_tasks,
rank=global_rank,
shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
else:
log_writer = None
if args.num_sample > 1:
collate_func = partial(multiple_samples_collate, fold=False)
else:
collate_func = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train,
sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
collate_fn=collate_func,
persistent_workers=True)
if dataset_val is not None:
data_loader_val = torch.utils.data.DataLoader(
dataset_val,
sampler=sampler_val,
# batch_size=int(1.5 * args.batch_size),
batch_size=int(args.batch_size),
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False,
persistent_workers=True)
else:
data_loader_val = None
if dataset_test is not None:
data_loader_test = torch.utils.data.DataLoader(
dataset_test,
sampler=sampler_test,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False,
persistent_workers=True)
else:
data_loader_test = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
print("Mixup is activated!")
mixup_fn = Mixup(
mixup_alpha=args.mixup,
cutmix_alpha=args.cutmix,
cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob,
switch_prob=args.mixup_switch_prob,
mode=args.mixup_mode,
label_smoothing=args.smoothing,
num_classes=args.nb_classes)
model = create_model(
args.model,
img_size=args.input_size,
pretrained=False,
num_classes=args.nb_classes,
all_frames=args.num_frames * args.num_segments,
tubelet_size=args.tubelet_size,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
attn_drop_rate=args.attn_drop_rate,
head_drop_rate=args.head_drop_rate,
drop_block_rate=None,
use_mean_pooling=args.use_mean_pooling,
init_scale=args.init_scale,
with_cp=args.with_checkpoint,
)
patch_size = model.patch_embed.patch_size
print("Patch size = %s" % str(patch_size))
args.window_size = (args.num_frames // args.tubelet_size,
args.input_size // patch_size[0],
args.input_size // patch_size[1])
args.patch_size = patch_size
if args.finetune:
if args.finetune.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.finetune, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.finetune, map_location='cpu')
print("Load ckpt from %s" % args.finetune)
checkpoint_model = None
for model_key in args.model_key.split('|'):
if model_key in checkpoint:
checkpoint_model = checkpoint[model_key]
print("Load state_dict by model_key = %s" % model_key)
break
if checkpoint_model is None:
checkpoint_model = checkpoint
for old_key in list(checkpoint_model.keys()):
if old_key.startswith('_orig_mod.'):
new_key = old_key[10:]
checkpoint_model[new_key] = checkpoint_model.pop(old_key)
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias']:
if k in checkpoint_model and checkpoint_model[
k].shape != state_dict[k].shape:
if checkpoint_model[k].shape[
0] == 710 and args.data_set.startswith('Kinetics'):
print(f'Convert K710 head to {args.data_set} head')
if args.data_set == 'Kinetics-400':
label_map_path = 'misc/label_710to400.json'
elif args.data_set == 'Kinetics-600':
label_map_path = 'misc/label_710to600.json'
elif args.data_set == 'Kinetics-700':
label_map_path = 'misc/label_710to700.json'
label_map = json.load(open(label_map_path))
checkpoint_model[k] = checkpoint_model[k][label_map]
else:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
all_keys = list(checkpoint_model.keys())
new_dict = OrderedDict()
for key in all_keys:
if key.startswith('backbone.'):
new_dict[key[9:]] = checkpoint_model[key]
elif key.startswith('encoder.'):
new_dict[key[8:]] = checkpoint_model[key]
else:
new_dict[key] = checkpoint_model[key]
checkpoint_model = new_dict
# interpolate position embedding
if 'pos_embed' in checkpoint_model:
pos_embed_checkpoint = checkpoint_model['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1] # channel dim
num_patches = model.patch_embed.num_patches #
num_extra_tokens = model.pos_embed.shape[-2] - num_patches # 0/1
# height (== width) for the checkpoint position embedding
orig_size = int(
((pos_embed_checkpoint.shape[-2] - num_extra_tokens) //
(args.num_frames // model.patch_embed.tubelet_size))**0.5)
# height (== width) for the new position embedding
new_size = int(
(num_patches //
(args.num_frames // model.patch_embed.tubelet_size))**0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print("Position interpolate from %dx%d to %dx%d" %
(orig_size, orig_size, new_size, new_size))
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
# B, L, C -> BT, H, W, C -> BT, C, H, W
pos_tokens = pos_tokens.reshape(
-1, args.num_frames // model.patch_embed.tubelet_size,
orig_size, orig_size, embedding_size)
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size,
embedding_size).permute(
0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens,
size=(new_size, new_size),
mode='bicubic',
align_corners=False)
# BT, C, H, W -> BT, H, W, C -> B, T, H, W, C
pos_tokens = pos_tokens.permute(0, 2, 3, 1).reshape(
-1, args.num_frames // model.patch_embed.tubelet_size,
new_size, new_size, embedding_size)
pos_tokens = pos_tokens.flatten(1, 3) # B, L, C
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
elif args.input_size != 224:
pos_tokens = model.pos_embed
org_num_frames = 16
T = org_num_frames // args.tubelet_size
P = int((pos_tokens.shape[1] // T)**0.5)
C = pos_tokens.shape[2]
new_P = args.input_size // patch_size[0]
# B, L, C -> BT, H, W, C -> BT, C, H, W
pos_tokens = pos_tokens.reshape(-1, T, P, P, C)
pos_tokens = pos_tokens.reshape(-1, P, P, C).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens,
size=(new_P, new_P),
mode='bicubic',
align_corners=False)
# BT, C, H, W -> BT, H, W, C -> B, T, H, W, C
pos_tokens = pos_tokens.permute(0, 2, 3,
1).reshape(-1, T, new_P, new_P, C)
pos_tokens = pos_tokens.flatten(1, 3) # B, L, C
model.pos_embed = pos_tokens # update
if args.num_frames != 16:
org_num_frames = 16
T = org_num_frames // args.tubelet_size
pos_tokens = model.pos_embed
new_T = args.num_frames // args.tubelet_size
P = int((pos_tokens.shape[1] // T)**0.5)
C = pos_tokens.shape[2]
pos_tokens = pos_tokens.reshape(-1, T, P, P, C)
pos_tokens = pos_tokens.permute(0, 2, 3, 4,
1).reshape(-1, C, T) # BHW,C,T
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=new_T, mode='linear')
pos_tokens = pos_tokens.reshape(1, P, P, C,
new_T).permute(0, 4, 1, 2, 3)
pos_tokens = pos_tokens.flatten(1, 3)
model.pos_embed = pos_tokens # update
utils.load_state_dict(
model, checkpoint_model, prefix=args.model_prefix)
default_kernel_sizes = [[3, 3, 3], [13, 13, 13], [13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3], [11, 11, 11]]
model = UniRepLKNet(num_classes=400, depths=[3, 3, 27, 3], dims=[128,256,512,1024], drop_path_rate=0.4,
kernel_sizes=default_kernel_sizes,
custom_set='nolk', disable_iGEMM=True)
model.to(device)
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
print("Using EMA with decay = %.8f" % args.model_ema_decay)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters()
if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params:', n_parameters)
total_batch_size = args.batch_size * args.update_freq * num_tasks
num_training_steps_per_epoch = len(dataset_train) // total_batch_size
args.lr = args.lr * total_batch_size / 256
#########scale the lr#############
args.min_lr = args.min_lr * total_batch_size / 256
args.warmup_lr = args.warmup_lr * total_batch_size / 256
#########scale the lr#############
print("LR = %.8f" % args.lr)
print("Batch size = %d" % total_batch_size)
print("Update frequent = %d" % args.update_freq)
print("Number of training examples = %d" % len(dataset_train))
print("Number of training training per epoch = %d" %
num_training_steps_per_epoch)
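    # Sanity check for the linear LR scaling above (hypothetical values): with
    # args.batch_size=16, args.update_freq=2 and num_tasks=8 processes,
    # total_batch_size = 16 * 2 * 8 = 256, so the base learning rate is kept as-is;
    # doubling any of the three factors doubles args.lr (and min_lr / warmup_lr) as well.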
# num_layers = model_without_ddp.get_num_layers()
# num_layers = 13
# if args.layer_decay < 1.0:
# assigner = LayerDecayValueAssigner(
# list(args.layer_decay**(num_layers + 1 - i)
# for i in range(num_layers + 2)))
# else:
# assigner = None
# if assigner is not None:
# print("Assigned values = %s" % str(assigner.values))
# if args.layer_decay < 1.0 or args.layer_decay > 1.0:
# num_layers = 12
# # set lower learning rate for lower-level layers.
# # follow the implementation in the code of ConvNeXt and BeiT
# assigner = RepLKNetLayerDecayValueAssigner(list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)))
# else:
assigner = None
    if args.layer_decay < 1.0 or args.layer_decay > 1.0:
        num_layers = 12  # convnext layers divided into 12 parts, each with a different decayed lr value.
        assigner = LayerDecayValueAssigner(list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)))
if assigner is not None:
print("Assigned values = %s" % str(assigner.values))
| optimizer = create_optimizer( | 1 | 2023-11-24 07:28:22+00:00 | 12k |
wenquanlu/HandRefiner | ldm/models/diffusion/ddpm.py | [
{
"identifier": "log_txt_as_img",
"path": "ldm/util.py",
"snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts"
},
{
"identifier": "exists",
"path": "ldm/util.py",
"snippet": "def exists(x):\n return x is not None"
},
{
"identifier": "default",
"path": "ldm/util.py",
"snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d"
},
{
"identifier": "ismap",
"path": "ldm/util.py",
"snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)"
},
{
"identifier": "isimage",
"path": "ldm/util.py",
"snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)"
},
{
"identifier": "mean_flat",
"path": "ldm/util.py",
"snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))"
},
{
"identifier": "count_params",
"path": "ldm/util.py",
"snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params"
},
{
"identifier": "instantiate_from_config",
"path": "ldm/util.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))"
},
{
"identifier": "LitEma",
"path": "ldm/modules/ema.py",
"snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)"
},
{
"identifier": "normal_kl",
"path": "ldm/modules/distributions/distributions.py",
"snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )"
},
{
"identifier": "DiagonalGaussianDistribution",
"path": "ldm/modules/distributions/distributions.py",
"snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean"
},
{
"identifier": "IdentityFirstStage",
"path": "ldm/models/autoencoder.py",
"snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x"
},
{
"identifier": "AutoencoderKL",
"path": "ldm/models/autoencoder.py",
"snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. < ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, 
on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x"
},
{
"identifier": "make_beta_schedule",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()"
},
{
"identifier": "extract_into_tensor",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))"
},
{
"identifier": "noise_like",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()"
},
{
"identifier": "DDIMSampler",
"path": "ldm/models/diffusion/ddim.py",
"snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * (1. 
- mask) + mask * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec"
}
] | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler | 10,230 | https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
make_it_fit=False,
ucg_training=None,
reset_ema=False,
reset_num_ema_updates=False,
):
super().__init__()
assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
count_params(self.model, verbose=True)
self.use_ema = use_ema
if self.use_ema:
self.model_ema = LitEma(self.model)
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
self.use_scheduler = scheduler_config is not None
if self.use_scheduler:
self.scheduler_config = scheduler_config
self.v_posterior = v_posterior
self.original_elbo_weight = original_elbo_weight
self.l_simple_weight = l_simple_weight
if monitor is not None:
self.monitor = monitor
self.make_it_fit = make_it_fit
if reset_ema: assert exists(ckpt_path)
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
if reset_ema:
assert self.use_ema
print(f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
self.model_ema = LitEma(self.model)
if reset_num_ema_updates:
print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
assert self.use_ema
self.model_ema.reset_num_updates()
self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
self.loss_type = loss_type
self.learn_logvar = learn_logvar
logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
if self.learn_logvar:
            self.logvar = nn.Parameter(logvar, requires_grad=True)
else:
self.register_buffer('logvar', logvar)
self.ucg_training = ucg_training or dict()
if self.ucg_training:
self.ucg_prng = np.random.RandomState()
def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
if exists(given_betas):
betas = given_betas
else:
| """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
make_it_fit=False,
ucg_training=None,
reset_ema=False,
reset_num_ema_updates=False,
):
super().__init__()
assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
count_params(self.model, verbose=True)
self.use_ema = use_ema
if self.use_ema:
self.model_ema = LitEma(self.model)
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
self.use_scheduler = scheduler_config is not None
if self.use_scheduler:
self.scheduler_config = scheduler_config
self.v_posterior = v_posterior
self.original_elbo_weight = original_elbo_weight
self.l_simple_weight = l_simple_weight
if monitor is not None:
self.monitor = monitor
self.make_it_fit = make_it_fit
if reset_ema: assert exists(ckpt_path)
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
if reset_ema:
assert self.use_ema
print(f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
self.model_ema = LitEma(self.model)
if reset_num_ema_updates:
print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
assert self.use_ema
self.model_ema.reset_num_updates()
self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
self.loss_type = loss_type
self.learn_logvar = learn_logvar
logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
if self.learn_logvar:
            self.logvar = nn.Parameter(logvar, requires_grad=True)
else:
self.register_buffer('logvar', logvar)
self.ucg_training = ucg_training or dict()
if self.ucg_training:
self.ucg_prng = np.random.RandomState()
def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
if exists(given_betas):
betas = given_betas
else: | betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, | 13 | 2023-11-24 10:19:23+00:00 | 12k |
VITA-Group/LightGaussian | gaussian_renderer/gaussian_count.py | [
{
"identifier": "GaussianModel",
"path": "scene/gaussian_model.py",
"snippet": "class GaussianModel:\n def setup_functions(self):\n def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):\n L = build_scaling_rotation(scaling_modifier * scaling, rotation)\n actual_covariance = L @ L.transpose(1, 2)\n symm = strip_symmetric(actual_covariance)\n return symm\n\n self.scaling_activation = torch.exp\n self.scaling_inverse_activation = torch.log\n\n self.covariance_activation = build_covariance_from_scaling_rotation\n\n self.opacity_activation = torch.sigmoid\n self.inverse_opacity_activation = inverse_sigmoid\n\n self.rotation_activation = torch.nn.functional.normalize\n\n def __init__(self, sh_degree: int):\n self.active_sh_degree = 0\n self.max_sh_degree = sh_degree\n self._xyz = torch.empty(0)\n self._features_dc = torch.empty(0)\n self._features_rest = torch.empty(0)\n self._scaling = torch.empty(0)\n self._rotation = torch.empty(0)\n self._opacity = torch.empty(0)\n self.max_radii2D = torch.empty(0)\n self.xyz_gradient_accum = torch.empty(0) # empty or frezze\n self.denom = torch.empty(0)\n self.optimizer = None\n self.percent_dense = 0\n self.spatial_lr_scale = 0\n self.setup_functions()\n\n def capture(self):\n return (\n self.active_sh_degree,\n self._xyz,\n self._features_dc,\n self._features_rest,\n self._scaling,\n self._rotation,\n self._opacity,\n self.max_radii2D,\n self.xyz_gradient_accum,\n self.denom,\n self.optimizer.state_dict(),\n self.spatial_lr_scale,\n )\n\n def restore(self, model_args, training_args):\n (\n self.active_sh_degree,\n self._xyz,\n self._features_dc,\n self._features_rest,\n self._scaling,\n self._rotation,\n self._opacity,\n self.max_radii2D,\n xyz_gradient_accum,\n denom,\n opt_dict,\n self.spatial_lr_scale,\n ) = model_args\n self.training_setup(training_args)\n self.xyz_gradient_accum = xyz_gradient_accum\n self.denom = denom\n self.optimizer.load_state_dict(opt_dict)\n\n @property\n def get_scaling(self):\n return self.scaling_activation(self._scaling)\n\n @property\n def get_rotation(self):\n return self.rotation_activation(self._rotation)\n\n @property\n def get_xyz(self):\n return self._xyz\n\n @property\n def get_features(self):\n features_dc = self._features_dc\n features_rest = self._features_rest\n return torch.cat((features_dc, features_rest), dim=1)\n\n @property\n def get_opacity(self):\n return self.opacity_activation(self._opacity)\n\n def get_covariance(self, scaling_modifier=1):\n return self.covariance_activation(\n self.get_scaling, scaling_modifier, self._rotation\n )\n\n def oneupSHdegree(self):\n if self.active_sh_degree < self.max_sh_degree:\n self.active_sh_degree += 1\n\n def create_from_pcd(self, pcd: BasicPointCloud, spatial_lr_scale: float):\n self.spatial_lr_scale = spatial_lr_scale\n fused_point_cloud = torch.tensor(np.asarray(pcd.points)).float().cuda()\n fused_color = RGB2SH(torch.tensor(np.asarray(pcd.colors)).float().cuda())\n features = (\n torch.zeros((fused_color.shape[0], 3, (self.max_sh_degree + 1) ** 2))\n .float()\n .cuda()\n )\n features[:, :3, 0] = fused_color\n features[:, 3:, 1:] = 0.0\n\n print(\"Number of points at initialisation : \", fused_point_cloud.shape[0])\n\n dist2 = torch.clamp_min(\n distCUDA2(torch.from_numpy(np.asarray(pcd.points)).float().cuda()),\n 0.0000001,\n )\n scales = torch.log(torch.sqrt(dist2))[..., None].repeat(1, 3)\n rots = torch.zeros((fused_point_cloud.shape[0], 4), device=\"cuda\")\n rots[:, 0] = 1\n\n opacities = inverse_sigmoid(\n 0.1\n * torch.ones(\n (fused_point_cloud.shape[0], 1), dtype=torch.float, device=\"cuda\"\n 
)\n )\n\n self._xyz = nn.Parameter(fused_point_cloud.requires_grad_(True))\n self._features_dc = nn.Parameter(\n features[:, :, 0:1].transpose(1, 2).contiguous().requires_grad_(True)\n )\n self._features_rest = nn.Parameter(\n features[:, :, 1:].transpose(1, 2).contiguous().requires_grad_(True)\n )\n self._scaling = nn.Parameter(scales.requires_grad_(True))\n self._rotation = nn.Parameter(rots.requires_grad_(True))\n self._opacity = nn.Parameter(opacities.requires_grad_(True))\n self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device=\"cuda\")\n\n def training_setup(self, training_args):\n self.percent_dense = training_args.percent_dense\n self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device=\"cuda\")\n self.denom = torch.zeros((self.get_xyz.shape[0], 1), device=\"cuda\")\n\n l = [\n {\n \"params\": [self._xyz],\n \"lr\": training_args.position_lr_init * self.spatial_lr_scale,\n \"name\": \"xyz\",\n },\n {\n \"params\": [self._features_dc],\n \"lr\": training_args.feature_lr,\n \"name\": \"f_dc\",\n },\n {\n \"params\": [self._features_rest],\n \"lr\": training_args.feature_lr / 20.0,\n \"name\": \"f_rest\",\n },\n {\n \"params\": [self._opacity],\n \"lr\": training_args.opacity_lr,\n \"name\": \"opacity\",\n },\n {\n \"params\": [self._scaling],\n \"lr\": training_args.scaling_lr,\n \"name\": \"scaling\",\n },\n {\n \"params\": [self._rotation],\n \"lr\": training_args.rotation_lr,\n \"name\": \"rotation\",\n },\n ]\n\n self.optimizer = torch.optim.AdamW(l, lr=0.0, eps=1e-15)\n self.xyz_scheduler_args = get_expon_lr_func(\n lr_init=training_args.position_lr_init * self.spatial_lr_scale,\n lr_final=training_args.position_lr_final * self.spatial_lr_scale,\n lr_delay_mult=training_args.position_lr_delay_mult,\n max_steps=training_args.position_lr_max_steps,\n )\n\n def update_learning_rate(self, iteration):\n \"\"\"Learning rate scheduling per step\"\"\"\n for param_group in self.optimizer.param_groups:\n if param_group[\"name\"] == \"xyz\":\n lr = self.xyz_scheduler_args(iteration)\n param_group[\"lr\"] = lr\n return lr\n\n def construct_list_of_attributes(self):\n l = [\"x\", \"y\", \"z\", \"nx\", \"ny\", \"nz\"]\n # All channels except the 3 DC\n for i in range(self._features_dc.shape[1] * self._features_dc.shape[2]):\n l.append(\"f_dc_{}\".format(i))\n for i in range(self._features_rest.shape[1] * self._features_rest.shape[2]):\n l.append(\"f_rest_{}\".format(i))\n l.append(\"opacity\")\n for i in range(self._scaling.shape[1]):\n l.append(\"scale_{}\".format(i))\n for i in range(self._rotation.shape[1]):\n l.append(\"rot_{}\".format(i))\n return l\n\n def construct_list_of_compress_attributes(self):\n l = [\"x\", \"y\", \"z\", \"nx\", \"ny\", \"nz\"]\n # All channels except the 3 DC\n for i in range(self._features_dc.shape[1] * self._features_dc.shape[2]):\n l.append(\"f_dc_{}\".format(i))\n for i in range(self.centroids.shape[1]):\n l.append(\"centroids_{}\".format(i))\n for i in range(self.idx.shape[1]):\n l.append(\"idx_{}\".format(i))\n l.append(\"opacity\")\n for i in range(self._scaling.shape[1]):\n l.append(\"scale_{}\".format(i))\n for i in range(self._rotation.shape[1]):\n l.append(\"rot_{}\".format(i))\n\n return l\n\n def save_ply(self, path):\n mkdir_p(os.path.dirname(path))\n xyz = self._xyz.detach().cpu().numpy()\n normals = np.zeros_like(xyz)\n f_dc = (\n self._features_dc.detach()\n .transpose(1, 2)\n .flatten(start_dim=1)\n .contiguous()\n .cpu()\n .numpy()\n )\n f_rest = (\n self._features_rest.detach()\n .transpose(1, 2)\n 
.flatten(start_dim=1)\n .contiguous()\n .cpu()\n .numpy()\n )\n opacities = self._opacity.detach().cpu().numpy()\n scale = self._scaling.detach().cpu().numpy()\n rotation = self._rotation.detach().cpu().numpy()\n dtype_full = [\n (attribute, \"f4\") for attribute in self.construct_list_of_attributes()\n ]\n elements = np.empty(xyz.shape[0], dtype=dtype_full)\n attributes = np.concatenate(\n (xyz, normals, f_dc, f_rest, opacities, scale, rotation), axis=1\n )\n elements[:] = list(map(tuple, attributes))\n el = PlyElement.describe(elements, \"vertex\")\n PlyData([el]).write(path)\n\n def save_compress(self, path):\n mkdir_p(os.path.dirname(path))\n xyz = self._xyz.detach().cpu().numpy()\n normals = np.zeros_like(xyz)\n f_dc = (\n self._features_dc.detach()\n .transpose(1, 2)\n .flatten(start_dim=1)\n .contiguous()\n .cpu()\n .numpy()\n )\n # f_rest = self._features_rest.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()\n opacities = self._opacity.detach().cpu().numpy()\n scale = self._scaling.detach().cpu().numpy()\n rotation = self._rotation.detach().cpu().numpy()\n centroids = self.centroids\n idx = self.idx\n dtype_full = [\n (attribute, \"f4\")\n for attribute in self.construct_list_of_compress_attributes()\n ]\n elements = np.empty(xyz.shape[0], dtype=dtype_full)\n attributes = np.concatenate(\n (xyz, normals, f_dc, centroids, idx, opacities, scale, rotation), axis=1\n )\n elements[:] = list(map(tuple, attributes))\n el = PlyElement.describe(elements, \"vertex\")\n PlyData([el]).write(path)\n\n def reset_opacity(self):\n opacities_new = inverse_sigmoid(\n torch.min(self.get_opacity, torch.ones_like(self.get_opacity) * 0.01)\n )\n optimizable_tensors = self.replace_tensor_to_optimizer(opacities_new, \"opacity\")\n self._opacity = optimizable_tensors[\"opacity\"]\n\n # Kevin defined function\n def load_ply_sh(self, path, new_sh):\n plydata = PlyData.read(path)\n xyz = np.stack(\n (\n np.asarray(plydata.elements[0][\"x\"]),\n np.asarray(plydata.elements[0][\"y\"]),\n np.asarray(plydata.elements[0][\"z\"]),\n ),\n axis=1,\n )\n opacities = np.asarray(plydata.elements[0][\"opacity\"])[..., np.newaxis]\n\n features_dc = np.zeros((xyz.shape[0], 3, 1))\n features_dc[:, 0, 0] = np.asarray(plydata.elements[0][\"f_dc_0\"])\n features_dc[:, 1, 0] = np.asarray(plydata.elements[0][\"f_dc_1\"])\n features_dc[:, 2, 0] = np.asarray(plydata.elements[0][\"f_dc_2\"])\n\n extra_f_names = [\n p.name\n for p in plydata.elements[0].properties\n if p.name.startswith(\"f_rest_\")\n ]\n extra_f_names = sorted(extra_f_names, key=lambda x: int(x.split(\"_\")[-1]))\n # assert len(extra_f_names)==3*(self.max_sh_degree + 1) ** 2 - 3\n if new_sh > self.max_sh_degree:\n raise ValueError(\n \"Requested max_sh_degree is greater than available in data.\"\n )\n num_coeffs_to_keep = (new_sh + 1) ** 2\n features_extra = np.zeros((xyz.shape[0], len(extra_f_names)))\n for idx, attr_name in enumerate(extra_f_names):\n features_extra[:, idx] = np.asarray(plydata.elements[0][attr_name])\n # Reshape (P,F*SH_coeffs) to (P, F, SH_coeffs except DC)\n features_extra = features_extra.reshape(\n (features_extra.shape[0], 3, (self.max_sh_degree + 1) ** 2 - 1)\n )\n features_extra = features_extra[:, :, : num_coeffs_to_keep - 1]\n scale_names = [\n p.name\n for p in plydata.elements[0].properties\n if p.name.startswith(\"scale_\")\n ]\n scale_names = sorted(scale_names, key=lambda x: int(x.split(\"_\")[-1]))\n scales = np.zeros((xyz.shape[0], len(scale_names)))\n for idx, attr_name in enumerate(scale_names):\n 
scales[:, idx] = np.asarray(plydata.elements[0][attr_name])\n\n rot_names = [\n p.name for p in plydata.elements[0].properties if p.name.startswith(\"rot\")\n ]\n rot_names = sorted(rot_names, key=lambda x: int(x.split(\"_\")[-1]))\n rots = np.zeros((xyz.shape[0], len(rot_names)))\n for idx, attr_name in enumerate(rot_names):\n rots[:, idx] = np.asarray(plydata.elements[0][attr_name])\n\n self._xyz = nn.Parameter(\n torch.tensor(xyz, dtype=torch.float, device=\"cuda\").requires_grad_(True)\n )\n self._features_dc = nn.Parameter(\n torch.tensor(features_dc, dtype=torch.float, device=\"cuda\")\n .transpose(1, 2)\n .contiguous()\n .requires_grad_(True)\n )\n self._features_rest = nn.Parameter(\n torch.tensor(features_extra, dtype=torch.float, device=\"cuda\")\n .transpose(1, 2)\n .contiguous()\n .requires_grad_(True)\n )\n self._opacity = nn.Parameter(\n torch.tensor(opacities, dtype=torch.float, device=\"cuda\").requires_grad_(\n True\n )\n )\n self._scaling = nn.Parameter(\n torch.tensor(scales, dtype=torch.float, device=\"cuda\").requires_grad_(True)\n )\n self._rotation = nn.Parameter(\n torch.tensor(rots, dtype=torch.float, device=\"cuda\").requires_grad_(True)\n )\n self.active_sh_degree = new_sh\n\n def load_ply(self, path):\n plydata = PlyData.read(path)\n\n xyz = np.stack(\n (\n np.asarray(plydata.elements[0][\"x\"]),\n np.asarray(plydata.elements[0][\"y\"]),\n np.asarray(plydata.elements[0][\"z\"]),\n ),\n axis=1,\n )\n opacities = np.asarray(plydata.elements[0][\"opacity\"])[..., np.newaxis]\n\n features_dc = np.zeros((xyz.shape[0], 3, 1))\n features_dc[:, 0, 0] = np.asarray(plydata.elements[0][\"f_dc_0\"])\n features_dc[:, 1, 0] = np.asarray(plydata.elements[0][\"f_dc_1\"])\n features_dc[:, 2, 0] = np.asarray(plydata.elements[0][\"f_dc_2\"])\n\n extra_f_names = [\n p.name\n for p in plydata.elements[0].properties\n if p.name.startswith(\"f_rest_\")\n ]\n extra_f_names = sorted(extra_f_names, key=lambda x: int(x.split(\"_\")[-1]))\n ic(self.max_sh_degree)\n ic(3 * (self.max_sh_degree + 1) ** 2 - 3)\n # ic(extra_f_names)\n assert len(extra_f_names) == 3 * (self.max_sh_degree + 1) ** 2 - 3\n features_extra = np.zeros((xyz.shape[0], len(extra_f_names)))\n for idx, attr_name in enumerate(extra_f_names):\n features_extra[:, idx] = np.asarray(plydata.elements[0][attr_name])\n # Reshape (P,F*SH_coeffs) to (P, F, SH_coeffs except DC)\n features_extra = features_extra.reshape(\n (features_extra.shape[0], 3, (self.max_sh_degree + 1) ** 2 - 1)\n )\n\n scale_names = [\n p.name\n for p in plydata.elements[0].properties\n if p.name.startswith(\"scale_\")\n ]\n scale_names = sorted(scale_names, key=lambda x: int(x.split(\"_\")[-1]))\n scales = np.zeros((xyz.shape[0], len(scale_names)))\n for idx, attr_name in enumerate(scale_names):\n scales[:, idx] = np.asarray(plydata.elements[0][attr_name])\n\n rot_names = [\n p.name for p in plydata.elements[0].properties if p.name.startswith(\"rot\")\n ]\n rot_names = sorted(rot_names, key=lambda x: int(x.split(\"_\")[-1]))\n rots = np.zeros((xyz.shape[0], len(rot_names)))\n for idx, attr_name in enumerate(rot_names):\n rots[:, idx] = np.asarray(plydata.elements[0][attr_name])\n\n self._xyz = nn.Parameter(\n torch.tensor(xyz, dtype=torch.float, device=\"cuda\").requires_grad_(True)\n )\n self._features_dc = nn.Parameter(\n torch.tensor(features_dc, dtype=torch.float, device=\"cuda\")\n .transpose(1, 2)\n .contiguous()\n .requires_grad_(True)\n )\n self._features_rest = nn.Parameter(\n torch.tensor(features_extra, dtype=torch.float, device=\"cuda\")\n 
.transpose(1, 2)\n .contiguous()\n .requires_grad_(True)\n )\n self._opacity = nn.Parameter(\n torch.tensor(opacities, dtype=torch.float, device=\"cuda\").requires_grad_(\n True\n )\n )\n self._scaling = nn.Parameter(\n torch.tensor(scales, dtype=torch.float, device=\"cuda\").requires_grad_(True)\n )\n self._rotation = nn.Parameter(\n torch.tensor(rots, dtype=torch.float, device=\"cuda\").requires_grad_(True)\n )\n\n self.active_sh_degree = self.max_sh_degree\n\n def replace_tensor_to_optimizer(self, tensor, name):\n optimizable_tensors = {}\n for group in self.optimizer.param_groups:\n if group[\"name\"] == name:\n stored_state = self.optimizer.state.get(group[\"params\"][0], None)\n stored_state[\"exp_avg\"] = torch.zeros_like(tensor)\n stored_state[\"exp_avg_sq\"] = torch.zeros_like(tensor)\n\n del self.optimizer.state[group[\"params\"][0]]\n group[\"params\"][0] = nn.Parameter(tensor.requires_grad_(True))\n self.optimizer.state[group[\"params\"][0]] = stored_state\n\n optimizable_tensors[group[\"name\"]] = group[\"params\"][0]\n return optimizable_tensors\n\n def _prune_optimizer(self, mask):\n optimizable_tensors = {}\n for group in self.optimizer.param_groups:\n stored_state = self.optimizer.state.get(group[\"params\"][0], None)\n if stored_state is not None:\n stored_state[\"exp_avg\"] = stored_state[\"exp_avg\"][mask]\n stored_state[\"exp_avg_sq\"] = stored_state[\"exp_avg_sq\"][mask]\n\n del self.optimizer.state[group[\"params\"][0]]\n group[\"params\"][0] = nn.Parameter(\n (group[\"params\"][0][mask].requires_grad_(True))\n )\n self.optimizer.state[group[\"params\"][0]] = stored_state\n\n optimizable_tensors[group[\"name\"]] = group[\"params\"][0]\n else:\n group[\"params\"][0] = nn.Parameter(\n group[\"params\"][0][mask].requires_grad_(True)\n )\n optimizable_tensors[group[\"name\"]] = group[\"params\"][0]\n return optimizable_tensors\n\n def prune_points(self, mask):\n valid_points_mask = ~mask\n optimizable_tensors = self._prune_optimizer(valid_points_mask)\n\n self._xyz = optimizable_tensors[\"xyz\"]\n self._features_dc = optimizable_tensors[\"f_dc\"]\n self._features_rest = optimizable_tensors[\"f_rest\"]\n self._opacity = optimizable_tensors[\"opacity\"]\n self._scaling = optimizable_tensors[\"scaling\"]\n self._rotation = optimizable_tensors[\"rotation\"]\n\n self.xyz_gradient_accum = self.xyz_gradient_accum[valid_points_mask]\n\n self.denom = self.denom[valid_points_mask]\n self.max_radii2D = self.max_radii2D[valid_points_mask]\n\n def cat_tensors_to_optimizer(self, tensors_dict):\n optimizable_tensors = {}\n for group in self.optimizer.param_groups:\n assert len(group[\"params\"]) == 1\n extension_tensor = tensors_dict[group[\"name\"]]\n stored_state = self.optimizer.state.get(group[\"params\"][0], None)\n if stored_state is not None:\n stored_state[\"exp_avg\"] = torch.cat(\n (stored_state[\"exp_avg\"], torch.zeros_like(extension_tensor)), dim=0\n )\n stored_state[\"exp_avg_sq\"] = torch.cat(\n (stored_state[\"exp_avg_sq\"], torch.zeros_like(extension_tensor)),\n dim=0,\n )\n\n del self.optimizer.state[group[\"params\"][0]]\n group[\"params\"][0] = nn.Parameter(\n torch.cat(\n (group[\"params\"][0], extension_tensor), dim=0\n ).requires_grad_(True)\n )\n self.optimizer.state[group[\"params\"][0]] = stored_state\n\n optimizable_tensors[group[\"name\"]] = group[\"params\"][0]\n else:\n group[\"params\"][0] = nn.Parameter(\n torch.cat(\n (group[\"params\"][0], extension_tensor), dim=0\n ).requires_grad_(True)\n )\n optimizable_tensors[group[\"name\"]] = 
group[\"params\"][0]\n\n return optimizable_tensors\n\n def densification_postfix(\n self,\n new_xyz,\n new_features_dc,\n new_features_rest,\n new_opacities,\n new_scaling,\n new_rotation,\n ):\n d = {\n \"xyz\": new_xyz,\n \"f_dc\": new_features_dc,\n \"f_rest\": new_features_rest,\n \"opacity\": new_opacities,\n \"scaling\": new_scaling,\n \"rotation\": new_rotation,\n }\n\n optimizable_tensors = self.cat_tensors_to_optimizer(d)\n self._xyz = optimizable_tensors[\"xyz\"]\n self._features_dc = optimizable_tensors[\"f_dc\"]\n self._features_rest = optimizable_tensors[\"f_rest\"]\n self._opacity = optimizable_tensors[\"opacity\"]\n self._scaling = optimizable_tensors[\"scaling\"]\n self._rotation = optimizable_tensors[\"rotation\"]\n\n self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device=\"cuda\")\n self.denom = torch.zeros((self.get_xyz.shape[0], 1), device=\"cuda\")\n self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device=\"cuda\")\n\n def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):\n n_init_points = self.get_xyz.shape[0]\n # Extract points that satisfy the gradient condition\n padded_grad = torch.zeros((n_init_points), device=\"cuda\")\n padded_grad[: grads.shape[0]] = grads.squeeze()\n selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False)\n selected_pts_mask = torch.logical_and(\n selected_pts_mask,\n torch.max(self.get_scaling, dim=1).values\n > self.percent_dense * scene_extent,\n )\n\n stds = self.get_scaling[selected_pts_mask].repeat(N, 1)\n means = torch.zeros((stds.size(0), 3), device=\"cuda\")\n samples = torch.normal(mean=means, std=stds)\n rots = build_rotation(self._rotation[selected_pts_mask]).repeat(N, 1, 1)\n new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self.get_xyz[\n selected_pts_mask\n ].repeat(N, 1)\n new_scaling = self.scaling_inverse_activation(\n self.get_scaling[selected_pts_mask].repeat(N, 1) / (0.8 * N)\n )\n new_rotation = self._rotation[selected_pts_mask].repeat(N, 1)\n new_features_dc = self._features_dc[selected_pts_mask].repeat(N, 1, 1)\n new_features_rest = self._features_rest[selected_pts_mask].repeat(N, 1, 1)\n new_opacity = self._opacity[selected_pts_mask].repeat(N, 1)\n\n self.densification_postfix(\n new_xyz,\n new_features_dc,\n new_features_rest,\n new_opacity,\n new_scaling,\n new_rotation,\n )\n\n prune_filter = torch.cat(\n (\n selected_pts_mask,\n torch.zeros(N * selected_pts_mask.sum(), device=\"cuda\", dtype=bool),\n )\n )\n self.prune_points(prune_filter)\n\n def densify_and_clone(self, grads, grad_threshold, scene_extent):\n # Extract points that satisfy the gradient condition\n selected_pts_mask = torch.where(\n torch.norm(grads, dim=-1) >= grad_threshold, True, False\n )\n selected_pts_mask = torch.logical_and(\n selected_pts_mask,\n torch.max(self.get_scaling, dim=1).values\n <= self.percent_dense * scene_extent,\n )\n\n new_xyz = self._xyz[selected_pts_mask]\n new_features_dc = self._features_dc[selected_pts_mask]\n new_features_rest = self._features_rest[selected_pts_mask]\n new_opacities = self._opacity[selected_pts_mask]\n new_scaling = self._scaling[selected_pts_mask]\n new_rotation = self._rotation[selected_pts_mask]\n\n self.densification_postfix(\n new_xyz,\n new_features_dc,\n new_features_rest,\n new_opacities,\n new_scaling,\n new_rotation,\n )\n\n def densify(self, max_grad, extent):\n grads = self.xyz_gradient_accum / self.denom\n grads[grads.isnan()] = 0.0\n\n self.densify_and_clone(grads, max_grad, extent)\n 
self.densify_and_split(grads, max_grad, extent)\n torch.cuda.empty_cache()\n\n def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):\n grads = self.xyz_gradient_accum / self.denom\n grads[grads.isnan()] = 0.0\n\n self.densify_and_clone(grads, max_grad, extent)\n self.densify_and_split(grads, max_grad, extent)\n\n prune_mask = (self.get_opacity < min_opacity).squeeze()\n if max_screen_size:\n big_points_vs = self.max_radii2D > max_screen_size\n big_points_ws = self.get_scaling.max(dim=1).values > 0.1 * extent\n prune_mask = torch.logical_or(\n torch.logical_or(prune_mask, big_points_vs), big_points_ws\n )\n self.prune_points(prune_mask)\n\n torch.cuda.empty_cache()\n\n def prune_opacity(self, percent):\n sorted_tensor, _ = torch.sort(self.get_opacity, dim=0)\n index_nth_percentile = int(percent * (sorted_tensor.shape[0] - 1))\n value_nth_percentile = sorted_tensor[index_nth_percentile]\n prune_mask = (self.get_opacity <= value_nth_percentile).squeeze()\n\n # big_points_vs = self.max_radii2D > max_screen_size\n # big_points_ws = self.get_scaling.max(dim=1).values > 0.1 * extent\n # prune_mask = torch.logical_or(torch.logical_or(prune_mask, big_points_vs), big_points_ws)\n self.prune_points(prune_mask)\n\n torch.cuda.empty_cache()\n\n def prune_gaussians(self, percent, import_score: list):\n ic(import_score.shape)\n sorted_tensor, _ = torch.sort(import_score, dim=0)\n index_nth_percentile = int(percent * (sorted_tensor.shape[0] - 1))\n value_nth_percentile = sorted_tensor[index_nth_percentile]\n prune_mask = (import_score <= value_nth_percentile).squeeze()\n # TODO(Kevin) Emergent, change it back. This is just for testing\n self.prune_points(prune_mask)\n\n def add_densification_stats(self, viewspace_point_tensor, update_filter):\n self.xyz_gradient_accum[update_filter] += torch.norm(\n viewspace_point_tensor.grad[update_filter, :2], dim=-1, keepdim=True\n )\n self.denom[update_filter] += 1"
},
{
"identifier": "eval_sh",
"path": "utils/sh_utils.py",
"snippet": "def eval_sh(deg, sh, dirs):\n \"\"\"\n Evaluate spherical harmonics at unit directions\n using hardcoded SH polynomials.\n Works with torch/np/jnp.\n ... Can be 0 or more batch dimensions.\n Args:\n deg: int SH deg. Currently, 0-3 supported\n sh: jnp.ndarray SH coeffs [..., C, (deg + 1) ** 2]\n dirs: jnp.ndarray unit directions [..., 3]\n Returns:\n [..., C]\n \"\"\"\n assert deg <= 4 and deg >= 0\n coeff = (deg + 1) ** 2\n assert sh.shape[-1] >= coeff\n\n result = C0 * sh[..., 0]\n if deg > 0:\n x, y, z = dirs[..., 0:1], dirs[..., 1:2], dirs[..., 2:3]\n result = (\n result - C1 * y * sh[..., 1] + C1 * z * sh[..., 2] - C1 * x * sh[..., 3]\n )\n\n if deg > 1:\n xx, yy, zz = x * x, y * y, z * z\n xy, yz, xz = x * y, y * z, x * z\n result = (\n result\n + C2[0] * xy * sh[..., 4]\n + C2[1] * yz * sh[..., 5]\n + C2[2] * (2.0 * zz - xx - yy) * sh[..., 6]\n + C2[3] * xz * sh[..., 7]\n + C2[4] * (xx - yy) * sh[..., 8]\n )\n\n if deg > 2:\n result = (\n result\n + C3[0] * y * (3 * xx - yy) * sh[..., 9]\n + C3[1] * xy * z * sh[..., 10]\n + C3[2] * y * (4 * zz - xx - yy) * sh[..., 11]\n + C3[3] * z * (2 * zz - 3 * xx - 3 * yy) * sh[..., 12]\n + C3[4] * x * (4 * zz - xx - yy) * sh[..., 13]\n + C3[5] * z * (xx - yy) * sh[..., 14]\n + C3[6] * x * (xx - 3 * yy) * sh[..., 15]\n )\n\n if deg > 3:\n result = (\n result\n + C4[0] * xy * (xx - yy) * sh[..., 16]\n + C4[1] * yz * (3 * xx - yy) * sh[..., 17]\n + C4[2] * xy * (7 * zz - 1) * sh[..., 18]\n + C4[3] * yz * (7 * zz - 3) * sh[..., 19]\n + C4[4] * (zz * (35 * zz - 30) + 3) * sh[..., 20]\n + C4[5] * xz * (7 * zz - 3) * sh[..., 21]\n + C4[6] * (xx - yy) * (7 * zz - 1) * sh[..., 22]\n + C4[7] * xz * (xx - 3 * yy) * sh[..., 23]\n + C4[8]\n * (xx * (xx - 3 * yy) - yy * (3 * xx - yy))\n * sh[..., 24]\n )\n return result"
}
] | import torch
import math
from diff_gaussian_rasterization import (
GaussianRasterizationSettings,
GaussianRasterizer,
)
from scene.gaussian_model import GaussianModel
from utils.sh_utils import eval_sh | 8,771 | # based on __init__.render
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#
def count_render(
viewpoint_camera,
pc: GaussianModel,
pipe,
bg_color: torch.Tensor,
scaling_modifier=1.0,
override_color=None,
):
"""
Render the scene.
Background tensor (bg_color) must be on GPU!
"""
# Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means
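    # retain_grad() below keeps .grad on these screen-space means after backward,
    # which add_densification_stats later reads as viewspace_point_tensor.grad;
    # the try/except guards the call when autograd is disabled (e.g. pure inference).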
screenspace_points = (
torch.zeros_like(
pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda"
)
+ 0
)
try:
screenspace_points.retain_grad()
except:
pass
# Set up rasterization configuration
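    # tan(FoV/2) gives the half-extent of the image plane at unit depth along each
    # axis; the rasterizer uses it together with the view/projection matrices to
    # project the 3D Gaussian means onto pixels.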
tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)
raster_settings = GaussianRasterizationSettings(
image_height=int(viewpoint_camera.image_height),
image_width=int(viewpoint_camera.image_width),
tanfovx=tanfovx,
tanfovy=tanfovy,
bg=bg_color,
scale_modifier=scaling_modifier,
viewmatrix=viewpoint_camera.world_view_transform,
projmatrix=viewpoint_camera.full_proj_transform,
sh_degree=pc.active_sh_degree,
campos=viewpoint_camera.camera_center,
prefiltered=False,
debug=pipe.debug,
)
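    # Note: f_count=True is not part of the stock GaussianRasterizer interface; it is
    # presumably exposed by a modified diff-gaussian-rasterization build that also
    # returns per-Gaussian contribution counts (used e.g. by prune_gaussians above).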
rasterizer = GaussianRasterizer(raster_settings=raster_settings, f_count=True)
means3D = pc.get_xyz
means2D = screenspace_points
opacity = pc.get_opacity
# If precomputed 3d covariance is provided, use it. If not, then it will be computed from
# scaling / rotation by the rasterizer.
scales = None
rotations = None
cov3D_precomp = None
if pipe.compute_cov3D_python:
cov3D_precomp = pc.get_covariance(scaling_modifier)
else:
scales = pc.get_scaling
rotations = pc.get_rotation
# If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors
# from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer.
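    # When converting in Python, the SH coefficients are reshaped to [N, 3, (deg+1)^2]
    # and evaluated with eval_sh along the per-Gaussian viewing direction
    # (camera center -> Gaussian mean), normalized to unit length.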
shs = None
colors_precomp = None
if override_color is None:
if pipe.convert_SHs_python:
shs_view = pc.get_features.transpose(1, 2).view(
-1, 3, (pc.max_sh_degree + 1) ** 2
)
dir_pp = pc.get_xyz - viewpoint_camera.camera_center.repeat(
pc.get_features.shape[0], 1
)
dir_pp_normalized = dir_pp / dir_pp.norm(dim=1, keepdim=True)
 | # based on __init__.render
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#
def count_render(
viewpoint_camera,
pc: GaussianModel,
pipe,
bg_color: torch.Tensor,
scaling_modifier=1.0,
override_color=None,
):
"""
Render the scene.
Background tensor (bg_color) must be on GPU!
"""
# Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means
screenspace_points = (
torch.zeros_like(
pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda"
)
+ 0
)
try:
screenspace_points.retain_grad()
except:
pass
# Set up rasterization configuration
tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)
raster_settings = GaussianRasterizationSettings(
image_height=int(viewpoint_camera.image_height),
image_width=int(viewpoint_camera.image_width),
tanfovx=tanfovx,
tanfovy=tanfovy,
bg=bg_color,
scale_modifier=scaling_modifier,
viewmatrix=viewpoint_camera.world_view_transform,
projmatrix=viewpoint_camera.full_proj_transform,
sh_degree=pc.active_sh_degree,
campos=viewpoint_camera.camera_center,
prefiltered=False,
debug=pipe.debug,
)
rasterizer = GaussianRasterizer(raster_settings=raster_settings, f_count=True)
means3D = pc.get_xyz
means2D = screenspace_points
opacity = pc.get_opacity
# If precomputed 3d covariance is provided, use it. If not, then it will be computed from
# scaling / rotation by the rasterizer.
scales = None
rotations = None
cov3D_precomp = None
if pipe.compute_cov3D_python:
cov3D_precomp = pc.get_covariance(scaling_modifier)
else:
scales = pc.get_scaling
rotations = pc.get_rotation
# If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors
# from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer.
shs = None
colors_precomp = None
if override_color is None:
if pipe.convert_SHs_python:
shs_view = pc.get_features.transpose(1, 2).view(
-1, 3, (pc.max_sh_degree + 1) ** 2
)
dir_pp = pc.get_xyz - viewpoint_camera.camera_center.repeat(
pc.get_features.shape[0], 1
)
dir_pp_normalized = dir_pp / dir_pp.norm(dim=1, keepdim=True) | sh2rgb = eval_sh(pc.active_sh_degree, shs_view, dir_pp_normalized) | 1 | 2023-11-26 20:02:23+00:00 | 12k |
eth-sri/language-model-arithmetic | src/model_arithmetic/model_arithmetic.py | [
{
"identifier": "load_model",
"path": "src/model_arithmetic/basic_model_loader.py",
"snippet": "def load_model(dir_or_model, classification=False, token_classification=False, return_tokenizer=False, dtype=torch.bfloat16, load_dtype=True, \n rl=False, peft_config=None):\n \"\"\"\n This function is used to load a model based on several parameters including the type of task it is targeted to perform.\n \n Args:\n dir_or_model: It can be either a directory containing the pre-training model configuration details or a pretrained model.\n\n classification (bool): If True, loads the model for sequence classification.\n\n token_classification (bool): If True, loads the model for token classification.\n\n return_tokenizer (bool): If True, returns the tokenizer along with the model.\n\n dtype: The data type that PyTorch should use internally to store the model’s parameters and do the computation.\n\n load_dtype (bool): If False, sets dtype as torch.float32 regardless of the passed dtype value.\n\n rl (bool): If True, loads model specifically designed to be used in reinforcement learning environment.\n\n peft_config: Configuration details for Peft models. \n \n Returns:\n It returns a model for the required task along with its tokenizer, if specified.\n \"\"\"\n log(logger.debug, f\"Loading model for {dir_or_model} with {classification}, {dtype}, {load_dtype}\")\n is_lora_dir = os.path.isfile(os.path.join(dir_or_model, \"adapter_config.json\"))\n\n if not load_dtype:\n dtype = torch.float32\n\n if is_lora_dir:\n loaded_json = json.load(open(os.path.join(dir_or_model, \"adapter_config.json\"), \"r\"))\n model_name = loaded_json[\"base_model_name_or_path\"]\n else:\n model_name = dir_or_model\n\n original_model_name = model_name\n\n if classification:\n model = AutoModelForSequenceClassification.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\") # to investigate: calling torch_dtype here fails.\n elif token_classification:\n model = AutoModelForTokenClassification.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\")\n elif rl:\n model = AutoModelForCausalLMWithValueHead.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, \n peft_config=peft_config, device_map=\"auto\")\n else:\n if model_name.endswith(\"GPTQ\") or model_name.endswith(\"GGML\"):\n model = AutoGPTQForCausalLM.from_quantized(model_name,\n use_safetensors=True,\n trust_remote_code=True,\n # use_triton=True, # breaks currently, unfortunately generation time of the GPTQ model is quite slow\n quantize_config=None, device_map=\"auto\")\n else:\n model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\")\n\n if is_lora_dir:\n model = PeftModel.from_pretrained(model, dir_or_model)\n \n try:\n tokenizer = load_tokenizer(original_model_name)\n model.config.pad_token_id = tokenizer.pad_token_id\n except Exception:\n pass\n if return_tokenizer:\n return model, load_tokenizer(original_model_name)\n return model"
},
{
"identifier": "load_tokenizer",
"path": "src/model_arithmetic/basic_model_loader.py",
"snippet": "def load_tokenizer(dir_or_model):\n \"\"\"\n This function is used to load the tokenizer for a specific pre-trained model.\n \n Args:\n dir_or_model: It can be either a directory containing the pre-training model configuration details or a pretrained model.\n \n Returns:\n It returns a tokenizer that can convert text to tokens for the specific model input.\n \"\"\"\n log(logger.debug, f\"Loading tokenizer for {dir_or_model}\")\n\n is_lora_dir = os.path.isfile(os.path.join(dir_or_model, \"adapter_config.json\"))\n\n if is_lora_dir:\n loaded_json = json.load(open(os.path.join(dir_or_model, \"adapter_config.json\"), \"r\"))\n model_name = loaded_json[\"base_model_name_or_path\"]\n else:\n model_name = dir_or_model\n \n if os.path.isfile(os.path.join(dir_or_model, \"config.json\")):\n loaded_json = json.load(open(os.path.join(dir_or_model, \"config.json\"), \"r\"))\n model_name = loaded_json[\"_name_or_path\"]\n\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n\n if tokenizer.pad_token is None:\n log(logger.debug, \"Setting pad token to eos token\")\n tokenizer.pad_token = tokenizer.eos_token\n tokenizer.pad_token_id = tokenizer.eos_token_id\n \n return tokenizer"
},
{
"identifier": "get_max_length",
"path": "src/model_arithmetic/utils.py",
"snippet": "def get_max_length(model_config, default_length=1024):\n \"\"\"\n Get the maximum length from the model configuration.\n\n Args:\n model_config (object): The model configuration object.\n default_length (int, optional): The default maximum length. Defaults to 1024.\n\n Returns:\n int: The maximum length.\n \"\"\"\n max_length = None\n for length_setting in [\"n_positions\", \"max_position_embeddings\", \"seq_length\"]:\n max_length = getattr(model_config, length_setting, None)\n if max_length:\n if ENABLE_LOGGING:\n logger.debug(f\"Found max length: {max_length}\")\n break\n if not max_length:\n max_length = default_length\n if ENABLE_LOGGING:\n logger.debug(f\"Using default max length: {max_length}\")\n\n return max_length"
},
{
"identifier": "ENABLE_LOGGING",
"path": "src/model_arithmetic/utils.py",
"snippet": "ENABLE_LOGGING = False"
},
{
"identifier": "log",
"path": "src/model_arithmetic/utils.py",
"snippet": "def log(function, message):\n \"\"\"\n Logs the given message using the provided function if logging is enabled.\n \n Parameters:\n function (callable): The logging function to use.\n message (str): The message to be logged.\n \"\"\"\n if ENABLE_LOGGING:\n function(message)"
},
{
"identifier": "Operator",
"path": "src/model_arithmetic/operators.py",
"snippet": "class Operator(BaseClass):\n def __init__(self, minimum_value=-10 ** 8, **kwargs):\n \"\"\"Initializes an operator with the given keyword arguments.\n\n Args:\n minimum_value (float, optional): The minimum value any element can have: this is important when doing calculations where several logprobs have been made -torch.inf but we still want to do meaningful computations with them.\n **kwargs: The keyword arguments.\n \"\"\"\n super().__init__(**kwargs)\n self.minimum_value = minimum_value\n \n def set_to_minimum(self, output):\n \"\"\"Sets the output to the minimum value if it is smaller than the minimum value.\n\n Args:\n output (List || torch.tensor): List or torch.tensor\n \"\"\"\n if isinstance(output, list):\n for el in range(len(output)):\n if torch.is_tensor(output[el]):\n output[el][output[el] < self.minimum_value] = self.minimum_value\n elif torch.is_tensor(output):\n output[output < self.minimum_value] = self.minimum_value\n return output\n \n def evaluate(self, runnable_operator_outputs : Dict, normalize : bool = True):\n \"\"\"Evaluates the given object in the formula based on the language model outputs\n\n Args:\n runnable_operator_outputs (Dict): Maps Runnable Operators to their outputs\n\n Raises:\n NotImplementedError\n \"\"\"\n raise NotImplementedError\n\n def clone(self):\n \"\"\"Creates a deep copy of the object.\n\n Returns:\n A deep copy of the object.\n \"\"\"\n return copy.deepcopy(self)\n\n def norm(self, runnable_operator_outputs : Dict = None):\n \"\"\"Returns the norm of the object\n \n Args:\n runnable_operator_outputs (Dict): Maps Runnable Operators to their outputs\n\n Raises:\n NotImplementedError\n \"\"\"\n raise NotImplementedError\n \n def runnable_operators(self):\n \"\"\"Returns the Runnable Operators in the object\n\n Raises:\n NotImplementedError\n \"\"\"\n raise NotImplementedError\n\n def is_finished(self, runnable_operator_outputs : Dict) -> bool:\n \"\"\"Returns whether the object is finished\n\n Args:\n runnable_operator_outputs (Dict): Maps Runnable Operators to their outputs\n\n Raises:\n NotImplementedError\n \"\"\"\n raise NotImplementedError\n\n def normalize(self, output, runnable_operator_outputs : Dict):\n \"\"\"\n Normalizes the output of the operator\n \n Args:\n output (torch.tensor || float): The output of the operator\n runnable_operator_outputs (Dict): The outputs of the runnable operators\n \"\"\"\n norm = self.norm(runnable_operator_outputs)\n if (torch.is_tensor(norm) and torch.count_nonzero(norm == 0) > 0) or (not torch.is_tensor(norm) and norm == 0):\n return output\n if not torch.is_tensor(output):\n return output\n output /= norm\n output -= torch.logsumexp(output, dim=-1, keepdim=True)\n return output\n\n\n def __add__(self, other):\n if isinstance(other, (float, int)):\n return Sum([self, Constant(other)])\n return Sum([self, other])\n\n def __radd__(self, other):\n return self.__add__(other)\n \n def __multiply__(self, other):\n if isinstance(other, (float, int)):\n return Product([self, Constant(other)])\n return Product([self, other])\n\n def __div__(self, other):\n if isinstance(other, (float, int)):\n return Product([self, Constant(1 / other)])\n raise NotImplementedError\n\n def __rdiv__(self, other):\n raise NotImplementedError\n\n def __sub__(self, other):\n return self.__add__(-other)\n\n def __neg__(self):\n return self.__multiply__(-1)\n\n def __rmultiply__(self, other):\n return self.__multiply__(other)\n\n def __mul__(self, other):\n return self.__multiply__(other)\n\n def __rmul__(self, other):\n 
return self.__multiply__(other)\n\n def __rsub__(self, other):\n self_ = self.__neg__()\n return self_.__add__(other)\n \n def __str__(self):\n return f\"{self.__class__.__name__}({self.kwargs})\""
},
{
"identifier": "Monitor",
"path": "src/model_arithmetic/monitor.py",
"snippet": "class Monitor(MultipleMonitor):\n \"\"\"\n Final monitor object that keeps track of values for runnable operators, but also for the whole formula\n \"\"\"\n def __init__(self, runnable_operators):\n \"\"\"\n Initialize the Monitor object.\n \n Args:\n runnable_operators(List[RunnableOperator]): A list of runnable operators.\n \"\"\"\n super().__init__(models_monitor=ModelsMonitor(runnable_operators))\n \n def pop_results(self, n=1, runnable_operator=None, indicator=None):\n \"\"\"Pop results from the monitor.\n\n Args:\n n (int, optional): Number of elements to pop. Defaults to 1.\n runnable_operator (RunnableOperator, optional): From which ModelMonitor to pop the results. Defaults to None.\n indicator (string, optional): Name of the type to pop. Defaults to None.\n \"\"\"\n if runnable_operator is None:\n super().pop_results(n, indicator=indicator)\n else:\n self.models_monitor.pop_results(n, runnable_operator, indicator=indicator)\n \n def merge(self, other):\n \"\"\"\n Merge the elements of another Monitor object with the elements of this object.\n Args:\n other (Monitor): The other Monitor object.\n \"\"\"\n super().merge(other)\n self.models_monitor.merge(other.models_monitor)\n \n def add_result(self, element, runnable_operator=None, indicator=None):\n \"\"\"\n Add a result to the monitor.\n Args:\n element (float): The result to be added.\n runnable_operator (RunnableOperator): The runnable operator associated with the result.\n indicator (string, optional): The name of the time type.\n \"\"\"\n if runnable_operator is None:\n super().add_result(element, indicator=indicator)\n else:\n self.models_monitor.add_result(element, runnable_operator, indicator=indicator)\n \n def get_store_settings(self):\n \"\"\"\n Gets the store settings of the parent class and the models monitor.\n \"\"\"\n sum_vals = [monitor.total() for monitor in self.models_monitor.monitors.values()]\n if len(sum_vals) > 0:\n total_time_no_model_calls = self.total() - sum(sum_vals)\n else:\n total_time_no_model_calls = self.total()\n\n return {\n **super().get_store_settings(),\n \"total_time_no_model_calls\": total_time_no_model_calls,\n \"models_monitor\": self.models_monitor.get_store_settings()\n }"
},
{
"identifier": "RunnableOperator",
"path": "src/model_arithmetic/runnable_operators.py",
"snippet": "class RunnableOperator(Operator):\n def __init__(self, prompt_string=\"\", model=None, speculative_factor=1, \n prompt_template = lambda prompt_string, input_string: prompt_string + input_string, run_priority=0, group=None, \n outputs_logprobs=True, **kwargs):\n \"\"\"\n Initialize a runnable operator instance. A runnable operator is an operator that generates a probability distribution instead of modifies an existing one.\n \n Args:\n prompt_string (str): String to be used as a prompt. Only used in specific runnable operators\n model (optional): Model to be used for operation. If None, the model must be set later to the default model to be used.\n speculative_factor (int): Factor for speculative sampling.\n prompt_template (callable): Function for generating prompt. Takes two arguments: prompt_string and input_string. The operator will be run on prompt_template(..., ...) + continuation_tokens\n run_priority (int): Priority for running the operation. Higher priority means the operation will be run first, especially important for the classifier.\n group (optional): Group to which the operator belongs. This ensures that speculative sampling will not be tried when not all operators of a group are finished.\n outputs_logprobs (bool): Whether the operator outputs logprobs.\n **kwargs: Arbitrary keyword arguments.\n \"\"\"\n super().__init__(speculative_factor=speculative_factor, model=model, prompt_string=prompt_string,\n prompt_template=prompt_template, run_priority=run_priority, group=group, outputs_logprobs=outputs_logprobs, **kwargs)\n self.cache = None\n \n def run_condition(self, new_tokens, trigger_end):\n \"\"\"\n Determine if the run condition is met.\n \n Args:\n new_tokens (List[int]): Number of new tokens per sample in the batch\n trigger_end (List[bool]): Whether to trigger the end for each sample in the batch.\n \n Returns:\n bool: Whether the run condition is met.\n \"\"\"\n new_tokens = [new_tokens[i] if not trigger_end[i] or new_tokens[i] < 0 else max(new_tokens[i], self.speculative_factor) for i in range(len(new_tokens))]\n return np.mean(new_tokens) >= self.speculative_factor \n # other possibility:\n # return np.max(new_tokens) + 1 >= speculative_factor\n \n def delete_cache(self, index=None, from_=None):\n \"\"\"\n Delete the cache.\n \"\"\"\n if from_ is None and index is None:\n self.cache = None\n \n def run(self, tokenized_inputs, **kwargs):\n \"\"\"\n Run the operation. This method needs to be implemented by subclasses.\n \n Args:\n tokenized_inputs (torch.tensor): Inputs that have been tokenized.\n **kwargs: Arbitrary keyword arguments.\n \n Raises:\n NotImplementedError: This method needs to be implemented by subclasses.\n \"\"\"\n raise NotImplementedError(\"This method needs to be implemented by subclasses.\")\n \n def runnable_operators(self):\n \"\"\"\n Get a list of runnable operators used by the operator, usually only this operator itself.\n \n Returns:\n list: List of runnable operators.\n \"\"\"\n return [self]\n \n def same_operator(self, other):\n \"\"\"\n Determine if the other operator is the same as this one. 
This is important to avoid redundant runs of the same operator in a formula\n \n Args:\n other: Other operator to be compared.\n \n Returns:\n bool: Whether the other operator is the same as this one.\n \"\"\"\n if isinstance(other, str):\n return self.id() == other\n elif isinstance(other, RunnableOperator):\n return self.id() == other.id()\n return False\n\n def norm(self, runnable_operator_outputs=None):\n \"\"\"\n Compute the norm of the operator.\n \n Args:\n runnable_operator_outputs (optional): Outputs of runnable operators.\n \n Returns:\n int: The norm of the operator.\n \"\"\"\n if runnable_operator_outputs is None or self.is_finished(runnable_operator_outputs):\n return 1\n return 0\n \n def is_finished(self, runnable_operator_outputs):\n \"\"\"\n Determine if the operation is finished.\n \n Args:\n runnable_operator_outputs: Outputs of runnable operators.\n \n Returns:\n bool: Whether the operation is finished.\n \"\"\"\n return any([self.same_operator(output) and runnable_operator_outputs[output] is not None for output in runnable_operator_outputs])\n \n def evaluate(self, runnable_operator_outputs : Dict, normalize : bool = True):\n \"\"\"\n Evaluate the operation.\n \n Args:\n runnable_operator_outputs (Dict): Outputs of runnable operators.\n normalize (bool): Whether to normalize the evaluation.\n \n Returns:\n int: The evaluation of the operation.\n \"\"\"\n for output in runnable_operator_outputs:\n if self.same_operator(output) and runnable_operator_outputs[output] is not None:\n return runnable_operator_outputs[output]\n return 0\n \n def generate_settings(self):\n \"\"\"\n Generate settings for the operation.\n \n Returns:\n dict: Settings for the operation.\n \"\"\"\n kwargs = super().generate_settings()\n kwargs[\"prompt_template\"] = self.prompt_template(\"{{prompt_string}}\", \"{{input_string}}\")\n return kwargs\n\n @staticmethod\n def load_from_settings(settings):\n \"\"\"\n Load operator from settings.\n \n Args:\n settings (dict): Settings for the operation.\n \n Returns:\n Operator: Operator loaded from settings.\n \"\"\"\n copy = settings[\"prompt_template\"]\n prompt_template = lambda prompt_string, input_string: copy.replace(\"{{prompt_string}}\", prompt_string).replace(\"{{input_string}}\", input_string)\n settings[\"prompt_template\"] = prompt_template\n return Operator.load_from_settings(settings)\n \n def get_prompt(self, input_string):\n \"\"\"\n Get the prompt for the operation.\n \n Args:\n input_string (str): String to be used as input.\n \n Returns:\n callable: Function for generating prompt.\n \"\"\"\n return self.prompt_template(self.prompt_string, input_string)\n \n def get_store_params(self):\n \"\"\"\n Get parameters for storing the operation.\n \n Returns:\n dict: Parameters for storing the operation.\n \"\"\"\n return {\n \"class\": self.__class__.__name__,\n \"model\": self.model,\n \"speculative_factor\": self.speculative_factor,\n \"prompt_template\": self.prompt_template(self.prompt_string, \"{{input_string}}\")\n }\n \n def id(self):\n \"\"\"\n Get the ID of the operation.\n \n Returns:\n str: ID of the operation.\n \"\"\"\n kwargs = self.kwargs.copy()\n kwargs[\"prompt_template\"] = self.prompt_template(self.prompt_string, \"{{input_string}}\")\n return f\"{self.__class__.__name__}(**{kwargs})\"\n \n def load_model(self, dtype):\n \"\"\"\n Load the model for the operation. 
Only needs to be overwritten when a model is necessary\n \n Args:\n dtype: Data type for the model.\n \n Returns:\n None\n \"\"\"\n return None\n \n def initialize_after_model_set(self):\n \"\"\"\n Initialize the operation after the model is set (to the default model if necessary).\n \n Raises:\n AssertionError: If the model is not set before initializing.\n \"\"\"\n assert self.model is not None, \"Model must be set before initializing.\""
},
{
"identifier": "PromptedLLM",
"path": "src/model_arithmetic/runnable_operators.py",
"snippet": "class PromptedLLM(RunnableOperator):\n def __init__(self, prompt_string, model=None, speculative_factor=1, \n prompt_template = lambda prompt_string, input_string, : prompt_string + \"\\n\" + input_string, dtype=None, group=None,\n enable_cache=True, dim_keys_past=2, dim_values_past=2, run_eager=False, tokenizer=None, **kwargs):\n \"\"\"\n Initializes an LLM Prompt. This is a runnable operator that uses a language model to generate a probability distribution.\n Args:\n prompt_string (str): String to be used as a prompt. Only used in specific runnable operators\n model (optional): Model to be used for operation. If None, the model must be set later to the default model to be used.\n speculative_factor (int): Factor for speculative sampling.\n prompt_template (callable): Function for generating prompt. Takes two arguments: prompt_string and input_string. The operator will be run on prompt_template(..., ...) + continuation_tokens\n run_priority (int): Priority for running the operation. Higher priority means the operation will be run first, especially important for the classifier.\n dtype (optional): Data type for the model.\n group (optional): Group to which the operator belongs. This ensures that speculative sampling will not be tried when not all operators of a group are finished.\n enable_cache (bool): Whether to enable the key-value cache.\n dim_keys_past (int): Dimension of the keys in the key-value cache. Usually 2, but for other models this can be different.\n dim_values_past (int): Dimension of the values in the key-value cache. Usually 2, but for other models this can be different.\n run_eager (bool): Whether to run the model in eager mode. This is necessary for some models, but incompatible with speculative sampling and some other features.\n tokenizer (Tokenizer): Tokenizer to be used for the operation. If None, the default tokenizer will be used.\n **kwargs: Arbitrary keyword arguments.\n \"\"\"\n if dim_keys_past == 2 and dim_values_past == 2:\n # set the dims based on the model\n if model in [\"tiiuae/falcon-7b\", \"tiiuae/falcon-7b-instruct\", \"tiiuae/falcon-40b\", \"tiiuae/falcon-40b-instruct\"]:\n dim_keys_past = 1\n dim_values_past = 1\n \n super().__init__(prompt_string=prompt_string, model=model, speculative_factor=speculative_factor, \n prompt_template=prompt_template, group=group, enable_cache=enable_cache, \n dim_keys_past=dim_keys_past, dim_values_past=dim_values_past, run_eager=run_eager)\n self.dtype = dtype\n self.tokenizer_length = None\n self.tokenizer = tokenizer\n self.previous_input_ids = None\n self.default_dim = 2\n if self.run_eager:\n log(logger.warning, \"Eager mode is enabled. This will make several features, such as speculative sampling, inaccessible.\")\n \n def load_model(self, dtype):\n \"\"\"\n Loads the model for the operation.\n :param dtype: Data type for the model.\n \"\"\"\n if not isinstance(self.model, str):\n return self.model\n if self.dtype is None:\n return load_model(self.model, dtype=dtype)\n return load_model(self.model, dtype=self.dtype)\n \n def initialize_after_model_set(self):\n if self.tokenizer is None:\n tokenizer = load_tokenizer(self.model)\n self.tokenizer_length = len(tokenizer)\n \n def select_from_sample_cache(self, sample, from_=None, until=None):\n \"\"\"Selects the cache from a sample that needs to be stored\n\n Args:\n sample (torch.tensor): Torch tensor, the samples key-value past as stored by the LLM\n from_ (int, optional): From which value to store the key-value past. 
Defaults to None.\n until (int, optional): Until which value to store the key-value past. Defaults to None.\n \"\"\"\n for i in range(len(sample)):\n for j in range(len(sample[i])):\n sample[i][j] = sample[i][j][:, from_:until]\n \n return sample\n \n def swap_dimensions(self, sample):\n \"\"\"Swaps dimensions in order to make the dimensions match the default dimensions. This is necessary because models do not use the same semantics for the key-value storage\n\n Args:\n sample (List[torch.tensor]): Key-value past as stored by the LLM\n \"\"\"\n for i in range(len(sample)):\n # keys, values\n if self.default_dim != self.dim_keys_past:\n sample[i][0] = sample[i][0].transpose(self.default_dim - 1, self.dim_keys_past - 1)\n if self.default_dim != self.dim_values_past:\n sample[i][1] = sample[i][1].transpose(self.default_dim - 1, self.dim_values_past - 1)\n \n return sample\n \n def select_sample_cache(self, cache, sample_index):\n \"\"\"Stores the key value past by selecting the sample index from the cache and storing them in a list\n\n Args:\n cache (List[torch.tensor]): Key-value cache as returned by the model\n sample_index (int): Which sample to select\n \"\"\"\n sample = []\n for i in range(len(cache)):\n sample.append([\n cache[i][0][sample_index],\n cache[i][1][sample_index]\n ])\n sample = self.swap_dimensions(sample)\n return sample\n \n def pad_sample(self, sample, target_size):\n \"\"\"Pads all samples key-value cache to a specific size\n\n Args:\n sample (torch.tensor): Key-value cache as stored by the LLM\n target_size (int): Target size\n \"\"\"\n for i in range(len(sample)):\n for j in range(len(sample[i])):\n pad_size = target_size - sample[i][j].size(1)\n pad = (0, 0, pad_size, 0)\n if pad_size > 0:\n sample[i][j] = torch.nn.functional.pad(sample[i][j], pad, \"constant\", 0)\n elif pad_size < 0:\n sample[i][j] = sample[i][j][:, :target_size]\n return sample\n \n def stack_samples(self, samples):\n \"\"\"Stacks the samples key-value cache by removing the List dimension and reordering to be appropriate for storing\n\n Args:\n samples (List[torch.tensor]): Key-value cache as returend by the model\n \"\"\"\n stacked_samples = []\n for i in range(len(samples[0])):\n stacked_mult = []\n for j in range(len(samples[0][i])):\n stacked = torch.stack(\n [samples[k][i][j] for k in range(len(samples))], dim=0\n )\n stacked_mult.append(stacked)\n stacked_samples.append(stacked_mult)\n return stacked_samples\n \n def store_cache(self, past_key_values, input_ids, lengths):\n \"\"\"Stores the past key values returned by the model in an appropriate way\n\n Args:\n past_key_values (List[torch.tensor]): Tensor in which the key values where reutrned\n input_ids (torch.tensor): Input ids\n lengths (List[int]): Length of each sample in the batch\n \"\"\"\n if self.run_eager:\n self.cache = past_key_values\n return\n self.cache = []\n self.previous_input_ids = []\n for i, length in enumerate(lengths):\n self.cache.append(\n self.select_from_sample_cache(self.select_sample_cache(past_key_values, i), from_=-length)\n )\n self.previous_input_ids.append(\n input_ids[i, -length:]\n )\n def common_starting_elements(self, t1, t2):\n \"\"\"Check for the common starting elements in two tensors\n\n Args:\n t1 (torch.tensor): First Tensor\n t2 (torch.tensor): Second Tensor\n \"\"\"\n min_length = min(t1.size(0), t2.size(0))\n eq = torch.eq(t1[:min_length], t2[:min_length])\n if not eq.any():\n return 0\n if eq.all():\n return min_length\n\n return torch.where(eq == 0)[0][0].item()\n \n def 
delete_previous_cache(self, new_input_ids, lengths):\n \"\"\"Deletes previous cache by only keeping the common elements between the previous input ids and the new input ids\n\n Args:\n new_input_ids (torch.tensor): New input ids\n lengths (List[int]): List of lengths\n \"\"\"\n if self.run_eager:\n return\n input_ids = [\n new_input_ids[i, -lengths[i]:] for i in range(len(lengths))\n ]\n elements = [self.common_starting_elements(input_ids[i], self.previous_input_ids[i]) for i in range(len(lengths))]\n self.cache = [\n self.select_from_sample_cache(self.cache[i], until=elements[i]) for i in range(len(lengths))\n ]\n \n \n def prepare_inputs(self, input_ids, attention_mask, n_new_tokens):\n \"\"\"Prepares the inputs for the model\n\n Args:\n input_ids (torch.tensor): Input ids\n attention_mask (torch.tensor): Attention Mask\n n_new_tokens (int): Number of new tokens since last run\n \"\"\"\n max_new_tokens = max(n_new_tokens)\n past_key_values = None\n if self.cache is not None and self.enable_cache:\n input_ids = input_ids[:, -max_new_tokens:]\n if self.run_eager:\n past_key_values = self.cache\n else:\n past_key_values = self.pad_cache(\n [self.select_from_sample_cache(self.cache[i], until=-max_new_tokens + n_new_tokens[i]) if max_new_tokens > n_new_tokens[i] else self.cache[i]\n for i in range(len(n_new_tokens))],\n attention_mask.shape[1] - max_new_tokens\n )\n return {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"use_cache\": True,\n \"past_key_values\": past_key_values\n }\n \n def pad_cache(self, cache, length):\n \"\"\"Pads the cache and prepares them for the model\n\n Args:\n cache (torch.tensor): Key-value cache as stored by the LLM\n lengths (List[int]): List of lengths\n \"\"\"\n for i in range(len(cache)):\n cache[i] = self.pad_sample(cache[i], length)\n cache[i] = self.swap_dimensions(cache[i])\n stacked_samples = self.stack_samples(cache)\n\n return stacked_samples\n \n def delete_cache(self, index=None, from_=None):\n \"\"\"Deletes all cache\n\n Args:\n index (int, optional): _description_. Defaults to None.\n from_ (int, optional): _description_. Defaults to None.\n \"\"\"\n # if index is not None and self.cache is not None:\n # self.previous_input_ids = self.previous_input_ids[:index] + self.previous_input_ids[index + 1:]\n # cache_shape = list(self.cache[0].shape)\n # device = self.cache[0].device\n # dtype = self.cache[0].dtype\n # cache_shape[-2] = 0\n # self.cache = self.cache[:index] + self.cache[index + 1:]\n # self.previous_input_ids.append(torch.tensor([]))\n # self.cache.append(torch.tensor([], device=device, dtype=dtype).reshape(cache_shape))\n # return\n # else:\n self.previous_input_ids = None\n self.cache = None\n\n def run(self, tokenized_inputs, loaded_models, model_new_tokens, use_cache, **kwargs):\n \"\"\"\n Runs the model on the tokenized inputs.\n Args:\n tokenized_inputs (torch.tensor): Inputs that have been tokenized.\n loaded_models (dict[PreTrainedModel]): Models that have been loaded. 
The model for this operation is in loaded_models[self.model]\n model_new_tokens (List[int]): Number of new tokens per sample in the batch\n use_cache (bool): Whether to use the key-value cache.\n \"\"\"\n if isinstance(self.model, str):\n model = loaded_models[self.model]\n else:\n model = self.model\n lengths = torch.sum(tokenized_inputs.attention_mask, dim=-1)\n if self.cache is not None and self.enable_cache and use_cache:\n self.delete_previous_cache(tokenized_inputs.input_ids, lengths)\n \n # if self.cache is not None:\n # length_common_input_ids_per_sample = [\n \n # ]\n actual_inputs = self.prepare_inputs(input_ids=tokenized_inputs.input_ids.to(model.device),\n attention_mask=tokenized_inputs.attention_mask.to(model.device),\n n_new_tokens=model_new_tokens)\n # run model \n with torch.no_grad():\n try:\n model_output = model(**actual_inputs, return_dict=True)\n except RuntimeError as e:\n raise RuntimeError(f\"Error thrown when running model. This is probably caused because the model handles the key-value cache differently. Consider setting dim_values_past and dim_keys_past values or disabling the key-value cache. Alternatively, you can set run_eager=True, but this feature is incompatible with speculative sampling and some other features.\")\n logprobs = torch.log_softmax(model_output.logits[:, :, :self.tokenizer_length], dim=-1)\n \n if self.enable_cache and use_cache:\n self.store_cache(model_output.past_key_values, tokenized_inputs.input_ids, lengths)\n \n logprobs = [logprobs[i, -model_new_tokens[i] : ].to(torch.float32) for i in range(logprobs.shape[0])]\n return logprobs\n\n def __str__(self):\n return f\"PromptedLLM('{self.prompt_string}', model='{self.model}')\""
},
{
"identifier": "TokenizedInput",
"path": "src/model_arithmetic/input.py",
"snippet": "class TokenizedInput:\n \"\"\"\n Keeps track of the tokenized input of a runnable operator. Automatically sets the correct tokens, by using the runnable operator's get_prompt method.\n \"\"\"\n def __init__(self, runnable_operator, model_name, model_config, tokenizer):\n \"\"\"\n Initialize the TokenizedInput object.\n\n Args:\n runnable_operator (RunnableOperator): An object that provides a get_prompt method.\n model_name (str): The name of the model.\n model_config (object): The configuration of the model.\n tokenizer (object): The tokenizer to be used.\n \"\"\"\n self.runnable_operator = runnable_operator\n self.input_tokens = []\n self.only_input_tokens = None\n self.tokenizer = tokenizer\n self.max_length = get_max_length(model_config)\n self.set_inputs([\"\"])\n # this is essentially what huggingface also does, but it is kinda hidden in their sample code (GenerationMixin.generate)\n self.tokenizer.padding_side = \"left\"\n \n def extend_batch_size(self, batch_size):\n \"\"\"\n Extend the size of the batch to the given size. If the current size is less than the given size, \n the first element is repeated to fill the batch.\n \n Necessary for compatibility with lm_eval\n\n Args:\n batch_size (int): The desired batch size.\n \"\"\"\n if len(self.input_tokens) != batch_size:\n self.input_tokens = [self.input_tokens[0]] * batch_size\n \n def set_inputs(self, inputs):\n \"\"\"\n Set the inputs for the TokenizedInput object.\n\n Args:\n inputs (list): A list of input strings.\n \"\"\"\n self.input_tokens = [self.runnable_operator.get_prompt(input_string) for input_string in inputs]\n bos_token = \"\"\n if self.tokenizer.bos_token_id is not None:\n self.input_tokens = [\n [self.tokenizer.bos_token_id] + self.tokenizer(input_string, truncation=True, max_length=self.max_length, add_special_tokens=False).input_ids\n for input_string in self.input_tokens\n ]\n bos_token = self.tokenizer.bos_token\n else:\n self.input_tokens = [\n self.tokenizer(input_string, truncation=True, max_length=self.max_length, add_special_tokens=False).input_ids\n for input_string in self.input_tokens\n ]\n \n only_prompt = [bos_token + self.runnable_operator.get_prompt(\"\")]\n self.only_input_tokens = self.tokenizer(only_prompt, padding=True, return_tensors=\"pt\", truncation=True, max_length=self.max_length, add_special_tokens=False)\n \n if \"token_type_ids\" in self.only_input_tokens:\n del self.only_input_tokens[\"token_type_ids\"]\n \n def get_only_input_tokens(self):\n \"\"\"\n Get the input tokens without any continuation tokens.\n\n Returns:\n object: The input tokens without any continuation tokens.\n \"\"\"\n return self.only_input_tokens\n \n def add_continuation_tokens(self, tokens):\n \"\"\"\n Add continuation tokens to the input tokens.\n\n Args:\n tokens (list): A list of continuation tokens.\n\n Returns:\n object: The input tokens with the continuation tokens added.\n \"\"\"\n output = [\n input_token + token for input_token, token in zip(self.input_tokens, tokens)\n ]\n truncated_output = [\n output[:self.max_length] for output in output\n ]\n padded_output = self.tokenizer.pad({\"input_ids\": truncated_output}, padding=True, return_tensors=\"pt\")\n return padded_output"
},
{
"identifier": "Compatibility",
"path": "src/model_arithmetic/lm_eval_compatibility.py",
"snippet": "class Compatibility:\n \"\"\"Compatibility class to allow the use of LM eval. Main compatibility issue is that lm eval does not allow to distinguish between the input tokens and the continuation tokens. This class fixes this manually by going\n through the task inputs and finding the one that matches the input tokens.\n \"\"\"\n def __init__(\n self,\n task_name,\n needs_input_tokens_lm_eval,\n tokenizer,\n device,\n max_length,\n ): \n \n \"\"\"Initializes the compatibility class.\n \n Args:\n task_name (str): Name of the task.\n needs_input_tokens_lm_eval (bool): Whether the task needs the input tokens or not. If it does, the program will try to find the input tokens in the task inputs.\n tokenizer (transformers.tokenization_utils_base.PreTrainedTokenizerBase): Tokenizer to be used.\n device (torch.device): Device to be used.\n max_length (int): Maximum length of the input tokens.\n \"\"\"\n self.task_name = task_name\n self.needs_input_tokens_lm_eval = needs_input_tokens_lm_eval\n self.tokenizer = tokenizer\n self.task_inputs = []\n self.device = device\n self.task_initialized = False\n self.max_length = max_length\n \n def initialize_task(self):\n \"\"\"Initializes the task. Looks up all the task inputs and stores them in a list. Gets encoded inputs along with the input length\n \"\"\"\n if self.task_initialized:\n return\n self.task_initialized = True\n self.task_inputs = []\n task = get_task(self.task_name)()\n \n if task.has_test_docs():\n task_doc_func = task.test_docs\n elif task.has_validation_docs():\n task_doc_func = task.validation_docs\n \n dataset = pd.DataFrame(task_doc_func())\n rnd = random.Random()\n rnd.seed(42)\n list_indices = list(range(len(dataset)))\n rnd.shuffle(list_indices)\n dataset = dataset.iloc[list_indices]\n # rnd.shuffle(dataset)\n \n for index in range(len(dataset)):\n doc = dict(dataset.iloc[index])\n ctx = task.fewshot_context(\n doc=doc, num_fewshot=0, rnd=rnd, description=\"\"\n )\n requests = task.construct_requests(doc, ctx)\n input_ = task.doc_to_text(doc)\n input_encoded = self.tokenizer(input_, return_tensors=\"pt\", truncation=True, max_length=self.max_length).input_ids[0]\n for request in requests:\n task_input = self.tokenizer(\"\".join(request.args), return_tensors=\"pt\", truncation=True, max_length=self.max_length).input_ids.to(self.device)[0]\n task_input_length = len(input_encoded)\n # double encoding decoding is necessary for the llama tokenizer (for example, a \"...\" got an extra space in front of it if you don't do this)\n self.task_inputs.append((task_input, len(task_input) - task_input_length, self.tokenizer.decode(task_input[:-1])))\n \n def is_target(self, input_tokens, task_input):\n \"\"\"Checks whether the input tokens are the target tokens starting from the end of the input tokens.\n\n Args:\n input_tokens (torch.tensor): Input tokens\n task_input (torch.tensor): Task Input Tokens\n \"\"\"\n return torch.all(input_tokens[-len(task_input):] == task_input)\n \n def find_in_task(self, input_tokens):\n \"\"\"Finds the input tokens in the task inputs. 
First does an exact match and then a fuzzy match if the exact match came up empty .\n\n Args:\n input_tokens (torch.tensor): Input Tokens\n \"\"\"\n if not self.task_initialized:\n self.initialize_task()\n \n decoded = self.tokenizer.decode(input_tokens)\n for i in range(len(self.task_inputs)):\n guess = self.task_inputs[i][2]\n if guess in decoded:\n return self.task_inputs[i]\n fuzzes = []\n for i in range(len(self.task_inputs)):\n guess = self.task_inputs[i][2]\n fuzzes.append(fuzz.partial_ratio(guess, decoded))\n\n return self.task_inputs[fuzzes.index(max(fuzzes))]\n \n def forward_preprocessing(self, input_ids, model_input_tokens, **kwargs):\n \"\"\"Implements the main preprocessing step. This is necessary to be able to use lm-evaluation-harness. This function finds the input tokens in the task inputs and then extends the batch size of the model input tokens\n\n Args:\n input_ids (torch.tensor): Input ids\n model_input_tokens (Input): Input classes to be used for the various models in the Model Arithmetic class\n \"\"\"\n ### this is a bit cheeky, but in order to be compatible with lm-evaluation-harness, we need to implement this method\n if not isinstance(input_ids, list):\n continuation_tokens = input_ids.tolist()\n else:\n continuation_tokens = input_ids\n \n # necessary for no context\n if self.needs_input_tokens_lm_eval and get_task is not None:\n inputs = []\n continuation_tokens = []\n for i in range(len(input_ids)):\n task_element = self.find_in_task(input_ids[i])\n if task_element[1] > 1:\n inputs.append(self.tokenizer.decode(input_ids[i][:-task_element[1] + 1]))\n continuation_tokens.append(input_ids[i][-task_element[1] + 1:].tolist())\n else:\n inputs.append(self.tokenizer.decode(input_ids[i]))\n continuation_tokens.append([])\n \n for runnable_operator_id in model_input_tokens:\n model_input_tokens[runnable_operator_id].extend_batch_size(len(continuation_tokens))\n model_input_tokens[runnable_operator_id].set_inputs(inputs)\n else: \n for runnable_operator_id in model_input_tokens:\n model_input_tokens[runnable_operator_id].extend_batch_size(len(continuation_tokens))\n \n return continuation_tokens\n \n def forward_post_processing(self, logprobs, input_shape):\n \"\"\"Does some small post processing steps to make sure the correct shape is returned for the logprobs.\n\n Args:\n logprobs (torch.tensor): Returned logprobs\n input_shape (torch.tensor): The shape of the input tokens\n \"\"\"\n if self.needs_input_tokens_lm_eval:\n if torch.is_tensor(logprobs) and len(logprobs.shape) == 3 and logprobs.shape[1] != input_shape[1]:\n # set the output to the correct shape, by adding zeros in the beggining in the first axis\n logprobs = torch.cat([torch.zeros((logprobs.shape[0], input_shape[1] - logprobs.shape[1], logprobs.shape[2]), device=logprobs.device), logprobs], dim=1)\n \n return logprobs"
}
] | from transformers import PreTrainedModel
from .basic_model_loader import load_model, load_tokenizer
from .utils import get_max_length, ENABLE_LOGGING, log
from collections import namedtuple
from transformers import top_k_top_p_filtering
from loguru import logger
from .operators import Operator
from .monitor import Monitor
from .runnable_operators import RunnableOperator, PromptedLLM
from .input import TokenizedInput
from .lm_eval_compatibility import Compatibility
import json
import numpy as np
import torch
import os
import time
import random | 10,659 |
class ModelArithmetic(PreTrainedModel):
"""
Main class for prompt arithmetic. Handles the generation of text based on the formula.
"""
SAVE_FILE = "prompt_arithmetic.json"
_supports_sdpa = True
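    # _supports_sdpa signals to the transformers loading machinery that scaled-dot-product
    # attention may be used; SAVE_FILE presumably names the JSON file the class serializes
    # its formula/settings to.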
|
class ModelArithmetic(PreTrainedModel):
"""
Main class for prompt arithmetic. Handles the generation of text based on the formula.
"""
SAVE_FILE = "prompt_arithmetic.json"
_supports_sdpa = True
| def __init__(self, formula : Operator, default_model : str = None, dtype=torch.bfloat16, intermediate_argmax : bool = False, epsilon = 1e-12, | 5 | 2023-11-21 20:01:08+00:00 | 12k |
huang-yh/SelfOcc | model/head/nerfacc_head/nerfacc_head.py | [
{
"identifier": "BaseTaskHead",
"path": "model/head/base_head.py",
"snippet": "class BaseTaskHead(BaseModule):\n \"\"\"Segmentation heads.\n image backbone -> neck -> lifter -> encoder -> segmentor\n Predicts semantic labels for voxels (and points for lidar segmentation).\n \"\"\"\n\n def __init__(self, init_cfg=None, **kwargs):\n super().__init__(init_cfg)\n \n def forward(\n self, \n representation,\n points=None,\n **kwargs\n ):\n pass"
},
{
"identifier": "RaySampler",
"path": "model/head/nerfacc_head/ray_sampler.py",
"snippet": "class RaySampler(nn.Module):\n\n def __init__(\n self,\n ray_sample_mode='fixed', # fixed, cellular\n ray_number=[192, 400], # 192 * 400\n ray_img_size=[768, 1600],\n ray_upper_crop=0,\n ray_x_dsr_max=None,\n ray_y_dsr_max=None):\n super().__init__()\n\n self.ray_sample_mode = ray_sample_mode\n self.ray_number = ray_number[0] * ray_number[1]\n self.ray_resize = ray_number\n self.ray_img_size = ray_img_size\n assert ray_sample_mode in ['fixed', 'cellular', 'random'] # TODO\n\n if ray_sample_mode == 'fixed':\n ray_x_dsr = 1.0 * ray_img_size[1] / ray_number[1]\n ray_y_dsr = 1.0 * ray_img_size[0] / ray_number[0]\n ray_x = torch.arange(ray_number[1], dtype=torch.float) * ray_x_dsr\n ray_y = torch.arange(ray_number[0], dtype=torch.float) * ray_y_dsr\n rays = torch.stack([\n ray_x.unsqueeze(0).expand(ray_number[0], -1),\n ray_y.unsqueeze(1).expand(-1, ray_number[1])], dim=-1).flatten(0, 1) # HW, 2\n self.register_buffer('rays', rays, False)\n elif ray_sample_mode == 'cellular':\n self.ray_upper_crop = ray_upper_crop\n self.ray_x_dsr_max = 1.0 * ray_img_size[1] / ray_number[1]\n self.ray_y_dsr_max = 1.0 * (ray_img_size[0] - ray_upper_crop) / ray_number[0]\n if ray_x_dsr_max is not None:\n self.ray_x_dsr_max = ray_x_dsr_max\n if ray_y_dsr_max is not None:\n self.ray_y_dsr_max = ray_y_dsr_max\n assert self.ray_x_dsr_max > 1 and self.ray_y_dsr_max > 1\n ray_x = torch.arange(ray_number[1], dtype=torch.float)\n ray_y = torch.arange(ray_number[0], dtype=torch.float)\n rays = torch.stack([\n ray_x.unsqueeze(0).expand(ray_number[0], -1),\n ray_y.unsqueeze(1).expand(-1, ray_number[1])], dim=-1) # H, W, 2\n self.register_buffer('rays', rays, False)\n\n def forward(self):\n device = self.rays.device\n \n if self.ray_sample_mode == 'fixed':\n return self.rays\n elif self.ray_sample_mode == 'random':\n rays = torch.rand(self.ray_number, 2, device=device)\n rays[:, 0] = rays[:, 0] * self.ray_img_size[1]\n rays[:, 1] = rays[:, 1] * self.ray_img_size[0]\n return rays\n elif self.ray_sample_mode == 'cellular':\n ray_x_dsr = np.random.uniform() * (self.ray_x_dsr_max - 1) + 1\n ray_y_dsr = np.random.uniform() * (self.ray_y_dsr_max - 1) + 1\n ray_x_emp_max = self.ray_img_size[1] - self.ray_resize[1] * ray_x_dsr\n ray_y_emp_max = self.ray_img_size[0] - self.ray_upper_crop - self.ray_resize[0] * ray_y_dsr\n ray_x_emp = np.random.uniform() * ray_x_emp_max\n ray_y_emp = np.random.uniform() * ray_y_emp_max\n rays = self.rays.clone() # H, W, 2\n rays[..., 0] = rays[..., 0] * ray_x_dsr + ray_x_emp\n rays[..., 1] = rays[..., 1] * ray_y_dsr + ray_y_emp + self.ray_upper_crop\n return rays.flatten(0, 1)"
},
{
"identifier": "Img2LiDAR",
"path": "model/head/nerfacc_head/img2lidar.py",
"snippet": "class Img2LiDAR(nn.Module):\n\n def __init__(\n self, \n trans_kw,\n trans_kw_eval=None,\n novel_view=None):\n super().__init__()\n if not isinstance(trans_kw, list):\n trans_kw = [trans_kw]\n self.two_split = False\n else:\n assert trans_kw == ['img2lidar', 'temImg2lidar']\n self.two_split = True\n self.trans_kw = trans_kw\n self.trans_kw_eval = trans_kw if trans_kw_eval is None else trans_kw_eval\n self.novel_view = novel_view\n\n @torch.cuda.amp.autocast(enabled=False)\n def forward(self, metas, rays):\n rays = rays.float()\n # prepare img2lidar\n img2lidar = []\n # for key in self.trans_kw:\n # for img_meta in metas:\n # img2lidar.extend(img_meta[key])\n if os.environ.get('eval', 'false') == 'true':\n trans_kw = self.trans_kw_eval\n else:\n trans_kw = self.trans_kw\n for img_meta in metas:\n temp = []\n for key in trans_kw:\n temp.extend(img_meta[key])\n if isinstance(temp[0], (np.ndarray, list)):\n temp = np.asarray(temp)\n else:\n temp = torch.stack(temp, dim=0)\n img2lidar.append(temp)\n if isinstance(img2lidar[0], np.ndarray):\n img2lidar = np.asarray(img2lidar) # B, N, 4, 4\n img2lidar = rays.new_tensor(img2lidar)\n else:\n img2lidar = torch.stack(img2lidar, dim=0)\n\n if self.novel_view is not None:\n z_r = self.novel_view[3]\n rot_mat = rays.new_tensor(get_rm(z_r, 'z', True))\n img2lidar[..., :3, :3] = rot_mat.unsqueeze(0).unsqueeze(0) @ \\\n img2lidar[..., :3, :3]\n \n origin = img2lidar[..., :3, 3] # B, N, 3\n if self.novel_view is not None:\n origin[..., 0] = origin[..., 0] + self.novel_view[0]\n origin[..., 1] = origin[..., 1] + self.novel_view[1]\n origin[..., 2] = origin[..., 2] + self.novel_view[2]\n\n rays = rays.reshape(1, 1, -1, 2)\n # origin = img2lidar[..., :3, 3] # B, N, 3\n rays_pad = torch.cat([\n rays, torch.ones_like(rays[..., :1])], dim=-1) # 1, 1, HW, 3\n direction = torch.matmul(\n img2lidar[..., :3, :3].unsqueeze(2),\n rays_pad.unsqueeze(-1)).squeeze(-1) # B, N, HW, 3\n return origin, direction"
},
{
"identifier": "BEVNeRF",
"path": "model/head/nerfacc_head/bev_nerf.py",
"snippet": "class BEVNeRF(BaseModule):\n\n def __init__(\n self, \n mapping_args: dict,\n # bev_inner=128,\n # bev_outer=32,\n # range_inner=51.2,\n # range_outer=51.2,\n # nonlinear_mode='linear_upscale',\n # z_inner=20,\n # z_outer=10,\n # z_ranges=[-5.0, 3.0, 11.0],\n\n # mlp decoder \n embed_dims=128,\n color_dims=0,\n sem_dims=0,\n density_layers=2,\n\n sh_deg=2,\n sh_act='relu',\n tpv=False,\n init_cfg=None):\n\n super().__init__(init_cfg)\n \n self.mapping = GridMeterMapping(\n # bev_inner,\n # bev_outer,\n # range_inner,\n # range_outer,\n # nonlinear_mode,\n # z_inner,\n # z_outer,\n # z_ranges\n **mapping_args)\n \n self.embed_dims = embed_dims\n self.color_dims = color_dims\n self.sem_dims = sem_dims\n self.z_size = self.mapping.size_d\n self.h_size = self.mapping.size_h\n self.w_size = self.mapping.size_w\n self.bev_size = [self.mapping.size_h, self.mapping.size_w]\n self.density_layers = density_layers\n self.tpv = tpv\n self._init_layers()\n\n self.color_converter = SHRender\n self.sh_deg = sh_deg\n self.sh_act = sh_act\n self.density_color = None\n \n def _init_layers(self):\n density_net = []\n for i in range(self.density_layers - 1):\n density_net.extend([nn.Softplus(), nn.Linear(self.embed_dims, self.embed_dims)])\n if not self.tpv:\n density_net.extend([nn.Softplus(), nn.Linear(self.embed_dims, (1 + self.color_dims+ self.sem_dims) * self.z_size)])\n else:\n density_net.extend([nn.Softplus(), nn.Linear(self.embed_dims, 1 + self.color_dims + self.sem_dims)])\n density_net = nn.Sequential(*density_net)\n self.density_net = density_net\n\n @torch.cuda.amp.autocast(enabled=False)\n def pre_compute_density_color(self, bev):\n if not self.tpv:\n assert bev.dim() == 3\n # bev = bev.unflatten(1, (self.bev_size, self.bev_size))\n bev = bev.unflatten(1, self.bev_size)\n density_color = self.density_net(bev).reshape(*bev.shape[:-1], self.z_size, -1)\n density_color = density_color.permute(0, 4, 1, 2, 3) # bs, C, h, w, d\n else:\n tpv_hw, tpv_zh, tpv_wz = bev\n tpv_hw = tpv_hw.reshape(-1, self.h_size, self.w_size, 1, self.embed_dims)\n tpv_hw = tpv_hw.expand(-1, -1, -1, self.z_size, -1)\n\n tpv_zh = tpv_zh.reshape(-1, self.z_size, self.h_size, 1, self.embed_dims).permute(0, 2, 3, 1, 4)\n tpv_zh = tpv_zh.expand(-1, -1, self.w_size, -1, -1)\n\n tpv_wz = tpv_wz.reshape(-1, self.w_size, self.z_size, 1, self.embed_dims).permute(0, 3, 1, 2, 4)\n tpv_wz = tpv_wz.expand(-1, self.h_size, -1, -1, -1)\n\n tpv = tpv_hw + tpv_zh + tpv_wz\n density_color = self.density_net(tpv).permute(0, 4, 1, 2, 3)\n\n self.density_color = density_color\n # print(f'type of self.density_color: {self.density_color.dtype}')\n\n @torch.cuda.amp.autocast(enabled=False)\n def query_density(self, x):\n if self.density_color.dtype == torch.float16:\n x = x.half()\n grid = self.mapping.meter2grid(x, True)\n\n # grid[..., :2] = grid[..., :2] / (self.bev_size - 1)\n # grid[..., 2:] = grid[..., 2:] / (self.z_size - 1)\n grid = 2 * grid - 1\n grid = grid.reshape(1, -1, 1, 1, 3)\n\n density_color = F.grid_sample(\n self.density_color,\n grid[..., [2, 1, 0]],\n mode='bilinear',\n align_corners=True) # bs, c, n, 1, 1 \n \n density_color = density_color.permute(0, 2, 3, 4, 1).flatten(0, 3) # bs*n, c\n sigma = density_color[:, :1]\n return F.softplus(sigma) # F.relu(sigma)\n\n @torch.cuda.amp.autocast(enabled=False)\n def forward(self, x, condition=None):\n if self.density_color.dtype == torch.float16:\n x = x.half()\n condition = condition.half() if condition is not None else None\n\n grid = self.mapping.meter2grid(x, True)\n\n 
# grid[..., :2] = grid[..., :2] / (self.bev_size - 1)\n # grid[..., 2:] = grid[..., 2:] / (self.z_size - 1)\n grid = 2 * grid - 1\n grid = grid.reshape(1, -1, 1, 1, 3)\n\n density_color = F.grid_sample(\n self.density_color,\n grid[..., [2, 1, 0]],\n mode='bilinear',\n align_corners=True) # bs, c, n, 1, 1 \n \n density_color = density_color.permute(0, 2, 3, 4, 1).flatten(0, 3) # bs*n, c\n sigma, sample_colors, sample_sems = density_color[:, :1], \\\n density_color[:, 1:(1+self.color_dims)], density_color[:, (1+self.color_dims):]\n if self.color_dims > 0:\n sample_colors = self.color_converter(\n None, condition, sample_colors, self.sh_deg, self.sh_act)\n rgb = sample_colors.reshape(-1, 3)\n else:\n rgb = torch.empty((sigma.shape[0], 0), device=sigma.device, dtype=sigma.dtype)\n if self.sem_dims > 0:\n sems = torch.softmax(sample_sems, dim=-1)\n else:\n sems = torch.empty((sigma.shape[0], 0), device=sigma.device, dtype=sigma.dtype)\n return rgb, F.softplus(sigma), sems # F.relu(sigma)\n\n\n @torch.cuda.amp.autocast(enabled=False)\n def forward_geo(self, x):\n if self.density_color.dtype == torch.float16:\n x = x.half()\n\n grid = self.mapping.meter2grid(x, True)\n grid = 2 * grid - 1\n grid = grid.reshape(1, -1, 1, 1, 3)\n\n density_color = F.grid_sample(\n self.density_color,\n grid[..., [2, 1, 0]],\n mode='bilinear',\n align_corners=True) # bs, c, n, 1, 1 \n \n density_color = density_color.permute(0, 2, 3, 4, 1).flatten(0, 3) # bs*n, c\n sigma, sample_sems = density_color[:, :1], density_color[:, (1+self.color_dims):]\n if self.sem_dims > 0:\n sems = torch.softmax(sample_sems, dim=-1)\n else:\n sems = torch.empty((sigma.shape[0], 0), device=sigma.device, dtype=sigma.dtype)\n return F.softplus(sigma), sems # F.relu(sigma)"
},
{
"identifier": "custom_rendering",
"path": "model/head/nerfacc_head/rendering.py",
"snippet": "def custom_rendering(\n # ray marching results\n t_starts: Tensor,\n t_ends: Tensor,\n ray_indices: Optional[Tensor] = None,\n n_rays: Optional[int] = None,\n # radiance field\n rgb_sigma_fn: Optional[Callable] = None,\n rgb_alpha_fn: Optional[Callable] = None,\n # rendering options\n render_bkgd: Optional[Tensor] = None,\n) -> Tuple[Tensor, Tensor, Tensor, Dict]:\n \"\"\"Render the rays through the radience field defined by `rgb_sigma_fn`.\n\n This function is differentiable to the outputs of `rgb_sigma_fn` so it can\n be used for gradient-based optimization. It supports both batched and flattened input tensor.\n For flattened input tensor, both `ray_indices` and `n_rays` should be provided.\n\n\n Note:\n Either `rgb_sigma_fn` or `rgb_alpha_fn` should be provided.\n\n Warning:\n This function is not differentiable to `t_starts`, `t_ends` and `ray_indices`.\n\n Args:\n t_starts: Per-sample start distance. Tensor with shape (n_rays, n_samples) or (all_samples,).\n t_ends: Per-sample end distance. Tensor with shape (n_rays, n_samples) or (all_samples,).\n ray_indices: Ray indices of the flattened samples. LongTensor with shape (all_samples).\n n_rays: Number of rays. Only useful when `ray_indices` is provided.\n rgb_sigma_fn: A function that takes in samples {t_starts, t_ends,\n ray indices} and returns the post-activation rgb (..., 3) and density\n values (...,). The shape `...` is the same as the shape of `t_starts`.\n rgb_alpha_fn: A function that takes in samples {t_starts, t_ends,\n ray indices} and returns the post-activation rgb (..., 3) and opacity\n values (...,). The shape `...` is the same as the shape of `t_starts`.\n render_bkgd: Background color. Tensor with shape (3,).\n\n Returns:\n Ray colors (n_rays, 3), opacities (n_rays, 1), depths (n_rays, 1) and a dict\n containing extra intermediate results (e.g., \"weights\", \"trans\", \"alphas\")\n\n Examples:\n\n .. code-block:: python\n\n >>> t_starts = torch.tensor([0.1, 0.2, 0.1, 0.2, 0.3], device=\"cuda:0\")\n >>> t_ends = torch.tensor([0.2, 0.3, 0.2, 0.3, 0.4], device=\"cuda:0\")\n >>> ray_indices = torch.tensor([0, 0, 1, 1, 1], device=\"cuda:0\")\n >>> def rgb_sigma_fn(t_starts, t_ends, ray_indices):\n >>> # This is a dummy function that returns random values.\n >>> rgbs = torch.rand((t_starts.shape[0], 3), device=\"cuda:0\")\n >>> sigmas = torch.rand((t_starts.shape[0],), device=\"cuda:0\")\n >>> return rgbs, sigmas\n >>> colors, opacities, depths, extras = rendering(\n >>> t_starts, t_ends, ray_indices, n_rays=2, rgb_sigma_fn=rgb_sigma_fn)\n >>> print(colors.shape, opacities.shape, depths.shape)\n torch.Size([2, 3]) torch.Size([2, 1]) torch.Size([2, 1])\n >>> extras.keys()\n dict_keys(['weights', 'alphas', 'trans'])\n\n \"\"\"\n if ray_indices is not None:\n assert (\n t_starts.shape == t_ends.shape == ray_indices.shape\n ), \"Since nerfacc 0.5.0, t_starts, t_ends and ray_indices must have the same shape (N,). 
\"\n\n if rgb_sigma_fn is None and rgb_alpha_fn is None:\n raise ValueError(\n \"At least one of `rgb_sigma_fn` and `rgb_alpha_fn` should be specified.\"\n )\n\n # Query sigma/alpha and color with gradients\n if rgb_sigma_fn is not None:\n if t_starts.shape[0] != 0:\n rgbs, sigmas, sems = rgb_sigma_fn(t_starts, t_ends, ray_indices)\n else:\n rgbs = torch.empty((0, 3), device=t_starts.device)\n sigmas = torch.empty((0,), device=t_starts.device)\n sems = torch.empty((0, 3), device=t_starts.device)\n assert rgbs.shape[-1] == 3 or rgbs.shape[-1] == 0, \"rgbs must have 3 channels, got {}\".format(\n rgbs.shape\n )\n assert (\n sigmas.shape == t_starts.shape\n ), \"sigmas must have shape of (N,)! Got {}\".format(sigmas.shape)\n # Rendering: compute weights.\n weights, trans, alphas = render_weight_from_density(\n t_starts,\n t_ends,\n sigmas,\n ray_indices=ray_indices,\n n_rays=n_rays,\n )\n extras = {\n \"weights\": weights,\n \"alphas\": alphas,\n \"trans\": trans,\n \"sigmas\": sigmas,\n \"rgbs\": rgbs,\n 'sample_sems': sems\n }\n elif rgb_alpha_fn is not None:\n if t_starts.shape[0] != 0:\n rgbs, alphas = rgb_alpha_fn(t_starts, t_ends, ray_indices)\n else:\n rgbs = torch.empty((0, 3), device=t_starts.device)\n alphas = torch.empty((0,), device=t_starts.device)\n assert rgbs.shape[-1] == 3, \"rgbs must have 3 channels, got {}\".format(\n rgbs.shape\n )\n assert (\n alphas.shape == t_starts.shape\n ), \"alphas must have shape of (N,)! Got {}\".format(alphas.shape)\n # Rendering: compute weights.\n weights, trans = render_weight_from_alpha(\n alphas,\n ray_indices=ray_indices,\n n_rays=n_rays,\n )\n extras = {\n \"weights\": weights,\n \"trans\": trans,\n \"rgbs\": rgbs,\n \"alphas\": alphas,\n }\n\n # Rendering: accumulate rgbs, opacities, and depths along the rays.\n colors = accumulate_along_rays(\n weights, values=rgbs, ray_indices=ray_indices, n_rays=n_rays\n )\n sem = accumulate_along_rays(\n weights, values=sems, ray_indices=ray_indices, n_rays=n_rays\n )\n opacities = accumulate_along_rays(\n weights, values=None, ray_indices=ray_indices, n_rays=n_rays\n )\n depths = accumulate_along_rays(\n weights,\n values=(t_starts + t_ends)[..., None] / 2.0,\n ray_indices=ray_indices,\n n_rays=n_rays,\n )\n depths = depths / opacities.clamp_min(torch.finfo(rgbs.dtype).eps)\n\n # opacity_on_ray = torch.gather(opacities.squeeze(-1), dim=0, index=ray_indices)\n ts = (t_starts + t_ends) / 2.0 #/ opacity_on_ray.clamp_min(torch.finfo(rgbs.dtype).eps)\n extras.update({\"ts\": ts})\n\n # Background composition.\n if render_bkgd is not None and colors.shape[1] > 0:\n if render_bkgd == 'random':\n render_bkgd = torch.rand_like(colors).to(colors.device)\n colors = colors + render_bkgd * (1.0 - opacities)\n\n return colors, opacities, depths, sem, extras"
},
{
"identifier": "CustomOccGridEstimator",
"path": "model/head/nerfacc_head/estimator.py",
"snippet": "class CustomOccGridEstimator(OccGridEstimator):\n\n @torch.no_grad()\n def sampling(\n self,\n # rays\n rays_o: Tensor, # [n_rays, 3]\n rays_d: Tensor, # [n_rays, 3]\n # sigma/alpha function for skipping invisible space\n sigma_fn: Optional[Callable] = None,\n alpha_fn: Optional[Callable] = None,\n near_plane: float = 0.0,\n far_plane: float = 1e10,\n t_min: Optional[Tensor] = None, # [n_rays]\n t_max: Optional[Tensor] = None, # [n_rays]\n # rendering options\n render_step_size: float = 1e-3,\n early_stop_eps: float = 1e-4,\n alpha_thre: float = 0.0,\n stratified: bool = False,\n cone_angle: float = 0.0,\n ) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"Sampling with spatial skipping.\n\n Note:\n This function is not differentiable to any inputs.\n\n Args:\n rays_o: Ray origins of shape (n_rays, 3).\n rays_d: Normalized ray directions of shape (n_rays, 3).\n sigma_fn: Optional. If provided, the marching will skip the invisible space\n by evaluating the density along the ray with `sigma_fn`. It should be a\n function that takes in samples {t_starts (N,), t_ends (N,),\n ray indices (N,)} and returns the post-activation density values (N,).\n You should only provide either `sigma_fn` or `alpha_fn`.\n alpha_fn: Optional. If provided, the marching will skip the invisible space\n by evaluating the density along the ray with `alpha_fn`. It should be a\n function that takes in samples {t_starts (N,), t_ends (N,),\n ray indices (N,)} and returns the post-activation opacity values (N,).\n You should only provide either `sigma_fn` or `alpha_fn`.\n near_plane: Optional. Near plane distance. Default: 0.0.\n far_plane: Optional. Far plane distance. Default: 1e10.\n t_min: Optional. Per-ray minimum distance. Tensor with shape (n_rays).\n If profided, the marching will start from maximum of t_min and near_plane.\n t_max: Optional. Per-ray maximum distance. Tensor with shape (n_rays).\n If profided, the marching will stop by minimum of t_max and far_plane.\n render_step_size: Step size for marching. Default: 1e-3.\n early_stop_eps: Early stop threshold for skipping invisible space. Default: 1e-4.\n alpha_thre: Alpha threshold for skipping empty space. Default: 0.0.\n stratified: Whether to use stratified sampling. Default: False.\n cone_angle: Cone angle for linearly-increased step size. 0. means\n constant step size. Default: 0.0.\n\n Returns:\n A tuple of {LongTensor, Tensor, Tensor}:\n\n - **ray_indices**: Ray index of each sample. IntTensor with shape (n_samples).\n - **t_starts**: Per-sample start distance. Tensor with shape (n_samples,).\n - **t_ends**: Per-sample end distance. Tensor with shape (n_samples,).\n\n Examples:\n\n .. 
code-block:: python\n\n >>> ray_indices, t_starts, t_ends = grid.sampling(\n >>> rays_o, rays_d, render_step_size=1e-3)\n >>> t_mid = (t_starts + t_ends) / 2.0\n >>> sample_locs = rays_o[ray_indices] + t_mid * rays_d[ray_indices]\n\n \"\"\"\n\n near_planes = torch.full_like(rays_o[..., 0], fill_value=near_plane)\n far_planes = torch.full_like(rays_o[..., 0], fill_value=far_plane)\n\n if t_min is not None:\n near_planes = torch.clamp(near_planes, min=t_min)\n if t_max is not None:\n far_planes = torch.clamp(far_planes, max=t_max)\n\n if stratified:\n near_planes += torch.rand_like(near_planes) * render_step_size\n intervals, samples, _ = traverse_grids(\n rays_o,\n rays_d,\n self.binaries,\n self.aabbs,\n near_planes=near_planes,\n far_planes=far_planes,\n step_size=render_step_size,\n cone_angle=cone_angle,\n )\n t_starts = intervals.vals[intervals.is_left]\n t_ends = intervals.vals[intervals.is_right]\n ray_indices = samples.ray_indices\n packed_info = samples.packed_info\n\n # # skip invisible space\n # if (alpha_thre > 0.0 or early_stop_eps > 0.0) and (\n # sigma_fn is not None or alpha_fn is not None\n # ):\n # alpha_thre = min(alpha_thre, self.occs.mean().item())\n\n # # Compute visibility of the samples, and filter out invisible samples\n # if sigma_fn is not None:\n # if t_starts.shape[0] != 0:\n # sigmas = sigma_fn(t_starts, t_ends, ray_indices)\n # else:\n # sigmas = torch.empty((0,), device=t_starts.device)\n # assert (\n # sigmas.shape == t_starts.shape\n # ), \"sigmas must have shape of (N,)! Got {}\".format(sigmas.shape)\n # masks = render_visibility_from_density(\n # t_starts=t_starts,\n # t_ends=t_ends,\n # sigmas=sigmas,\n # packed_info=packed_info,\n # early_stop_eps=early_stop_eps,\n # alpha_thre=alpha_thre,\n # )\n # elif alpha_fn is not None:\n # if t_starts.shape[0] != 0:\n # alphas = alpha_fn(t_starts, t_ends, ray_indices)\n # else:\n # alphas = torch.empty((0,), device=t_starts.device)\n # assert (\n # alphas.shape == t_starts.shape\n # ), \"alphas must have shape of (N,)! Got {}\".format(alphas.shape)\n # masks = render_visibility_from_alpha(\n # alphas=alphas,\n # packed_info=packed_info,\n # early_stop_eps=early_stop_eps,\n # alpha_thre=alpha_thre,\n # )\n # ray_indices, t_starts, t_ends = (\n # ray_indices[masks],\n # t_starts[masks],\n # t_ends[masks],\n # )\n return ray_indices, t_starts, t_ends"
}
] | import os
import nerfacc, torch, collections, math
from ..base_head import BaseTaskHead
from .ray_sampler import RaySampler
from .img2lidar import Img2LiDAR
from .bev_nerf import BEVNeRF
from .rendering import custom_rendering
from .estimator import CustomOccGridEstimator
from mmseg.models import HEADS
from mmengine.logging import MMLogger | 7,637 | logger = MMLogger.get_instance('selfocc')
def namedtuple_map(fn, tup):
"""Apply `fn` to each element of `tup` and cast to `tup`'s namedtuple."""
return type(tup)(*(None if x is None else fn(x) for x in tup))
Rays = collections.namedtuple("Rays", ("origins", "viewdirs"))
OCC_THRE = 0.0
@HEADS.register_module()
| logger = MMLogger.get_instance('selfocc')
def namedtuple_map(fn, tup):
"""Apply `fn` to each element of `tup` and cast to `tup`'s namedtuple."""
return type(tup)(*(None if x is None else fn(x) for x in tup))
Rays = collections.namedtuple("Rays", ("origins", "viewdirs"))
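# Note: `Rays` bundles per-ray origins and view directions for volume rendering; these
# presumably come from the Img2LiDAR helper (which returns ray origins and directions in
# LiDAR coordinates) before being handed to the nerfacc estimator/rendering utilities.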
OCC_THRE = 0.0
@HEADS.register_module() | class NeRFAccHead(BaseTaskHead): | 0 | 2023-11-20 12:49:14+00:00 | 12k |
togethercomputer/stripedhyena | src/model.py | [
{
"identifier": "InferenceParams",
"path": "src/cache.py",
"snippet": "class InferenceParams:\n \"\"\"Inference parameters that are passed to the main model in order\n to efficienly calculate and store the context during inference.\"\"\"\n\n max_seqlen: int\n max_batch_size: int\n seqlen_offset: int = 0\n batch_size_offset: int = 0\n key_value_memory_dict: dict = field(default_factory=dict)\n lengths_per_sample: Optional[Tensor] = None\n\n def reset(self, max_seqlen, max_batch_size):\n self.max_seqlen = max_seqlen\n self.max_batch_size = max_batch_size\n self.seqlen_offset = 0\n if self.lengths_per_sample is not None:\n self.lengths_per_sample.zero_()"
},
{
"identifier": "RecurrentInferenceParams",
"path": "src/cache.py",
"snippet": "class RecurrentInferenceParams:\n \"\"\"Inference parameters passed to blocks with recurrent mode.\"\"\"\n\n fir_filter_length: int = 3\n state_dim: int = 16\n # seqlen_offset not used\n seqlen_offset: int = 0\n fir_state_dict: dict = field(default_factory=dict)\n state_dict: dict = field(default_factory=dict)\n\n def reset(self):\n self.fir_filter_length = 3\n self.state_dim = 16\n self.seqlen_offset = 0"
},
{
"identifier": "HyenaInferenceEngine",
"path": "src/engine.py",
"snippet": "class HyenaInferenceEngine:\n def __init__(\n self,\n fir_fn=None,\n iir_prefill_style=\"modal-fft\",\n layer_idx=None,\n ) -> None:\n self.fir_fn = fir_fn\n assert iir_prefill_style in IIR_PREFILL_MODES, f\"iir_prefill_style must be one of {IIR_PREFILL_MODES}\"\n self.iir_prefill_style = iir_prefill_style\n self.layer_idx = layer_idx\n self.low_mem_mode = False\n\n def parallel_fir(\n self,\n fir_fn,\n u,\n weight,\n bias,\n L,\n fir_length=3,\n inference_params=None,\n prefill_mode=None,\n padding_mask=None,\n ):\n \"\"\"Compute the output state of the long convolutional filter.\"\"\"\n # prepare input layout, dimensions and dispatch to fir kernel\n if fir_fn != torch.nn.functional.conv1d:\n z_pre = fir_fn(u)[:, :L] # B, L, D\n z_pre = z_pre.permute(0, 2, 1)\n else:\n u = u.permute(0, 2, 1) # B, D, L\n z_pre = fir_fn(\n u,\n weight,\n bias,\n stride=1,\n padding=fir_length - 1,\n groups=u.shape[1],\n )[..., :L]\n\n # handle padding post fir, the only place with biases\n if type(padding_mask) == torch.Tensor:\n z_pre = z_pre * padding_mask[:, None]\n\n if inference_params is not None:\n # handle seqlen last and dim last cases for `u`\n if fir_fn != torch.nn.functional.conv1d:\n fir_state = u[:, -fir_length + 1 :].permute(0, 2, 1)\n else:\n fir_state = u[..., -fir_length + 1 :]\n else:\n fir_state = None\n\n return z_pre, fir_state\n\n def parallel_iir(\n self,\n z_pre,\n h,\n D,\n L,\n poles,\n residues,\n t,\n dims,\n layer_idx,\n inference_params=None,\n prefill_style=\"fft\",\n fftconv_fn=None,\n padding_mask=None,\n use_flashfft=False,\n column_split_hyena=False,\n long_fir_threshold=None,\n ):\n \"\"\"Compute the output state of the short convolutional filter.\"\"\"\n fft_size = 2 * L\n hidden_size, num_attention_heads, hidden_size_per_attention_head, _, _ = dims\n # Compatibility with training infra that column splits the projections\n if column_split_hyena:\n z = z_pre.reshape(\n z_pre.shape[0],\n num_attention_heads,\n 3 * hidden_size_per_attention_head,\n z_pre.shape[2],\n )\n x2, x1, v = (\n z[:, :, :hidden_size_per_attention_head],\n z[\n :,\n :,\n hidden_size_per_attention_head : 2 * hidden_size_per_attention_head,\n ],\n z[:, :, 2 * hidden_size_per_attention_head :],\n )\n x2, x1, v = (\n x2.reshape(x2.shape[0], -1, x2.shape[-1]),\n x1.reshape(x1.shape[0], -1, x1.shape[-1]),\n v.reshape(v.shape[0], -1, v.shape[-1]),\n )\n else:\n x2, x1, v = z_pre.split([hidden_size, hidden_size, hidden_size], dim=1)\n\n x1v = x1 * v\n\n if inference_params is not None and prefill_style == \"recurrence\":\n y = self.prefill_via_direct_recurrence(\n inference_params=inference_params,\n x1v=x1v,\n L=L,\n poles=poles,\n residues=residues,\n )\n\n else:\n if use_flashfft and (L % 2) == 0: # only works with even L\n y = fftconv_fn(\n x1v.to(dtype=torch.bfloat16).contiguous(),\n h.to(dtype=torch.float32),\n )\n X_s = None\n\n elif long_fir_threshold is None:\n H = torch.fft.rfft(h.to(dtype=torch.float32), n=fft_size) / fft_size\n X_s = torch.fft.fft(x1v.to(dtype=torch.float32), n=fft_size)\n X = X_s[..., : H.shape[-1]]\n if len(z_pre.shape) > 3:\n H = H.unsqueeze(1)\n y = torch.fft.irfft(X * H, n=fft_size, norm=\"forward\")[..., :L]\n\n else:\n assert h.shape[0] == 1, \"batch size must be 1 for long_fir_threshold\"\n h = h[0][:, None] # rearrange to d, 1, l for depthwise conv1d\n h = h[..., :long_fir_threshold]\n y = F.conv1d(\n x1v,\n h.to(dtype=x1v.dtype),\n stride=1,\n groups=x1v.shape[1],\n padding=h.shape[-1] - 1,\n )[..., :L]\n\n y = y.to(dtype=x1v.dtype)\n y = (y + x1v * 
D.unsqueeze(-1)) * x2\n\n if inference_params is not None:\n if prefill_style == \"fft\":\n self.prefill_via_modal_fft(\n inference_params=inference_params,\n x1v=x1v,\n X_s=X_s,\n L=L,\n t=t,\n poles=poles,\n dims=dims,\n layer_idx=layer_idx,\n use_flashfft=use_flashfft,\n fftconv_fn=fftconv_fn,\n )\n\n elif prefill_style == \"recurrence\":\n # recurrent prefill is done before\n pass\n else:\n raise NotImplementedError\n if self.low_mem_mode:\n # TODO: smarter gc\n del z_pre, x2, x1, v, x1v, h, poles, residues\n torch.cuda.empty_cache()\n\n return y.permute(0, 2, 1)\n\n def step_fir(self, u, fir_state, weight, bias=None):\n \"\"\"Step the FIR filter.\n\n Note:\n `fir_state` contains the last `short_filter_length - 1` elements of `u`: `u_(L-2), u_{L-1), ...`\n We assume dimensions of `short_filter_weight` to be `[d, 1, short_filter_len]` (SISO / multi SISO layout).\n \"\"\"\n h0, h = weight[..., 0, -1], weight[..., 0, :-1]\n h0, h = h0[None], h[None]\n y = h0 * u + torch.sum(fir_state * h, dim=-1) + bias\n\n # update\n fir_state = torch.roll(fir_state, -1, dims=2)\n fir_state[..., -1] = u\n return y, fir_state\n\n def step_iir(self, x2, x1, v, D, residues, poles, iir_state, iir_groups=1):\n x1v = x1 * v\n\n residues, poles = (\n torch.view_as_complex(residues.to(torch.float32)),\n torch.view_as_complex(poles.to(torch.float32)),\n )\n # squeeze the dummy seqlen dimension\n # D, state_dim, 1 -> 1, D, state_dim\n residues, poles = residues[..., 0][None], poles[..., 0][None]\n iir_state = poles * iir_state + x1v[..., None]\n\n res_state = torch.sum(residues * iir_state, dim=-1).real\n\n if iir_groups > 1:\n raise NotImplementedError\n y = x2 * (res_state + D * x1v)\n\n return y, iir_state\n\n def prefill_via_fir_caching(self, u, inference_params, L, *args, **kwargs):\n \"\"\"Turns the IIR filter into a FIR and uses a cache for decoding.\"\"\"\n raise NotImplementedError(\":)\")\n\n def prefill_via_direct_recurrence(\n self, inference_params, x1v, L, residues, poles, *args, **kwargs\n ) -> torch.Tensor:\n \"\"\"\n Compute the IIR state via explicit SSM recurrence (modal form)\n\n This is the most memory efficient prefilling method for Hyena filters.\n\n Note:\n dtypes: [state: float32, poles: float32, x1v: bfloat16, output: bfloat16]\n \"\"\"\n state_dim = poles.shape[1]\n x1v_ = x1v[..., None, None] # b, d, l, sdim, reim\n x1v_ = x1v_.repeat(1, 1, 1, state_dim, 2) # b, d, l, sdim, reim\n x1v_[..., 1] = 0\n\n state = 0 * x1v_[:, :, 0]\n output = 0 * x1v_[:, :, :, 0, 0] # b, d, l\n\n # suppress dummy seqlen dimension\n poles = poles[:, :, 0][None]\n residues = residues[:, :, 0][None].repeat(x1v_.shape[0], 1, 1, 1) # b, d, sdim, reim\n\n # state: b, d, sdim, reim\n # poles: 1, d, sdim, reim\n # x1v_: b, d, l, sdim, reim\n for i in range(L):\n state[..., 0] = poles[..., 0] * state[..., 0] - poles[..., 1] * state[..., 1] + x1v_[:, :, i, :, 0]\n state[..., 1] = poles[..., 0] * state[..., 1] + poles[..., 1] * state[..., 0] + x1v_[:, :, i, :, 1] \n output[:, :, i] = torch.sum(residues * state, dim=-2)[..., 0] # .real\n \n inference_params.state_dict[self.layer_idx] = torch.view_as_complex(state.to(dtype=torch.float32))\n\n return output\n\n def prefill_via_hybrid_recurrence(self, inference_params, u, log_poles, x1v_f_a, L, *args, **kwargs):\n \"\"\"\n Compute the IIR state via hybrid recurrence-convolution over blocks\n \"\"\"\n raise NotImplementedError(\":)\")\n\n def prefill_via_scan(self, u, inference_params=None, *args, **kwargs):\n raise NotImplementedError\n\n def 
prefill_via_canonical_fft(self, u, inference_params=None, *args, **kwargs):\n \"\"\"\n Compute the IIR state via a single FFT with the denominator of the SSM in companion form.\n\n This is the most memory efficient \"parallelized\" prefilling method for Hyena.\n\n From: https://arxiv.org/abs/2310.18780\n \"\"\"\n raise NotImplementedError(\":)\")\n\n def prefill_via_modal_fft(\n self,\n inference_params,\n x1v,\n L,\n poles,\n t,\n dims,\n layer_idx,\n X_s=None,\n use_flashfft=False,\n fftconv_fn=None,\n state_dtype=torch.complex64,\n *args,\n **kwargs,\n ):\n \"\"\"\n Compute the IIR state via a single FFT, using the poles of the SSM in modal form.\n \"\"\"\n # When the model has a long convolution derived from a SSM in modal form and prefill_style is \"fft\",\n # we split the filter into poles and residues and reuse FFT computation on the input.\n # This optimization is currently not supported when using flashfftconv.\n hidden_size, _, _, state_size, hyena_filter_groups = dims\n\n if use_flashfft:\n # using real states\n poles = poles.squeeze().reshape(poles.shape[0], -1)[..., None]\n\n state_s = poles**t\n if hyena_filter_groups > 1:\n raise NotImplementedError\n\n x1v = x1v[:, :, None].repeat(1, 1, 2 * state_size, 1)\n x1v = x1v.reshape(x1v.shape[0], -1, x1v.shape[-1])\n state_s = state_s[None]\n\n state = fftconv_fn(\n x1v.contiguous(),\n state_s.to(dtype=torch.float32),\n )\n state = state[..., L - 1].reshape(x1v.shape[0], hidden_size, state_size, 2)\n state = torch.view_as_complex(state.contiguous().to(dtype=torch.float32))\n inference_params.state_dict[self.layer_idx] = state\n else:\n assert X_s is not None\n bs = x1v.shape[0]\n fft_size = 2 * L\n poles = torch.view_as_complex(poles.to(torch.float32))\n state_s = poles**t\n state_S = torch.fft.fft(state_s, n=fft_size).repeat(bs, 1, 1, 1) # B, D, state_dim, 2 * L\n if hyena_filter_groups > 1:\n state_S = state_S.repeat_interleave(hidden_size // hyena_filter_groups, 1)\n state = torch.fft.ifft(X_s[..., None, :] * state_S, n=fft_size)\n inference_params.state_dict[layer_idx] = state[..., L - 1].to(dtype=state_dtype)\n\n def _compute_state(self, log_poles, u, t, L, *args, **kwargs):\n \"\"\"\n Compute the IIR state given an input `u` and log_poles of the modal system.\n \"\"\"\n bs = u.shape[0]\n fft_size = 2 * L\n U = torch.fft.rfft(u.to(torch.float32), n=fft_size)\n fft_size = 2 * L\n x = (log_poles * t).exp()\n # [batch, hidden_size, state_dim, 2 * seqlen]\n X = torch.fft.fft(x, n=fft_size).repeat(bs, 1, 1, 1)\n state = torch.fft.ifft(U[..., None, :] * X, n=fft_size)[..., :L]\n return state"
},
{
"identifier": "ParallelGatedMLP",
"path": "src/layers.py",
"snippet": "class ParallelGatedMLP(nn.Module):\n def __init__(\n self,\n config,\n ):\n super().__init__()\n\n multiple_of = config.get(\"inner_size_multiple_of\", 64)\n self.act = F.silu\n self.multiple_of = multiple_of * config.model_parallel_size\n\n inner_size = int(2 * config.hidden_size * 4 / 3)\n inner_size = self.multiple_of * ((inner_size + self.multiple_of - 1) // self.multiple_of)\n if config.get(\"inner_mlp_size\", None) is not None:\n inner_size = config.inner_mlp_size\n\n self.l1 = nn.Linear(\n in_features=config.hidden_size,\n out_features=inner_size,\n bias=False,\n )\n self.l2 = nn.Linear(\n in_features=config.hidden_size,\n out_features=inner_size,\n bias=False,\n )\n self.l3 = nn.Linear(\n in_features=inner_size,\n out_features=config.hidden_size,\n bias=False,\n )\n\n def forward(self, z):\n z1, z2 = self.l1(z), self.l2(z)\n z1, z2 = grab_first_if_tuple(z1), grab_first_if_tuple(z2)\n y = self.l3(self.act(z1) * z2)\n return grab_first_if_tuple(y)"
},
{
"identifier": "RMSNorm",
"path": "src/layers.py",
"snippet": "class RMSNorm(torch.nn.Module):\n def __init__(self, config):\n super(RMSNorm, self).__init__()\n self.eps, self.hidden_size = config.eps, config.hidden_size\n self.scale = torch.nn.Parameter(torch.ones(self.hidden_size))\n self.register_parameter(\"scale\", self.scale)\n self.scale = self.scale.to(config.params_dtype)\n self.use_flash_rmsnorm = config.get(\"use_flash_rmsnorm\", False)\n\n if self.use_flash_rmsnorm:\n from flash_attn.ops.rms_norm import rms_norm as rmsnorm_func\n\n self.rmsnorm_func = rmsnorm_func\n\n def forward(self, x):\n if self.use_flash_rmsnorm:\n return self.rmsnorm_func(x, self.scale, self.eps)\n else:\n y = x / (x.norm(2, dim=-1, keepdim=True) * self.hidden_size ** (-1.0 / 2) + self.eps)\n return self.scale * y"
},
{
"identifier": "VocabParallelEmbedding",
"path": "src/layers.py",
"snippet": "class VocabParallelEmbedding(nn.Embedding):\n \"Adapted from https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/modules/embedding.py\"\n\n def __init__(self, config):\n vocab_size, process_group, padding_idx = (\n config.vocab_size,\n config.get(\"process_group\", None),\n config.get(\"padding_idx\", None),\n )\n self.process_group = process_group\n if process_group is not None:\n world_size = torch.distributed.get_world_size(process_group)\n if vocab_size % world_size != 0:\n raise ValueError(f\"vocab_size ({vocab_size}) must be divisible by \" f\"world_size ({world_size})\")\n if world_size > 1 and padding_idx is not None:\n raise RuntimeError(\"ParallelEmbedding does not support padding_idx\")\n else:\n world_size = 1\n super().__init__(\n vocab_size // world_size,\n embedding_dim=config.hidden_size,\n padding_idx=padding_idx,\n )\n\n def embed(self, input: Tensor) -> Tensor:\n if self.process_group is None:\n return self.forward(input)\n else:\n rank = torch.distributed.get_rank(self.process_group)\n vocab_size = self.num_embeddings\n vocab_start_index, vocab_end_index = (\n rank * vocab_size,\n (rank + 1) * vocab_size,\n )\n # Create a mask of valid vocab ids (1 means it needs to be masked).\n input_ids_mask = (input < vocab_start_index) | (input >= vocab_end_index)\n input = input - vocab_start_index\n input[input_ids_mask] = 0\n embeddings = self.forward(input)\n embeddings[input_ids_mask] = 0.0\n # Reduce to the global process group\n torch.distributed.all_reduce(embeddings, group=self.process_group)\n return embeddings\n\n def unembed(self, u: Tensor) -> Tensor:\n if self.process_group is None:\n return u @ self.weight.T\n else:\n raise NotImplementedError"
},
{
"identifier": "column_split",
"path": "src/utils.py",
"snippet": "def column_split(x, num_heads, head_size):\n \"\"\"Split a tensor with `num_heads` alongside the head dimension, instead of\n across heads. Fixed to three projections\n \"\"\"\n\n x_reshaped = x.reshape(\n x.shape[0],\n num_heads,\n 3 * head_size,\n )\n\n x2, x1, v = (\n x_reshaped[:, :, :head_size],\n x_reshaped[\n :,\n :,\n head_size : 2 * head_size,\n ],\n x_reshaped[:, :, 2 * head_size :],\n )\n x2, x1, v = (\n x2.reshape(x2.shape[0], -1),\n x1.reshape(x1.shape[0], -1),\n v.reshape(v.shape[0], -1),\n )\n return x2, x1, v"
},
{
"identifier": "print_rank_0",
"path": "src/utils.py",
"snippet": "def print_rank_0(message, debug=False, end=\"\\n\"):\n \"\"\"Print from rank 0 only.\"\"\"\n if torch.distributed.is_initialized():\n if torch.distributed.get_rank() == 0:\n print(message, flush=True, end=end)\n else:\n print(message, flush=True, end=end)"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
from src.cache import InferenceParams, RecurrentInferenceParams
from src.engine import HyenaInferenceEngine
from src.layers import ParallelGatedMLP, RMSNorm, VocabParallelEmbedding
from src.utils import column_split, print_rank_0
from flash_attn.modules.mha import MHA
from flashfftconv import FlashFFTConv | 7,939 | h = (residues * (log_poles * self.t).exp()).real.sum(1)[None]
return h, filter_dtype, log_poles, residues
class ParallelGatedConvBlock(nn.Module):
def __init__(self, config, layer_idx) -> None:
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.low_mem_mode = config.get("low_mem_mode", False)
dtype = config.get("hyena_block_dtype", torch.float32)
mlp_dtype = config.get("mlp_dtype", torch.bfloat16)
self.pre_norm, self.post_norm = RMSNorm(config).to(dtype=dtype), RMSNorm(config).to(dtype=dtype)
self.filter = ParallelHyenaFilter(config, layer_idx).to(dtype=dtype)
self.projections = nn.Linear(config.hidden_size, 3 * config.hidden_size)
self.out_filter_dense = nn.Linear(config.hidden_size, config.hidden_size).to(dtype)
self.mlp = ParallelGatedMLP(config).to(dtype=mlp_dtype)
self.proj_norm_fn = self.proj_norm
self.res_mlp_norm_fn = self.res_mlp_norm
if self.config.get("compile", False):
self.proj_norm_fn = torch.compile(self.proj_norm, fullgraph=True, dynamic=False, mode="reduce-overhead")
self.res_mlp_norm_fn = torch.compile(
self.res_mlp_norm, fullgraph=True, dynamic=False, mode="reduce-overhead"
)
def proj_norm(self, x):
return self.projections(self.pre_norm(x))
def res_mlp_norm(self, x):
return self.mlp(self.post_norm(x)) + x
def forward(self, u, inference_params=None, padding_mask=None, *args, **kwargs):
z = self.proj_norm_fn(u)
if type(padding_mask) == torch.Tensor: # guard against bias
z = z * padding_mask[..., None]
z, inference_params = self.filter(z, inference_params=inference_params, padding_mask=padding_mask)
z_in = self.out_filter_dense(z) + u
if type(padding_mask) == torch.Tensor: # guard against bias
z_in = z_in * padding_mask[..., None]
y = self.res_mlp_norm_fn(z_in)
return y, inference_params
def get_block(config, layer_idx, flash_fft=None):
if layer_idx in config.attn_layer_idxs:
return AttentionBlock(config, layer_idx)
elif layer_idx in config.hyena_layer_idxs:
block = ParallelGatedConvBlock(config, layer_idx)
        if config.get("use_flashfft", False):
block.filter.fftconv_fn = flash_fft
return block
else:
raise NotImplementedError
class StripedHyena(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.embedding_layer = VocabParallelEmbedding(config)
self.norm = RMSNorm(config) if config.get("final_norm", True) else None
        self.unembed = self.embedding_layer if config.tie_embeddings else VocabParallelEmbedding(config)
        if config.get("use_flashfft", False):
self.flash_fft = FlashFFTConv(2 * config.seqlen, dtype=torch.bfloat16)
else:
self.flash_fft = None
self.blocks = nn.ModuleList(
get_block(config, layer_idx, flash_fft=self.flash_fft) for layer_idx in range(config.num_layers)
)
def forward(self, x, inference_params_dict=None, padding_mask=None):
L = x.shape[1]
x = self.embedding_layer.embed(x)
if inference_params_dict is not None:
x, inference_params_dict_out = self.stateful_forward(
x,
inference_params_dict=inference_params_dict,
)
else:
x, inference_params_dict_out = self.stateless_forward(x, padding_mask=padding_mask)
x = self.norm(x)
x = self.unembed.unembed(x)
return x, inference_params_dict_out
def stateful_forward(self, x, inference_params_dict=None):
for block_idx, block in enumerate(self.blocks):
block_name = "mha" if block_idx in self.config.attn_layer_idxs else "hyena"
inference_params = inference_params_dict[block_name]
x, _ = block(x, inference_params=inference_params)
return x, inference_params_dict
def stateless_forward(self, x, padding_mask=None):
if type(padding_mask) == torch.Tensor:
x = x * padding_mask[..., None]
for _, block in enumerate(self.blocks):
x, _ = block(x, inference_params=None, padding_mask=padding_mask)
return x, None
def initialize_inference_params(self):
print_rank_0("Initializing inference params...")
inference_params_dict = {
"mha": InferenceParams(
max_seqlen=self.config.get("max_seqlen", 8192),
max_batch_size=self.config.get("max_batch_size", 1),
seqlen_offset=0,
),
| # Copyright (c) Together
# This software is distributed under the terms of the Apache License, Version 2.0
# Author: Michael Poli
# Note: MP and PP utilities are removed for ease of use and editing.
try:
    from flash_attn.modules.mha import MHA
except ImportError:
    "flash_attn not installed"
class AttentionBlock(nn.Module):
def __init__(self, config, layer_idx) -> None:
super().__init__()
self.config = config
self.pre_norm, self.post_norm = RMSNorm(config), RMSNorm(config)
self.layer_idx = layer_idx
self.proj_groups = config.get("proj_groups", 1)
dtype = config.get("attn_block_dtype", torch.bfloat16)
mlp_dtype = config.get("mlp_dtype", torch.bfloat16)
self.num_attention_heads = config.num_attention_heads
self.hidden_size_per_attention_head = config.hidden_size // config.num_attention_heads
self.counter = 0
self.inner_mha_cls = MHA(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
num_heads_kv=config.num_attention_heads // self.proj_groups,
rotary_emb_dim=config.hidden_size // config.num_attention_heads,
qkv_proj_bias=config.get("qkv_proj_bias", True),
rotary_emb_base=config.get("rotary_emb_base", 10000),
causal=True,
layer_idx=layer_idx,
out_proj_bias=config.get("mha_out_proj_bias", True),
use_flash_attn=self.config.use_flash_attn,
).to(dtype=dtype)
if self.config.get("smeared_gqa", False):
self.inner_mha_cls.num_heads_kv = self.inner_mha_cls.num_heads
self.inner_mha_cls.rotary_emb.register_buffer("inv_freq", self.inner_mha_cls.rotary_emb.inv_freq)
self.mlp = ParallelGatedMLP(config).to(dtype=mlp_dtype)
def forward(self, u, inference_params=None, padding_mask=None, *args, **kwargs):
if (
type(padding_mask) == torch.Tensor
): # workaround for masking bug in FA. This works because Wqkv does not have bias
# and attention scores will be also automatically zeroed.
u = u * padding_mask[..., None]
u = (
self.inner_mha_cls(
self.pre_norm(u),
inference_params=inference_params,
)
+ u
)
if type(padding_mask) == torch.Tensor: # guard against bias
u = u * padding_mask[..., None]
u = self.mlp(self.post_norm(u)) + u
return u, None
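# ParallelHyenaFilter (below) implements the Hyena operator used in the non-attention layers:
# a short depthwise FIR convolution over the three projected streams, followed by a long
# implicit filter parameterized in modal form (complex poles/residues) with multiplicative
# gating, and separate parallel (prefill) and sequential (decode) code paths.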
class ParallelHyenaFilter(nn.Module):
def __init__(self, config, layer_idx) -> None:
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.hyena_filter_groups = config.get("hyena_filter_groups", self.config.hidden_size)
self.use_flashfft = config.get("use_flashfft", False)
self.state_size = config.state_size
self.hidden_size = config.hidden_size
self.num_filters = config.num_filters
self.inference_mode = config.get("inference_mode", True)
self.counter = 0
self.column_split_hyena = config.get("column_split_hyena", True)
assert self.hidden_size % self.num_filters == 0 and self.num_filters <= self.hidden_size
self.D = nn.Parameter(torch.zeros(self.hidden_size))
# attention heads are not used except to split post short_filter
# projections in the same way as the checkpoint
self.num_attention_heads = config.num_attention_heads
self.hidden_size_per_attention_head = self.hidden_size // self.num_attention_heads
# after preprocessing here we can save the new checkpoint
self.short_filter_length = config.short_filter_length
self.short_filter_weight = nn.Parameter(torch.randn(3 * config.hidden_size, 1, config.short_filter_length))
self.short_filter_bias = (
nn.Parameter(torch.randn(3 * config.hidden_size)) if config.short_filter_bias else None
)
self.engine = HyenaInferenceEngine(layer_idx=layer_idx)
self.use_flash_depthwise = config.get("use_flash_depthwise", False)
self.data_dtype = None
if self.use_flash_depthwise:
self.fir_fn = FlashDepthwiseConv1d(
channels=3 * self.hidden_size,
kernel_size=self.short_filter_length,
padding=self.short_filter_length - 1,
weights=self.short_filter_weight,
bias=self.short_filter_bias,
device=None,
dtype=self.config.get("depthwise_dtype", torch.bfloat16),
)
else:
self.fir_fn = F.conv1d
self.fftconv_fn = None
self.long_fir_threshold = config.get("long_fir_threshold", None)
if self.long_fir_threshold is not None:
assert self.use_flashfft is False, "long_fir_threshold not compatible with fused flashfft"
self.num_systems = self.hidden_size // self.hyena_filter_groups
poles = torch.randn(self.num_systems, self.state_size, 1, 2)
# TODO: bring over init from internals
poles[..., 0] = 1e-2 * torch.randn(self.num_systems, self.state_size, 1)
poles[..., 1] = 1e-3 * torch.randn(self.num_systems, self.state_size, 1)
self.poles = nn.Parameter(poles)
self.residues = nn.Parameter(torch.randn(self.num_systems, self.state_size, 1, 2))
self.h = None
def forward(self, u, inference_params=None, padding_mask=None, *args, **kwargs):
if inference_params is not None and self.layer_idx in inference_params.fir_state_dict.keys():
return self.sequential_forward(u, inference_params)
else:
return self.parallel_forward(u, inference_params, padding_mask)
def parallel_forward(self, u, inference_params=None, padding_mask=None):
L = u.shape[1]
z_pre, fir_state = self.engine.parallel_fir(
self.fir_fn,
u,
self.short_filter_weight,
self.short_filter_bias,
L,
fir_length=self.short_filter_length,
inference_params=inference_params,
padding_mask=padding_mask,
)
if inference_params:
inference_params.fir_state_dict[self.layer_idx] = fir_state
if self.h is None:
h, filter_dtype, poles, residues = self.compute_filter(L, u.device)
else:
h = self.h
filter_dtype = self.h.dtype
if self.hyena_filter_groups > 1:
h = h.repeat_interleave(self.hidden_size // self.hyena_filter_groups, 1)
# if inference_params is not None, we plan to perform generation:
# prefilling is handled by the engine.
dims = (
self.hidden_size,
self.num_attention_heads,
self.hidden_size_per_attention_head,
self.state_size,
self.hyena_filter_groups,
)
y = self.engine.parallel_iir(
z_pre,
h,
self.D,
L,
t=self.t,
poles=self.poles,
residues=self.residues,
dims=dims,
inference_params=inference_params,
layer_idx=self.layer_idx,
prefill_style=self.config.get("prefill_style", "fft"),
use_flashfft=self.use_flashfft,
fftconv_fn=self.fftconv_fn,
column_split_hyena=self.column_split_hyena,
long_fir_threshold=self.long_fir_threshold,
padding_mask=padding_mask,
)
return y, inference_params
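# parallel_forward is the prefill path: the short FIR and the long modal filter are applied
# over the full sequence at once (typically via FFT convolution in HyenaInferenceEngine), and
# when inference_params is given the FIR tail and the IIR state are cached for decoding.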
def sequential_forward(self, u, inference_params):
if self.data_dtype is None:
self.data_dtype = u.dtype
if len(u.shape) > 2:
u = u[:, -1]
fir_state, iir_state = (
inference_params.fir_state_dict[self.layer_idx],
inference_params.state_dict[self.layer_idx],
)
z_pre, fir_state = self.engine.step_fir(
u, fir_state, weight=self.short_filter_weight, bias=self.short_filter_bias
)
x2, x1, v = (
column_split(z_pre, self.num_attention_heads, self.hidden_size_per_attention_head)
if self.column_split_hyena
else z_pre.split([self.hidden_size, self.hidden_size, self.hidden_size], dim=1)
)
y, iir_state = self.engine.step_iir(
x2,
x1,
v,
self.D,
self.residues,
self.poles,
iir_state,
iir_groups=self.hyena_filter_groups,
)
inference_params.fir_state_dict[self.layer_idx] = fir_state
inference_params.state_dict[self.layer_idx] = iir_state
y = y.to(dtype=self.data_dtype)
return y[:, None], inference_params
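# sequential_forward is the single-token decode path: it steps the cached FIR window and the
# complex modal (IIR) state once per new token via the engine, avoiding a full re-convolution
# of the sequence at every step.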
def update_time(self, L, device):
"""
Set [0, 1, ..., L-1] where L is the length of the current batch of inputs.
If L is greater than the length of the previous batch, then the time vector is
reinitialized. Otherwise, the time vector is truncated from cache.
"""
if not hasattr(self, "t"):
self.t = torch.arange(L, device=device)[None, None]
elif self.t.shape[-1] < L:
self.t = torch.arange(L, device=device)[None, None]
else:
self.t = self.t[..., :L]
def compute_filter(self, L, device):
self.update_time(L, device)
filter_dtype = torch.float32
residues, log_poles = (
torch.view_as_complex(self.residues.to(filter_dtype)),
torch.view_as_complex(self.poles.to(filter_dtype)).log(),
)
h = (residues * (log_poles * self.t).exp()).real.sum(1)[None]
return h, filter_dtype, log_poles, residues
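# Modal-form long filter: with complex poles p_s and residues R_s per channel,
# h[t] = Re( sum_s R_s * p_s**t ) for t = 0, ..., L-1, computed above as
# (residues * exp(t * log(poles))).real summed over the state dimension.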
class ParallelGatedConvBlock(nn.Module):
def __init__(self, config, layer_idx) -> None:
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.low_mem_mode = config.get("low_mem_mode", False)
dtype = config.get("hyena_block_dtype", torch.float32)
mlp_dtype = config.get("mlp_dtype", torch.bfloat16)
self.pre_norm, self.post_norm = RMSNorm(config).to(dtype=dtype), RMSNorm(config).to(dtype=dtype)
self.filter = ParallelHyenaFilter(config, layer_idx).to(dtype=dtype)
self.projections = nn.Linear(config.hidden_size, 3 * config.hidden_size)
self.out_filter_dense = nn.Linear(config.hidden_size, config.hidden_size).to(dtype)
self.mlp = ParallelGatedMLP(config).to(dtype=mlp_dtype)
self.proj_norm_fn = self.proj_norm
self.res_mlp_norm_fn = self.res_mlp_norm
if self.config.get("compile", False):
self.proj_norm_fn = torch.compile(self.proj_norm, fullgraph=True, dynamic=False, mode="reduce-overhead")
self.res_mlp_norm_fn = torch.compile(
self.res_mlp_norm, fullgraph=True, dynamic=False, mode="reduce-overhead"
)
def proj_norm(self, x):
return self.projections(self.pre_norm(x))
def res_mlp_norm(self, x):
return self.mlp(self.post_norm(x)) + x
def forward(self, u, inference_params=None, padding_mask=None, *args, **kwargs):
z = self.proj_norm_fn(u)
if type(padding_mask) == torch.Tensor: # guard against bias
z = z * padding_mask[..., None]
z, inference_params = self.filter(z, inference_params=inference_params, padding_mask=padding_mask)
z_in = self.out_filter_dense(z) + u
if type(padding_mask) == torch.Tensor: # guard against bias
z_in = z_in * padding_mask[..., None]
y = self.res_mlp_norm_fn(z_in)
return y, inference_params
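# ParallelGatedConvBlock wires a Hyena layer into the residual stream: pre-norm -> 3x-width
# projection -> ParallelHyenaFilter -> output dense + residual, then post-norm gated MLP +
# residual; the fused proj/norm and mlp/norm helpers can optionally be torch.compile'd.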
def get_block(config, layer_idx, flash_fft=None):
if layer_idx in config.attn_layer_idxs:
return AttentionBlock(config, layer_idx)
elif layer_idx in config.hyena_layer_idxs:
block = ParallelGatedConvBlock(config, layer_idx)
        if config.get("use_flashfft", False):
block.filter.fftconv_fn = flash_fft
return block
else:
raise NotImplementedError
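# get_block dispatches per layer index: attention layers (config.attn_layer_idxs) get an
# AttentionBlock, Hyena layers (config.hyena_layer_idxs) get a ParallelGatedConvBlock whose
# filter shares the single FlashFFTConv instance when use_flashfft is enabled.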
class StripedHyena(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.embedding_layer = VocabParallelEmbedding(config)
self.norm = RMSNorm(config) if config.get("final_norm", True) else None
        self.unembed = self.embedding_layer if config.tie_embeddings else VocabParallelEmbedding(config)
        if config.get("use_flashfft", False):
self.flash_fft = FlashFFTConv(2 * config.seqlen, dtype=torch.bfloat16)
else:
self.flash_fft = None
self.blocks = nn.ModuleList(
get_block(config, layer_idx, flash_fft=self.flash_fft) for layer_idx in range(config.num_layers)
)
def forward(self, x, inference_params_dict=None, padding_mask=None):
L = x.shape[1]
x = self.embedding_layer.embed(x)
if inference_params_dict is not None:
x, inference_params_dict_out = self.stateful_forward(
x,
inference_params_dict=inference_params_dict,
)
else:
x, inference_params_dict_out = self.stateless_forward(x, padding_mask=padding_mask)
x = self.norm(x)
x = self.unembed.unembed(x)
return x, inference_params_dict_out
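# StripedHyena.forward: embed token ids, run every block (stateful when an inference-params
# dict is supplied, otherwise stateless with an optional padding mask), then apply the final
# RMSNorm and unembed back to vocabulary logits.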
def stateful_forward(self, x, inference_params_dict=None):
for block_idx, block in enumerate(self.blocks):
block_name = "mha" if block_idx in self.config.attn_layer_idxs else "hyena"
inference_params = inference_params_dict[block_name]
x, _ = block(x, inference_params=inference_params)
return x, inference_params_dict
def stateless_forward(self, x, padding_mask=None):
if type(padding_mask) == torch.Tensor:
x = x * padding_mask[..., None]
for _, block in enumerate(self.blocks):
x, _ = block(x, inference_params=None, padding_mask=padding_mask)
return x, None
def initialize_inference_params(self):
print_rank_0("Initializing inference params...")
inference_params_dict = {
"mha": InferenceParams(
max_seqlen=self.config.get("max_seqlen", 8192),
max_batch_size=self.config.get("max_batch_size", 1),
seqlen_offset=0,
), | "hyena": RecurrentInferenceParams( | 1 | 2023-11-21 15:56:04+00:00 | 12k |
MobileTeleSystems/CoolGraph | cool_graph/runners.py | [
{
"identifier": "RawDataProcessor",
"path": "cool_graph/data/data_processor.py",
"snippet": "class RawDataProcessor:\n \"\"\"\n Preprocessing datasets.\n\n Args:\n groups_names (Dict[int, str]): Name of groups in nodes.\n group_names_node_features (Dict[str, List[str]]): Name of features in groups in nodes.\n mon_nodes_path (str): path to nodes\n mon_edges_path (str): path to edges\n mon_labels_path (str): path to labels\n edge_index_cols (List[str]): columns of edge index in dataset\n label_index_col (str): columns of label index in dataset\n label_mask_col (str): mask of label columns\n read_edge_attr (bool): is set True - read edge features. Default to True.\n group_mask_col (str): Mask for group in data. Default to None.\n features_edges_names (List[str]): List of features on edge. Default to None.\n label_cols (List[str]): List of label columns. Default to None.\n target_names (List[str]): List of target names. Default to None.\n \"\"\"\n\n @staticmethod\n def _check_cols_in_parquet(columns: List[str], path: str) -> bool:\n \"\"\"Cheking colomns in parquet files.\n\n Args:\n columns (List[str]): columns of dataset\n path (str): path to dataset\n\n Raises:\n ValueError: if there is no any files with parquet extension\n ValueError: if there is no path with parquet extension\n\n Returns:\n bool: True if columns and path are right\n \"\"\"\n if columns:\n set_cols = set(columns if type(columns) == list else [columns])\n try:\n parquet_file = [path] if path.endswith(\".parquet\") else []\n parquet_file = (\n parquet_file\n + glob.glob(os.path.join(path, \"*.parquet\"), recursive=True)\n + glob.glob(os.path.join(path, \"**/*.parquet\"), recursive=True)\n )\n parquet_file = parquet_file[0]\n except Exception as ex:\n raise ValueError(\n f\"\"\"\n Couldn't find any files with parquet extension in {path}\\n\n Original exception: \\n\n {str(ex)}\n \"\"\"\n )\n pqt_cols = set(pq.read_schema(parquet_file).names)\n if not set_cols.issubset(pqt_cols):\n diff = set_cols - pqt_cols\n raise ValueError(\n f\"\"\"\n \"{'\", \"'.join(diff)}\" were not found in {path}\n \"\"\"\n )\n return True\n\n def __init__(\n self,\n groups_names: Dict[int, str],\n group_names_node_features: Dict[str, List[str]],\n mon_nodes_path: str,\n mon_edges_path: str,\n mon_labels_path: str,\n edge_index_cols: List[str],\n label_index_col: str,\n label_mask_col: Optional[str] = None,\n read_edge_attr: bool = True,\n group_mask_col: Optional[str] = None,\n features_edges_names: Optional[List[str]] = None,\n label_cols: Optional[List[str]] = None,\n target_names: Optional[List[str]] = None,\n ) -> None:\n self._check_cols_in_parquet(group_mask_col, mon_nodes_path)\n self._check_cols_in_parquet(label_cols, mon_labels_path)\n self._check_cols_in_parquet([label_mask_col], mon_labels_path)\n self._check_cols_in_parquet([label_index_col], mon_labels_path)\n\n for key, val in group_names_node_features.items():\n try:\n self._check_cols_in_parquet(val, mon_nodes_path)\n except Exception as ex:\n raise ValueError(\n f\"\"\"\n {str(ex)} for group {key} aka {groups_names[key]}\n \"\"\"\n )\n\n df_node_feats = pq.read_table(mon_nodes_path).to_pandas()\n df_labels = pq.read_table(mon_labels_path, columns=label_cols).to_pandas()\n df_edge_index = pq.read_table(\n mon_edges_path, columns=edge_index_cols\n ).to_pandas()\n\n # Nodes\n node_features = torch.FloatTensor(df_node_feats.values)\n group_mask = torch.IntTensor(df_node_feats[group_mask_col].values)\n node_features_names_fixed = df_node_feats.columns.tolist()\n\n # Labels\n df_labels.set_index(label_index_col, inplace=True)\n df_labels.sort_index(inplace=True)\n 
df_labels.reset_index(inplace=True)\n targets = {t: torch.LongTensor(df_labels[t].values) for t in target_names}\n label_mask = torch.BoolTensor(df_labels[label_mask_col].values)\n index = torch.LongTensor(df_labels[label_index_col].values)\n\n try:\n df_node_feats.shape[0] == df_labels.shape[0]\n except Exception as ex:\n raise ValueError(\n f\"\"\"\n Length of features must be equal to the length of labels.\n \"\"\"\n )\n\n # Edges\n edge_index = torch.LongTensor(df_edge_index.values).T\n\n # Nodes\n self.node_features = node_features\n self.group_mask = group_mask\n self.targets = targets\n self.label_mask = label_mask\n self.index = index\n self.edge_index = edge_index\n\n # Edge features\n if read_edge_attr:\n df_edge_feats = pq.read_table(\n mon_edges_path, columns=features_edges_names\n ).to_pandas()\n\n self.edge_features = torch.FloatTensor(df_edge_feats.values)\n self.edge_features_names = df_edge_feats.columns.tolist()\n else:\n self.edge_features = None\n self.edge_features_names = None\n\n self.read_edge_attr = read_edge_attr\n\n # Mappings\n inverse = {v: k for k, v in groups_names.items()}\n self.group_indices_node_findex = {\n inverse[key]: [node_features_names_fixed.index(f) for f in value]\n for key, value in group_names_node_features.items()\n }\n self.groups_names = groups_names\n\n def sample_data(\n self, num_neighbors: int, batch_size: int, seed: int = 0\n ) -> Dict[str, List[torch.utils.data.DataLoader]]:\n \"\"\"Samling data.\n\n Args:\n num_neighbors (int): Number of neighbors are sampled for each node in each iteration.\n batch_size (int): Numbers of samples per batch to load.\n seed (int, optional): Number of seed of samples. Defaults to 0.\n\n Returns:\n Dict[str, List[torch.utils.data.DataLoader]]: Sampled data.\n \"\"\"\n\n return create_loaders(\n self.node_features,\n self.edge_features,\n self.edge_index,\n self.read_edge_attr,\n num_neighbors,\n batch_size,\n self.group_mask,\n self.group_indices_node_findex,\n self.groups_names,\n self.label_mask,\n self.index,\n targets=self.targets,\n )"
},
{
"identifier": "get_auto_batch_size",
"path": "cool_graph/data/batch.py",
"snippet": "def get_auto_batch_size(\n groups_num_features: List[int],\n conv_type: Optional[Literal[\"NNConv\", \"GraphConv\"]] = None,\n conv1_aggrs: Optional[Dict[Literal[\"mean\", \"max\", \"add\"], int]] = None,\n conv2_aggrs: Optional[Dict[Literal[\"mean\", \"max\", \"add\"], int]] = None,\n conv3_aggrs: Optional[Dict[Literal[\"mean\", \"max\", \"add\"], int]] = None,\n n_hops: Optional[int] = None,\n lin_prep_size_common: Optional[int] = None,\n lin_prep_sizes: Optional[List[int]] = None,\n edge_attr_repr_sizes: Optional[List[int]] = None,\n num_edge_features: Optional[int] = None,\n device: str = \"cuda:0\",\n num_neighbors: Optional[List[int]] = None,\n) -> int:\n \"\"\"\n Аutomatic batch size calculation.\n Depending on model size and free GPU memory.\n\n Args:\n groups_num_features (List[int]): Number of feats in groups on nodes.\n conv_type (Literal[NNConv, GraphConv]): Model type\n conv1_aggrs (Dict[Literal[mean, max, add], int]]):\n An aggregation per features across a set of elements in conv layer 1. Defaults to None.\n conv2_aggrs (Dict[Literal[mean, max, add], int]]):\n An aggregation per features across a set of elements in conv layer 2. Defaults to None.\n conv3_aggrs (Dict[Literal[mean, max, add], int]]):\n An aggregation per features across a set of elements in conv layer 3. Defaults to None.\n n_hops (int): Hop with neighbors. Defaults to None.\n lin_prep_size_common (int): Size of linear layer (in). Defaults to None.\n lin_prep_sizes (int): Size of linear layer (out). Defaults to None.\n edge_attr_repr_sizes (List[int]): Size of layer of edges attributes. Defaults to None.\n num_edge_features (int): Number of feats on edges. Defaults to None.\n device (str): The current GPU memory usage. Defaults to \"cuda:0\".\n num_neighbors (List[int]): Number of neighbors are sampled for each node in each iteration. Defaults to None.\n\n Returns:\n batch_size (int): Numbers of samples per batch to load.\n \"\"\"\n if lin_prep_sizes is None:\n lin_prep_sizes = []\n if device is None:\n device = \"cuda:0\"\n\n hop1_size = sum(conv1_aggrs.values())\n hop2_size = sum(conv2_aggrs.values()) if n_hops >= 2 else 0\n hop3_size = sum(conv3_aggrs.values()) if n_hops == 3 else 0\n\n max_size_node = max(\n *groups_num_features,\n lin_prep_size_common,\n *lin_prep_sizes,\n hop1_size,\n hop2_size,\n hop3_size,\n )\n\n max_size_edge = 0\n if conv_type == \"NNConv\":\n max_size_edge = max(\n *edge_attr_repr_sizes,\n num_edge_features,\n )\n\n max_size = max_size_node + max_size_edge * 1.5\n\n try:\n all([n != -1 for n in num_neighbors])\n except Exception as ex:\n raise ValueError(\n f\"\"\"\n Found -1, Need to know max neighbors per hop.\n \"\"\"\n )\n m_neighbors = np.prod(num_neighbors)\n\n free_memory = torch.cuda.mem_get_info(device=device)[0] / (1024**3) # GB\n\n floats_per_node_ = 320000\n batch_size_ = 250\n memory_reserved_max_ = 3.8\n\n batch_size = (\n 0.5\n * batch_size_\n * floats_per_node_\n / (m_neighbors * max_size)\n * (free_memory / memory_reserved_max_)\n )\n\n if conv_type == \"NNConv\":\n batch_size /= edge_attr_repr_sizes[-1] * 4\n\n batch_size = int(batch_size)\n\n return batch_size"
},
{
"identifier": "create_loaders",
"path": "cool_graph/data/loaders.py",
"snippet": "def create_loaders(\n data: Data = None,\n node_features: torch.FloatTensor = None,\n edge_features: torch.FloatTensor = None,\n edge_index: torch.LongTensor = None,\n read_edge_attr: bool = None,\n num_neighbors: List[int] = None,\n batch_size: int = None,\n group_mask: torch.LongTensor = None,\n groups_features: Dict[int, List[int]] = None,\n groups_names: Dict[int, str] = None,\n label_mask: torch.BoolTensor = None,\n index: torch.LongTensor = None,\n targets: Dict[str, torch.Tensor] = None,\n input_nodes: Optional[List] = None,\n node_feature_indices: Optional[List] = None,\n unique_groups: Optional[int] = None,\n) -> List[torch.utils.data.DataLoader]:\n \"\"\"\n Creating list loaders.\n\n Args:\n node_features (torch.FloatTensor): features on nodes on FloatTensor\n edge_features (torch.FloatTensor): features on edge on FloatTensor\n edge_index (torch.LongTensor): edge indices\n read_edge_attr (bool): if set True - read edge features.\n num_neighbors (List[int]): Number of neighbors are sampled for each node in each iteration.\n batch_size (int): Numbers of samples per batch to load.\n group_mask (torch.LongTensor): Mask for groups in nodes.\n groups_features (Dict[int, List[int]]): Features in groups in nodes.\n groups_names (Dict[int, str]): Name of featutes in groups in nodes.\n label_mask (torch.BoolTensor): Mask for label.\n index (torch.LongTensor): index\n targets (Dict[str, torch.Tensor]): Labels.\n\n Returns:\n List[torch.utils.data.DataLoader]: Created DataLoader object. https://pytorch.org/docs/stable/data.html\n \"\"\"\n unique_groups = np.unique(group_mask)\n try:\n set(unique_groups).issubset(set(groups_features.keys()))\n except Exception as ex:\n raise ValueError(\n f\"\"\"Group mask values should be a subset of feature groups keys\"\"\"\n )\n\n try:\n set(groups_features).issubset(set(groups_names.keys()))\n except Exception as ex:\n raise ValueError(\n f\"\"\"Feature groups keys should be a subset of feature_groups_names\"\"\"\n )\n if data is None:\n data = Data(\n x=node_features,\n edge_index=edge_index,\n edge_attr=edge_features if read_edge_attr else None,\n group_mask=group_mask,\n label_mask=label_mask,\n index=index,\n **targets,\n )\n input_nodes = torch.nonzero(label_mask)[:, 0]\n\n loader = NeighborLoader(\n data,\n num_neighbors=num_neighbors,\n batch_size=batch_size,\n shuffle=True,\n input_nodes=input_nodes,\n )\n\n list_loader = []\n for sampled_data in tqdm(loader, desc=\"Sample data\"):\n sampled_data.label_mask[sampled_data.batch_size :] = False\n\n for group in unique_groups:\n name = groups_names[group]\n mask = sampled_data.group_mask == group\n features = groups_features[group]\n setattr(sampled_data, name, sampled_data.x[mask][:, features])\n\n del sampled_data.x\n\n list_loader.append(sampled_data)\n\n return list_loader"
},
{
"identifier": "setup_mlflow_from_config",
"path": "cool_graph/logging/mlflow_logging.py",
"snippet": "def setup_mlflow_from_config(config: Dict) -> None:\n \"\"\"\n Setup mlflow using logging.mlflow section of a config\n \"\"\"\n\n if config.get(\"MLFLOW_DISABLE_INSECURE_REQUEST_WARNING\", False):\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n for key, value in config.items():\n os.environ[key] = str(value)\n\n mlflow.set_tracking_uri(config.get(\"MLFLOW_TRACKING_URI\"))"
},
{
"identifier": "model_params_to_trial_params",
"path": "cool_graph/parameter_search/example_objective.py",
"snippet": "def model_params_to_trial_params(\n **model_params: Dict[str, Union[Literal[str], int, float, List, Dict]]\n) -> Dict[str, Union[Literal[str], int, float, List, Dict]]:\n \"\"\"\n Convert readable model_params to trial_params\n for example to run study.enqueue_trial(trial_params)\n \"\"\"\n trial = {}\n trial[\"activation\"] = model_params[\"activation\"]\n trial[\"lin_prep_len\"] = model_params[\"lin_prep_len\"]\n trial[\"lin_prep_dropout_rate\"] = model_params[\"lin_prep_dropout_rate\"]\n trial[\"lin_prep_weight_norm_flag\"] = model_params[\"lin_prep_weight_norm_flag\"]\n last_size = model_params[\"lin_prep_size_common\"]\n trial[\"lin_prep_size_common\"] = last_size\n for i in range(model_params[\"lin_prep_len\"]):\n trial[f\"lin_prep_size{i}_fraction\"] = np.clip(\n model_params[\"lin_prep_sizes\"][i] / last_size, 0.2, 1.0\n )\n last_size = model_params[\"lin_prep_sizes\"][i]\n\n trial[\"conv1_aggrs_mean_fraction\"] = np.clip(\n model_params[\"conv1_aggrs\"][\"mean\"] / last_size, 0.1, 1.0\n )\n trial[\"conv1_aggrs_max_fraction\"] = np.clip(\n model_params[\"conv1_aggrs\"][\"max\"] / last_size, 0.05, 0.7\n )\n trial[\"conv1_aggrs_add_fraction\"] = np.clip(\n model_params[\"conv1_aggrs\"][\"add\"] / last_size, 0.05, 0.7\n )\n\n trial[\"conv1_dropout_rate\"] = model_params[\"conv1_dropout_rate\"]\n\n if model_params[\"n_hops\"] == 2:\n last_size = sum(model_params[\"conv1_aggrs\"].values())\n\n trial[\"conv2_aggrs_mean_fraction\"] = np.clip(\n model_params[\"conv2_aggrs\"][\"mean\"] / last_size, 0.1, 0.7\n )\n trial[\"conv2_aggrs_max_fraction\"] = np.clip(\n model_params[\"conv2_aggrs\"][\"max\"] / last_size, 0.05, 0.5\n )\n trial[\"conv2_aggrs_add_fraction\"] = np.clip(\n model_params[\"conv2_aggrs\"][\"add\"] / last_size, 0.05, 0.5\n )\n\n trial[\"conv2_dropout_rate\"] = model_params[\"conv2_dropout_rate\"]\n\n if model_params[\"conv_type\"] == \"GraphConv\":\n trial[\"graph_conv_weight_norm_flag\"] = model_params[\n \"graph_conv_weight_norm_flag\"\n ]\n\n if model_params[\"conv_type\"] == \"NNConv\":\n trial[\"edge_attr_repr_len\"] = model_params[\"edge_attr_repr_len\"]\n for i in range(model_params[\"edge_attr_repr_len\"] - 1):\n if i == 0:\n trial[f\"edge_attr_repr_size{i}\"] = model_params[\"edge_attr_repr_sizes\"][\n i\n ]\n\n else:\n trial[f\"edge_attr_repr_size{i}_fraction\"] = np.clip(\n model_params[\"edge_attr_repr_sizes\"][i]\n / model_params[\"edge_attr_repr_sizes\"][i - 1],\n 0.2,\n 1.0,\n )\n\n trial[\"edge_attr_repr_size_last\"] = model_params[\"edge_attr_repr_sizes\"][-1]\n\n trial[\"edge_attr_repr_dropout_rate\"] = model_params[\n \"edge_attr_repr_dropout_rate\"\n ]\n\n trial[\"edge_attr_repr_last_dropout_rate_zero\"] = (\n model_params[\"edge_attr_repr_last_dropout_rate\"] == 0\n )\n if not trial[\"edge_attr_repr_last_dropout_rate_zero\"]:\n trial[\"edge_attr_repr_last_dropout_rate\"] = model_params[\n \"edge_attr_repr_last_dropout_rate\"\n ]\n\n trial[\"edge_attr_repr_weight_norm_flag\"] = model_params[\n \"edge_attr_repr_weight_norm_flag\"\n ]\n\n return trial"
},
{
"identifier": "sample_model_params",
"path": "cool_graph/parameter_search/example_objective.py",
"snippet": "def sample_model_params(trial: optuna.Trial, conv_type: str = \"GraphConv\") -> Dict:\n params = {}\n params[\"conv_type\"] = conv_type\n params[\"activation\"] = trial.suggest_categorical(\n \"activation\",\n [\n \"relu\", # 1st place\n \"prelu\", # 2nd place\n \"leakyrelu\",\n \"elu\",\n \"gelu\",\n ],\n )\n # NODE FEATURES PREP params\n params[\"lin_prep_len\"] = trial.suggest_int(\"lin_prep_len\", low=0, high=2)\n params[\"lin_prep_dropout_rate\"] = trial.suggest_uniform(\n \"lin_prep_dropout_rate\", low=0, high=0.5\n )\n params[\"lin_prep_weight_norm_flag\"] = trial.suggest_categorical(\n \"lin_prep_weight_norm_flag\", [False, True]\n )\n\n min_lin_prep_size_common = 32\n max_lin_prep_size_common = 1024\n\n last_size = trial.suggest_int(\n \"lin_prep_size_common\",\n min_lin_prep_size_common,\n max_lin_prep_size_common,\n log=True,\n )\n params[\"lin_prep_size_common\"] = last_size\n params[\"lin_prep_sizes\"] = []\n for i in range(params[\"lin_prep_len\"]):\n fraction = trial.suggest_loguniform(\n f\"lin_prep_size{i}_fraction\", low=0.2, high=1.0\n )\n last_size = max(16, int(np.round(last_size * fraction)))\n params[\"lin_prep_sizes\"].append(last_size)\n params[\"n_hops\"] = 2\n\n # CONV1 params\n\n params[\"conv1_aggrs\"] = {}\n fraction = trial.suggest_loguniform(\"conv1_aggrs_mean_fraction\", low=0.1, high=1.0)\n params[\"conv1_aggrs\"][\"mean\"] = max(8, int(np.round(last_size * fraction)))\n\n fraction = trial.suggest_loguniform(\"conv1_aggrs_max_fraction\", low=0.05, high=0.7)\n params[\"conv1_aggrs\"][\"max\"] = int(np.round(last_size * fraction))\n\n fraction = trial.suggest_loguniform(\"conv1_aggrs_add_fraction\", low=0.05, high=0.7)\n params[\"conv1_aggrs\"][\"add\"] = int(np.round(last_size * fraction))\n\n params[\"conv1_dropout_rate\"] = trial.suggest_uniform(\n \"conv1_dropout_rate\", low=0, high=0.5\n )\n\n # return params\n # CONV2 params\n if params[\"n_hops\"] == 2:\n last_size = sum(params[\"conv1_aggrs\"].values())\n params[\"conv2_aggrs\"] = {}\n fraction = trial.suggest_loguniform(\n \"conv2_aggrs_mean_fraction\", low=0.1, high=0.7\n )\n params[\"conv2_aggrs\"][\"mean\"] = max(8, int(np.round(last_size * fraction)))\n\n fraction = trial.suggest_loguniform(\n \"conv2_aggrs_max_fraction\", low=0.05, high=0.5\n )\n params[\"conv2_aggrs\"][\"max\"] = int(np.round(last_size * fraction))\n\n fraction = trial.suggest_loguniform(\n \"conv2_aggrs_add_fraction\", low=0.05, high=0.5\n )\n params[\"conv2_aggrs\"][\"add\"] = int(np.round(last_size * fraction))\n\n params[\"conv2_dropout_rate\"] = trial.suggest_uniform(\n \"conv2_dropout_rate\", low=0, high=0.5\n )\n if params[\"conv_type\"] == \"GraphConv\":\n params[\"graph_conv_weight_norm_flag\"] = trial.suggest_categorical(\n \"graph_conv_weight_norm_flag\", [False, True]\n )\n\n # EDGE ATTR params\n if params[\"conv_type\"] == \"NNConv\":\n params[\"edge_attr_repr_len\"] = trial.suggest_int(\n \"edge_attr_repr_len\", low=1, high=3\n )\n params[\"edge_attr_repr_sizes\"] = []\n for i in range(params[\"edge_attr_repr_len\"] - 1):\n if i == 0:\n params[\"edge_attr_repr_sizes\"].append(\n trial.suggest_int(\n f\"edge_attr_repr_size{i}\", low=4, high=40, log=True\n )\n )\n else:\n fraction = trial.suggest_loguniform(\n f\"edge_attr_repr_size{i}_fraction\", low=0.2, high=1.0\n )\n params[\"edge_attr_repr_sizes\"].append(\n max(4, int(np.round(params[\"edge_attr_repr_sizes\"][-1] * fraction)))\n )\n params[\"edge_attr_repr_sizes\"].append(\n trial.suggest_int(\"edge_attr_repr_size_last\", low=1, high=5, 
log=True)\n )\n\n params[\"edge_attr_repr_dropout_rate\"] = trial.suggest_uniform(\n \"edge_attr_repr_dropout_rate\", low=0, high=0.5\n )\n if trial.suggest_categorical(\n \"edge_attr_repr_last_dropout_rate_zero\", [True, False]\n ):\n params[\"edge_attr_repr_last_dropout_rate\"] = 0.0\n else:\n params[\"edge_attr_repr_last_dropout_rate\"] = trial.suggest_uniform(\n \"edge_attr_repr_last_dropout_rate\", low=0, high=0.5\n )\n\n params[\"edge_attr_repr_weight_norm_flag\"] = trial.suggest_categorical(\n \"edge_attr_repr_weight_norm_flag\", [False, True]\n )\n\n params[\"edge_attr_repr_last_activation\"] = \"sigmoid\"\n\n return params"
},
{
"identifier": "Trainer",
"path": "cool_graph/train/trainer.py",
"snippet": "class Trainer(object):\n def __init__(\n self,\n list_loader_train: List[torch.utils.data.DataLoader],\n list_loader_test: List[torch.utils.data.DataLoader],\n checkpoint_dir: Union[str, pathlib.PosixPath],\n device: str = \"cuda:0\",\n eval_freq: int = 5,\n fill_value: Union[int, float] = -100,\n initial_lr: float = 0.0023,\n weight_decay: float = 0.001,\n loss_name: str = \"CrossEntropyLoss\",\n loss_label_smoothing: bool = False,\n loss_target_weights: Optional[Dict[str, Union[int, float]]] = None,\n loss_group_weights: Optional[List[float]] = None,\n groups_names: Optional[Dict[int, str]] = None,\n groups_names_num_features: Optional[Dict[str, int]] = None,\n num_edge_features: Optional[int] = None,\n main_metric_name: str = \"main_metric\",\n mlflow_experiment_name: Optional[str] = None,\n n_epochs: int = 10,\n scheduler_params: Dict[Literal[\"milestones\", \"gamma\"], int] = {\n \"milestones\": [10, 20, 35, 50, 70, 90, 105],\n \"gamma\": 0.25,\n },\n scheduler_type: str = \"MultiStepLR\",\n target_names: List[str] = [\"y\"],\n target_sizes: Optional[List[int]] = None,\n use_mlflow: bool = False,\n tqdm_disable=False,\n conv_type: Literal[\"NNConv\", \"GraphConv\"] = \"NNConv\",\n metrics: Optional[float] = None,\n log_all_metrics: bool = True,\n **model_params,\n ) -> None:\n \"\"\"\n Training model (GraphConv or NNConv).\n Class that training / logging / saving model. Using train_epoch\n and eval_epoch from helpers.py in training loop below.\n\n Args:\n list_loader_train (List[torch.utils.data.DataLoader]): Train list with Data loader. Combines a dataset\n and a sampler, and provides an iterable over the given dataset.\n https://pytorch.org/docs/stable/data.html\n list_loader_test (List[torch.utils.data.DataLoader]): Test list with Data loader. Combines a dataset\n and a sampler, and provides an iterable over the given dataset.\n https://pytorch.org/docs/stable/data.html\n checkpoint_dir (Union[str, pathlib.PosixPath]): Path for training checkpoints\n device (_type_, optional): The device is an object representing the device on\n which a torch.Tensor is or will be allocated.. Defaults to \"cuda:0\".\n eval_freq (int, optional): Number of epoch group. Defaults to 5.\n fill_value (Union[int, float], optional): If value is None. Defaults to -100.\n initial_lr (float, optional): The learning rate param for Optimization. Defaults to 0.0023.\n weight_decay (float, optional): weight decay (L2 penalty). Defaults to 0.001.\n loss_name (str, optional): This criterion computes the cross entropy loss between\n input logits and target. Defaults to \"CrossEntropyLoss\".\n https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html\n loss_label_smoothing (bool, optional): If set True, use label smoothing. Defaults to False.\n loss_target_weights (Optional[Dict[str, Union[int, float]]], optional): Weights for targets. Defaults to None.\n loss_group_weights (Optional[List[float]], optional): Weights for groups. Defaults to None.\n groups_names (Optional[Dict[int, str]], optional): List with group names in nodes. Defaults to None.\n groups_names_num_features (Optional[Dict[str, int]], optional): Number of feats in groups in nodes. Defaults to None.\n num_edge_features (Optional[int], optional): Number of feats on edges. Defaults to None.\n main_metric_name (str, optional): Main metric for maximaze. Defaults to \"main_metric\".\n mlflow_experiment_name (Optional[str], optional): Name of mlflow experiment. Defaults to None.\n n_epochs (int, optional): Number of epochs. 
Defaults to 10.\n scheduler_params (Dict, optional): Milestones (list) – List of epoch indices. Must be increasing.\n gamma (float) – Multiplicative factor of learning rate decay.\n Defaults to { \"milestones\": [10, 20, 35, 50, 70, 90, 105], \"gamma\": 0.25, }.\n scheduler_type (str, optional): Decays the learning rate of each parameter group\n by gamma once the number of epoch reaches one of the milestones. Defaults to \"MultiStepLR\".\n target_names (List[str], optional): List of target names. Defaults to [\"y\"].\n target_sizes (Optional[List[int]], optional): Size of list with target. Defaults to None.\n use_mlflow (bool, optional): If set True, use MLFlow. Defaults to False.\n tqdm_disable (bool, optional): Display progress. Defaults to False.\n conv_type (Literal[NNConv, GraphConv], optional): The graph neural network operator. Defaults to \"NNConv\".\n metrics (float, optional): Metrics. Defaults to None.\n log_all_metrics (bool, optional): If set True, logging all metrics. Defaults to True.\n\n Raises:\n NotImplementedError: _description_\n \"\"\"\n for key, value in locals().items():\n setattr(self, key, value)\n\n self._metrics = {}\n self._main_metric = {}\n if isinstance(metrics, str):\n metrics = [metrics]\n if isinstance(\n metrics,\n (\n list,\n tuple,\n ),\n ):\n metrics = {name: metrics for name in target_names}\n\n for k, names in metrics.items():\n self._metrics[k] = {name: get_metric(name) for name in names}\n self._main_metric[k] = names[0]\n\n os.makedirs(checkpoint_dir, exist_ok=True)\n\n torch.cuda.empty_cache()\n gc.collect()\n\n if conv_type == \"NNConv\":\n self._model = NNConvGNN(\n **model_params,\n target_names=target_names,\n target_sizes=target_sizes,\n groups_names=groups_names,\n groups_names_num_features=groups_names_num_features,\n num_edge_features=num_edge_features,\n )\n elif conv_type == \"GraphConv\":\n self._model = GraphConvGNN(\n **model_params,\n target_names=target_names,\n target_sizes=target_sizes,\n groups_names=groups_names,\n groups_names_num_features=groups_names_num_features,\n num_edge_features=num_edge_features,\n )\n else:\n raise NotImplementedError(f\"{conv_type} is not implemented\")\n\n self._model.to(device)\n\n self._optimizer = torch.optim.Adam(\n self._model.parameters(),\n lr=initial_lr,\n weight_decay=weight_decay,\n )\n\n self._loss_criteria = getattr(torch.nn, loss_name)(\n reduction=\"none\", label_smoothing=loss_label_smoothing\n )\n self._use_edge_attr = conv_type == \"NNConv\"\n\n self._scheduler = getattr(torch.optim.lr_scheduler, scheduler_type)(\n self._optimizer, **scheduler_params\n )\n\n self._best_loss = {main_metric_name: -np.inf}\n\n self._train_run_lst = []\n self._test_metric_lst = []\n self._train_metric_lst = []\n\n def train(\n self, start_epoch: int = 0, end_epoch: Optional[int] = None\n ) -> Dict[\n Literal[\n \"best_loss\", \"global_calc_time\", \"train_loss\", \"test_metric\", \"train_metric\"\n ],\n float,\n ]:\n \"\"\"\n Training model and logging metrics.\n \"\"\"\n if end_epoch is None:\n end_epoch = self.n_epochs\n\n self.global_start_time = time.time()\n\n if self.use_mlflow:\n mlflow.end_run()\n mlflow.set_experiment(self.mlflow_experiment_name)\n mlflow.start_run()\n mlflow.log_params(\n {\n \"LossCriteria\": self._loss_criteria,\n \"checkpoint_dir\": self.checkpoint_dir,\n **self.model_params,\n }\n )\n\n for epoch in range(start_epoch, end_epoch):\n self.epoch = epoch\n # TRAIN\n train_run = train_epoch(\n self._model,\n self.list_loader_train,\n self.device,\n self._optimizer,\n 
self._use_edge_attr,\n target_weights=self.loss_target_weights,\n loss_criteria=self._loss_criteria,\n group_weights=self.loss_group_weights,\n tqdm_disable=self.tqdm_disable,\n )\n train_run[\"lr\"] = self._optimizer.param_groups[0][\"lr\"]\n self.mlflow_log_metrics(\n metrics=add_prefix_to_dict_keys(train_run, \"run_\"), step=epoch\n )\n train_run[\"epoch\"] = epoch\n self._train_run_lst.append(train_run)\n with open(\n os.path.join(self.checkpoint_dir, \"train_running_loss.txt\"), \"a\"\n ) as f:\n json.dump(train_run, f)\n f.write(\"\\n\")\n\n # calc metrics and perform scheduler step\n if (epoch - 0) % self.eval_freq == 0:\n # calc metrics\n # test\n logger.info(\"\\nEpoch {:03d}: \".format(epoch))\n test_metric = eval_epoch(\n self._model,\n self.list_loader_test,\n self.device,\n self.target_names,\n self.groups_names,\n postfix=\"test\",\n use_edge_attr=self._use_edge_attr,\n tqdm_disable=self.tqdm_disable,\n fill_value=self.fill_value,\n metrics=self._metrics,\n main_metric=self._main_metric,\n log_all_metrics=self.log_all_metrics,\n )\n self.mlflow_log_metrics(\n metrics=add_prefix_to_dict_keys(test_metric, \"test_\"), step=epoch\n )\n test_metric[\"epoch\"] = epoch\n self._test_metric_lst.append(test_metric)\n with open(\n os.path.join(self.checkpoint_dir, \"test_metric.txt\"), \"a\"\n ) as f:\n json.dump(test_metric, f)\n f.write(\"\\n\")\n\n # train\n logger.info(\"Epoch {:03d}: \".format(epoch))\n train_metric = eval_epoch(\n self._model,\n self.list_loader_train,\n self.device,\n self.target_names,\n self.groups_names,\n postfix=\"train\",\n use_edge_attr=self._use_edge_attr,\n tqdm_disable=self.tqdm_disable,\n metrics=self._metrics,\n main_metric=self._main_metric,\n log_all_metrics=self.log_all_metrics,\n )\n self.mlflow_log_metrics(\n metrics=add_prefix_to_dict_keys(train_metric, \"train_\"), step=epoch\n )\n train_metric[\"epoch\"] = epoch\n self._train_metric_lst.append(train_metric)\n with open(\n os.path.join(self.checkpoint_dir, \"train_metric.txt\"), \"a\"\n ) as f:\n json.dump(train_metric, f)\n f.write(\"\\n\")\n\n # save model\n checkpoint_file = os.path.join(\n self.checkpoint_dir, f\"state_dict_{epoch:0>4d}.pt\"\n )\n torch.save(self._model.cpu().state_dict(), checkpoint_file)\n self._model.to(self.device)\n\n if (\n test_metric[self.main_metric_name]\n > self._best_loss[self.main_metric_name]\n ):\n self._best_loss = test_metric\n self._best_loss[\"epoch\"] = epoch\n checkpoint_file = os.path.join(\n self.checkpoint_dir, \"state_dict_best.pt\"\n )\n torch.save(self._model.cpu().state_dict(), checkpoint_file)\n self._model.to(self.device)\n with open(\n os.path.join(self.checkpoint_dir, \"best_loss.txt\"), \"w\"\n ) as f:\n json.dump(self._best_loss, f, indent=4)\n\n self.mlflow_log_metrics(\n {\n \"best_epoch\": self._best_loss[\"epoch\"],\n f\"best_{self.main_metric_name}\": self._best_loss[\n self.main_metric_name\n ],\n },\n step=epoch,\n )\n\n if self.scheduler_type == \"ReduceLROnPlateau\":\n self._scheduler.step(train_run[\"total_loss\"])\n if (\n self._optimizer.param_groups[0][\"lr\"]\n <= self.scheduler_params[\"min_lr\"]\n ):\n break\n else:\n self._scheduler.step()\n\n self.global_calc_time = time.time() - self.global_start_time\n train_loss = pd.DataFrame(self._train_run_lst)\n test_metric = pd.DataFrame(self._test_metric_lst)\n train_metric = pd.DataFrame(self._train_metric_lst)\n\n self.mlflow_log_metrics(\n metrics=add_prefix_to_dict_keys(self._best_loss, \"best_\")\n )\n self.mlflow_log_metrics({\"global_calc_time\": self.global_calc_time})\n\n 
if self.use_mlflow:\n mlflow.end_run()\n torch.cuda.empty_cache()\n\n return {\n \"best_loss\": self._best_loss,\n \"global_calc_time\": self.global_calc_time,\n \"train_loss\": train_loss,\n \"test_metric\": test_metric,\n \"train_metric\": train_metric,\n }\n\n def mlflow_log_metrics(\n self, metrics: Dict[str, Any], step: Optional[int] = None\n ) -> None:\n if self.use_mlflow:\n try:\n mlflow.log_metrics(metrics, step)\n except MlflowException as e:\n save_str_e = traceback.format_exc()\n logger.info(\n \"Epoch {:03d}::\\nCaught exception:\\n{}\".format(\n self.epoch, save_str_e\n )\n )\n with open(\n os.path.join(self.checkpoint_dir, \"MlflowExceptions.txt\"), \"a\"\n ) as f:\n f.write(\n \"Epoch {:03d}::\\nCaught exception:\\n{}\".format(\n self.epoch, save_str_e\n )\n )"
}
] | import os
import pathlib
import hydra
import numpy as np
import optuna
import pandas as pd
import torch
from datetime import datetime
from itertools import product
from pathlib import Path
from typing import Dict, List, Literal, Optional
from hydra import (
compose,
core,
initialize,
initialize_config_dir,
initialize_config_module,
)
from omegaconf import DictConfig, OmegaConf
from optuna.trial import TrialState
from sklearn.model_selection import train_test_split
from torch_geometric.data import Data
from torch_geometric.loader import NeighborLoader, NeighborSampler
from tqdm import tqdm
from cool_graph.data import RawDataProcessor
from cool_graph.data.batch import get_auto_batch_size
from cool_graph.data.loaders import create_loaders
from cool_graph.logging import setup_mlflow_from_config
from cool_graph.parameter_search import (
model_params_to_trial_params,
sample_model_params,
)
from cool_graph.train import Trainer | 10,037 |
def create_cfg(config: str, overrides: List[str], path_base: str = "cfg") -> Dict:
assert path_base in ("cfg", "cwd")
core.global_hydra.GlobalHydra.instance().clear()
if os.path.isabs(config):
config_path = pathlib.Path(config).parent
else:
config_path = pathlib.Path(os.getcwd()) / pathlib.Path(config).parent
config_name = pathlib.Path(config).name.replace(".yaml", "")
initialize_config_dir(str(config_path), version_base=None)
cfg = compose(config_name=config_name, overrides=overrides)
return cfg
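# Illustrative usage sketch for create_cfg() above. A minimal example, assuming a
# config file at "configs/full.yaml" and a "training.batch_size" key (both the path
# and the override keys are hypothetical placeholders, not guaranteed defaults):
#
#     cfg = create_cfg(
#         config="configs/full.yaml",
#         overrides=["training.batch_size=256", "training.n_epochs=20"],
#     )
#     runner = ConfigRunner(cfg)
#
# Because create_cfg() clears any previously initialized GlobalHydra instance before
# composing, it can be called repeatedly within the same process.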
class ConfigRunner:
r"""Runner for cli mode. Using only in cli.
This class allows to load data + split data per batchs + split data per train/val + training.
See the config full.yaml in ./config for knowing what excactly using as data/logging/model_params/training/metrics.
You can use default params, but also you can change it.
Steps for changing confis:
- make get_config --configs path_where_you_need_configs (default: new path ./configs by itself)
"""
def __init__(self, config: Optional[DictConfig]) -> None:
cfg = OmegaConf.to_container(config, resolve=True)
self.cfg = cfg
self.target_names = cfg["training"]["targets"]
self.groups_names = cfg["data"]["groups_names"]
self.target_weights = cfg["training"]["loss"]["target_weights"]
self.read_edge_attr = cfg["data"].get("read_edge_attr", True)
self.batch_size = cfg["training"]["batch_size"]
self.group_mask_col = cfg["data"]["group_mask_col"]
self.label_mask_col = cfg["data"]["label_mask_col"]
self.label_cols = cfg["data"]["label_cols"]
self.label_index_col = cfg["data"]["label_index_col"]
self.edge_index_cols = cfg["data"]["edge_index_cols"]
self.num_neighbors = cfg["training"]["num_neighbors"]
self.features_edges_names = cfg["data"].get("features_edges")
self.group_names_node_features = cfg["data"]["features"]
self.train_paths = cfg["data"]["train"]
self.val_paths = cfg["data"]["validation"]
self.metrics = cfg["metrics"]
self.chkpt_dir = (
pathlib.Path(cfg["logging"]["checkpoint_dir"]) / str(datetime.now())[:19]
)
os.makedirs(self.chkpt_dir, exist_ok=True)
if self.cfg["logging"].get("use_mlflow", False):
|
def create_cfg(config: str, overrides: List[str], path_base: str = "cfg") -> Dict:
assert path_base in ("cfg", "cwd")
core.global_hydra.GlobalHydra.instance().clear()
if os.path.isabs(config):
config_path = pathlib.Path(config).parent
else:
config_path = pathlib.Path(os.getcwd()) / pathlib.Path(config).parent
config_name = pathlib.Path(config).name.replace(".yaml", "")
initialize_config_dir(str(config_path), version_base=None)
cfg = compose(config_name=config_name, overrides=overrides)
return cfg
class ConfigRunner:
r"""Runner for cli mode. Using only in cli.
This class allows to load data + split data per batchs + split data per train/val + training.
See the config full.yaml in ./config for knowing what excactly using as data/logging/model_params/training/metrics.
You can use default params, but also you can change it.
Steps for changing confis:
- make get_config --configs path_where_you_need_configs (default: new path ./configs by itself)
"""
def __init__(self, config: Optional[DictConfig]) -> None:
cfg = OmegaConf.to_container(config, resolve=True)
self.cfg = cfg
self.target_names = cfg["training"]["targets"]
self.groups_names = cfg["data"]["groups_names"]
self.target_weights = cfg["training"]["loss"]["target_weights"]
self.read_edge_attr = cfg["data"].get("read_edge_attr", True)
self.batch_size = cfg["training"]["batch_size"]
self.group_mask_col = cfg["data"]["group_mask_col"]
self.label_mask_col = cfg["data"]["label_mask_col"]
self.label_cols = cfg["data"]["label_cols"]
self.label_index_col = cfg["data"]["label_index_col"]
self.edge_index_cols = cfg["data"]["edge_index_cols"]
self.num_neighbors = cfg["training"]["num_neighbors"]
self.features_edges_names = cfg["data"].get("features_edges")
self.group_names_node_features = cfg["data"]["features"]
self.train_paths = cfg["data"]["train"]
self.val_paths = cfg["data"]["validation"]
self.metrics = cfg["metrics"]
self.chkpt_dir = (
pathlib.Path(cfg["logging"]["checkpoint_dir"]) / str(datetime.now())[:19]
)
os.makedirs(self.chkpt_dir, exist_ok=True)
if self.cfg["logging"].get("use_mlflow", False): | setup_mlflow_from_config(cfg["logging"]["mlflow"]) | 3 | 2023-11-22 09:44:16+00:00 | 12k |
HeliosZhao/Animate124 | dnerf/provider.py | [
{
"identifier": "get_rays",
"path": "nerf/utils.py",
"snippet": "@torch.cuda.amp.autocast(enabled=False)\ndef get_rays(poses, intrinsics, H, W, N=-1, error_map=None):\n ''' get rays\n Args:\n poses: [B, 4, 4], cam2world\n intrinsics: [4]\n H, W, N: int\n error_map: [B, 128 * 128], sample probability based on training error\n Returns:\n rays_o, rays_d: [B, N, 3]\n inds: [B, N]\n '''\n\n device = poses.device\n B = poses.shape[0]\n fx, fy, cx, cy = intrinsics\n\n i, j = custom_meshgrid(torch.linspace(0, W-1, W, device=device), torch.linspace(0, H-1, H, device=device))\n i = i.t().reshape([1, H*W]).expand([B, H*W]) + 0.5\n j = j.t().reshape([1, H*W]).expand([B, H*W]) + 0.5\n\n results = {}\n\n if N > 0:\n N = min(N, H*W)\n\n if error_map is None:\n inds = torch.randint(0, H*W, size=[N], device=device) # may duplicate\n inds = inds.expand([B, N])\n else:\n\n # weighted sample on a low-reso grid\n inds_coarse = torch.multinomial(error_map.to(device), N, replacement=False) # [B, N], but in [0, 128*128)\n\n # map to the original resolution with random perturb.\n inds_x, inds_y = inds_coarse // 128, inds_coarse % 128 # `//` will throw a warning in torch 1.10... anyway.\n sx, sy = H / 128, W / 128\n inds_x = (inds_x * sx + torch.rand(B, N, device=device) * sx).long().clamp(max=H - 1)\n inds_y = (inds_y * sy + torch.rand(B, N, device=device) * sy).long().clamp(max=W - 1)\n inds = inds_x * W + inds_y\n\n results['inds_coarse'] = inds_coarse # need this when updating error_map\n\n i = torch.gather(i, -1, inds)\n j = torch.gather(j, -1, inds)\n\n results['inds'] = inds\n\n else:\n inds = torch.arange(H*W, device=device).expand([B, H*W])\n\n zs = - torch.ones_like(i)\n xs = - (i - cx) / fx * zs\n ys = (j - cy) / fy * zs\n directions = torch.stack((xs, ys, zs), dim=-1) # 1,N=HW,3\n # directions = safe_normalize(directions)\n rays_d = directions @ poses[:, :3, :3].transpose(-1, -2) # (B, N, 3)\n\n rays_o = poses[..., :3, 3] # [B, 3]\n rays_o = rays_o[..., None, :].expand_as(rays_d) # [B, N, 3]\n\n results['rays_o'] = rays_o\n results['rays_d'] = rays_d\n\n return results"
},
{
"identifier": "safe_normalize",
"path": "nerf/utils.py",
"snippet": "def safe_normalize(x, eps=1e-20):\n return x / torch.sqrt(torch.clamp(torch.sum(x * x, -1, keepdim=True), min=eps))"
},
{
"identifier": "NeRFDataset",
"path": "nerf/provider.py",
"snippet": "class NeRFDataset:\n def __init__(self, opt, device, type='train', H=256, W=256, size=100):\n super().__init__()\n\n self.opt = opt\n self.device = device\n self.type = type # train, val, test\n\n self.H = H # 128\n self.W = W # 128\n self.size = size\n\n self.training = self.type in ['train', 'all']\n\n self.cx = self.H / 2\n self.cy = self.W / 2\n\n self.near = self.opt.min_near\n self.far = 1000 # infinite\n\n # [debug] visualize poses\n # poses, dirs, _, _, _ = rand_poses(100, self.device, opt, radius_range=self.opt.radius_range, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front, jitter=self.opt.jitter_pose, uniform_sphere_rate=1)\n # visualize_poses(poses.detach().cpu().numpy(), dirs.detach().cpu().numpy())\n\n def get_default_view_data(self):\n\n H = int(self.opt.known_view_scale * self.H)\n W = int(self.opt.known_view_scale * self.W)\n cx = H / 2\n cy = W / 2\n\n radii = torch.FloatTensor(self.opt.ref_radii).to(self.device)\n thetas = torch.FloatTensor(self.opt.ref_polars).to(self.device)\n phis = torch.FloatTensor(self.opt.ref_azimuths).to(self.device)\n poses, dirs = circle_poses(self.device, radius=radii, theta=thetas, phi=phis, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front)\n fov = self.opt.default_fovy\n focal = H / (2 * np.tan(np.deg2rad(fov) / 2))\n intrinsics = np.array([focal, focal, cx, cy])\n\n projection = torch.tensor([\n [2*focal/W, 0, 0, 0],\n [0, -2*focal/H, 0, 0],\n [0, 0, -(self.far+self.near)/(self.far-self.near), -(2*self.far*self.near)/(self.far-self.near)],\n [0, 0, -1, 0]\n ], dtype=torch.float32, device=self.device).unsqueeze(0).repeat(len(radii), 1, 1)\n\n mvp = projection @ torch.inverse(poses) # [B, 4, 4]\n\n # sample a low-resolution but full image\n rays = get_rays(poses, intrinsics, H, W, -1)\n\n data = {\n 'H': H,\n 'W': W,\n 'rays_o': rays['rays_o'],\n 'rays_d': rays['rays_d'],\n 'dir': dirs,\n 'mvp': mvp,\n 'polar': self.opt.ref_polars,\n 'azimuth': self.opt.ref_azimuths,\n 'radius': self.opt.ref_radii,\n }\n\n return data\n\n def collate(self, index):\n\n B = len(index)\n\n if self.training:\n # random pose on the fly, size 1,4,4\n poses, dirs, thetas, phis, radius = rand_poses(B, self.device, self.opt, radius_range=self.opt.radius_range, theta_range=self.opt.theta_range, phi_range=self.opt.phi_range, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front, uniform_sphere_rate=self.opt.uniform_sphere_rate)\n\n # random focal\n fov = random.random() * (self.opt.fovy_range[1] - self.opt.fovy_range[0]) + self.opt.fovy_range[0]\n\n elif self.type == 'six_views':\n # six views\n thetas_six = [90]*4 + [1e-6] + [180]\n phis_six = [0, 90, 180, -90, 0, 0]\n thetas = torch.FloatTensor([thetas_six[index[0]]]).to(self.device)\n phis = torch.FloatTensor([phis_six[index[0]]]).to(self.device)\n radius = torch.FloatTensor([self.opt.default_radius]).to(self.device)\n poses, dirs = circle_poses(self.device, radius=radius, theta=thetas, phi=phis, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front)\n\n # fixed focal\n fov = self.opt.default_fovy\n\n else:\n # circle pose\n thetas = torch.FloatTensor([self.opt.default_polar]).to(self.device)\n phis = torch.FloatTensor([(index[0] / self.size) * 360]).to(self.device)\n phis = phis + self.opt.default_azimuth\n radius = torch.FloatTensor([self.opt.default_radius]).to(self.device)\n poses, dirs = circle_poses(self.device, radius=radius, theta=thetas, phi=phis, 
return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front)\n\n # fixed focal\n fov = self.opt.default_fovy\n\n focal = self.H / (2 * np.tan(np.deg2rad(fov) / 2))\n intrinsics = np.array([focal, focal, self.cx, self.cy])\n\n projection = torch.tensor([\n [2*focal/self.W, 0, 0, 0],\n [0, -2*focal/self.H, 0, 0],\n [0, 0, -(self.far+self.near)/(self.far-self.near), -(2*self.far*self.near)/(self.far-self.near)],\n [0, 0, -1, 0]\n ], dtype=torch.float32, device=self.device).unsqueeze(0)\n\n mvp = projection @ torch.inverse(poses) # [1, 4, 4]\n\n # sample a low-resolution but full image\n # ipdb.set_trace()\n rays = get_rays(poses, intrinsics, self.H, self.W, -1)\n\n # delta polar/azimuth/radius to default view\n delta_polar = thetas - self.opt.default_polar\n delta_azimuth = phis - self.opt.default_azimuth\n delta_azimuth[delta_azimuth > 180] -= 360 # range in [-180, 180]\n delta_radius = radius - self.opt.default_radius\n\n data = {\n 'H': self.H,\n 'W': self.W,\n 'rays_o': rays['rays_o'], # 1,HW,3\n 'rays_d': rays['rays_d'], # 1,HW,3\n 'dir': dirs,\n 'mvp': mvp,\n 'polar': delta_polar,\n 'azimuth': delta_azimuth,\n 'radius': delta_radius,\n }\n\n return data\n\n def dataloader(self, batch_size=None):\n batch_size = batch_size or self.opt.batch_size\n loader = DataLoader(list(range(self.size)), batch_size=batch_size, collate_fn=self.collate, shuffle=self.training, num_workers=0)\n loader._data = self\n return loader"
},
{
"identifier": "visualize_poses",
"path": "nerf/provider.py",
"snippet": "def visualize_poses(poses, dirs, size=0.1):\n # poses: [B, 4, 4], dirs: [B]\n import trimesh\n axes = trimesh.creation.axis(axis_length=4)\n sphere = trimesh.creation.icosphere(radius=1)\n objects = [axes, sphere]\n\n for pose, dir in zip(poses, dirs):\n # a camera is visualized with 8 line segments.\n pos = pose[:3, 3]\n a = pos + size * pose[:3, 0] + size * pose[:3, 1] - size * pose[:3, 2]\n b = pos - size * pose[:3, 0] + size * pose[:3, 1] - size * pose[:3, 2]\n c = pos - size * pose[:3, 0] - size * pose[:3, 1] - size * pose[:3, 2]\n d = pos + size * pose[:3, 0] - size * pose[:3, 1] - size * pose[:3, 2]\n\n segs = np.array([[pos, a], [pos, b], [pos, c], [pos, d], [a, b], [b, c], [c, d], [d, a]])\n segs = trimesh.load_path(segs)\n\n # different color for different dirs\n segs.colors = DIR_COLORS[[dir]].repeat(len(segs.entities), 0)\n\n objects.append(segs)\n\n trimesh.Scene(objects).show()"
},
{
"identifier": "DIR_COLORS",
"path": "nerf/provider.py",
"snippet": "DIR_COLORS = np.array([\n [255, 0, 0, 255], # front\n [0, 255, 0, 255], # side\n [0, 0, 255, 255], # back\n [255, 255, 0, 255], # side\n [255, 0, 255, 255], # overhead\n [0, 255, 255, 255], # bottom\n], dtype=np.uint8)"
},
{
"identifier": "get_view_direction",
"path": "nerf/provider.py",
"snippet": "def get_view_direction(thetas, phis, overhead, front):\n # phis [B,]; thetas: [B,]\n # front = 0 [0, front)\n # side (right) = 1 [front, 180)\n # back = 2 [180, 180+front)\n # side (left) = 3 [180+front, 360)\n # top = 4 [0, overhead]\n # bottom = 5 [180-overhead, 180]\n res = torch.zeros(thetas.shape[0], dtype=torch.long)\n # first determine by phis\n phis = phis % (2 * np.pi)\n res[(phis < front / 2) | (phis >= 2 * np.pi - front / 2)] = 0\n res[(phis >= front / 2) & (phis < np.pi - front / 2)] = 1\n res[(phis >= np.pi - front / 2) & (phis < np.pi + front / 2)] = 2\n res[(phis >= np.pi + front / 2) & (phis < 2 * np.pi - front / 2)] = 3\n # override by thetas\n res[thetas <= overhead] = 4\n res[thetas >= (np.pi - overhead)] = 5\n return res"
},
{
"identifier": "rand_poses",
"path": "nerf/provider.py",
"snippet": "def rand_poses(size, device, opt, radius_range=[1, 1.5], theta_range=[0, 120], phi_range=[0, 360], return_dirs=False, angle_overhead=30, angle_front=60, uniform_sphere_rate=0.5):\n ''' generate random poses from an orbit camera\n Args:\n size: batch size of generated poses.\n device: where to allocate the output.\n radius: camera radius\n theta_range: [min, max], should be in [0, pi]\n phi_range: [min, max], should be in [0, 2 * pi]\n Return:\n poses: [size, 4, 4]\n '''\n\n theta_range = np.array(theta_range) / 180 * np.pi\n phi_range = np.array(phi_range) / 180 * np.pi\n angle_overhead = angle_overhead / 180 * np.pi\n angle_front = angle_front / 180 * np.pi\n\n radius = torch.rand(size, device=device) * (radius_range[1] - radius_range[0]) + radius_range[0]\n\n if random.random() < uniform_sphere_rate: # 0.5\n unit_centers = F.normalize(\n torch.stack([\n (torch.rand(size, device=device) - 0.5) * 2.0,\n torch.rand(size, device=device),\n (torch.rand(size, device=device) - 0.5) * 2.0,\n ], dim=-1), p=2, dim=1\n )\n thetas = torch.acos(unit_centers[:,1])\n phis = torch.atan2(unit_centers[:,0], unit_centers[:,2])\n phis[phis < 0] += 2 * np.pi\n centers = unit_centers * radius.unsqueeze(-1)\n else:\n thetas = torch.rand(size, device=device) * (theta_range[1] - theta_range[0]) + theta_range[0]\n phis = torch.rand(size, device=device) * (phi_range[1] - phi_range[0]) + phi_range[0]\n phis[phis < 0] += 2 * np.pi\n\n centers = torch.stack([\n radius * torch.sin(thetas) * torch.sin(phis),\n radius * torch.cos(thetas),\n radius * torch.sin(thetas) * torch.cos(phis),\n ], dim=-1) # [B, 3]\n\n targets = 0\n\n # jitters\n if opt.jitter_pose:\n jit_center = opt.jitter_center # 0.015 # was 0.2\n jit_target = opt.jitter_target\n centers += torch.rand_like(centers) * jit_center - jit_center/2.0\n targets += torch.randn_like(centers) * jit_target\n\n # lookat\n forward_vector = safe_normalize(centers - targets)\n up_vector = torch.FloatTensor([0, 1, 0]).to(device).unsqueeze(0).repeat(size, 1)\n right_vector = safe_normalize(torch.cross(forward_vector, up_vector, dim=-1))\n\n if opt.jitter_pose:\n up_noise = torch.randn_like(up_vector) * opt.jitter_up\n else:\n up_noise = 0\n\n up_vector = safe_normalize(torch.cross(right_vector, forward_vector, dim=-1) + up_noise)\n\n poses = torch.eye(4, dtype=torch.float, device=device).unsqueeze(0).repeat(size, 1, 1)\n poses[:, :3, :3] = torch.stack((right_vector, up_vector, forward_vector), dim=-1)\n poses[:, :3, 3] = centers\n\n if return_dirs:\n dirs = get_view_direction(thetas, phis, angle_overhead, angle_front)\n else:\n dirs = None\n\n # back to degree\n thetas = thetas / np.pi * 180\n phis = phis / np.pi * 180\n\n return poses, dirs, thetas, phis, radius"
},
{
"identifier": "circle_poses",
"path": "nerf/provider.py",
"snippet": "def circle_poses(device, radius=torch.tensor([3.2]), theta=torch.tensor([60]), phi=torch.tensor([0]), return_dirs=False, angle_overhead=30, angle_front=60):\n\n theta = theta / 180 * np.pi\n phi = phi / 180 * np.pi\n angle_overhead = angle_overhead / 180 * np.pi\n angle_front = angle_front / 180 * np.pi\n \n centers = torch.stack([\n radius * torch.sin(theta) * torch.sin(phi),\n radius * torch.cos(theta),\n radius * torch.sin(theta) * torch.cos(phi),\n ], dim=-1) # [B, 3]\n\n # lookat\n forward_vector = safe_normalize(centers)\n up_vector = torch.FloatTensor([0, 1, 0]).to(device).unsqueeze(0).repeat(len(centers), 1)\n right_vector = safe_normalize(torch.cross(forward_vector, up_vector, dim=-1))\n up_vector = safe_normalize(torch.cross(right_vector, forward_vector, dim=-1))\n\n poses = torch.eye(4, dtype=torch.float, device=device).unsqueeze(0).repeat(len(centers), 1, 1)\n poses[:, :3, :3] = torch.stack((right_vector, up_vector, forward_vector), dim=-1)\n poses[:, :3, 3] = centers\n\n if return_dirs:\n dirs = get_view_direction(theta, phi, angle_overhead, angle_front)\n else:\n dirs = None\n\n return poses, dirs"
},
{
"identifier": "generate_grid_points",
"path": "nerf/provider.py",
"snippet": "def generate_grid_points(resolution=128, device='cuda'):\n # resolution: number of points along each dimension\n # Generate the grid points\n x = torch.linspace(0, 1, resolution)\n y = torch.linspace(0, 1, resolution)\n z = torch.linspace(0, 1, resolution)\n # Create the meshgrid\n grid_x, grid_y, grid_z = torch.meshgrid(x, y, z)\n\n # Flatten the grid points if needed\n grid_points = torch.stack((grid_x.flatten(), grid_y.flatten(), grid_z.flatten()), dim=1).to(device)\n return grid_points"
}
] | import random
import numpy as np
import math
import torch
import torch.nn.functional as F
import ipdb
import logging
from scipy.spatial.transform import Slerp, Rotation
from torch.utils.data import DataLoader
from nerf.utils import get_rays, safe_normalize
from nerf.provider import NeRFDataset, visualize_poses, DIR_COLORS, get_view_direction, rand_poses, circle_poses, generate_grid_points | 7,229 | ## scale delta will make the camera not exceed the range, also, it cannot across the range
# for example, phi from -pi/4 to pi/4 is a reasonable motion but scale delta will make it impossible
d_thetas = d_thetas.clamp(theta_range[0]-thetas, theta_range[1]-thetas) # d_theta + theta in range [theta_range[0], theta_range[1]]
d_phis = d_phis.clamp(phi_range[0]-init_phis, phi_range[1]-init_phis) # d_phi + init_phi in range [phi_range[0], phi_range[1]] # init phi is before convert to 0-2pi
##
num_frames = opt.num_frames
scale = torch.arange(num_frames, device=device) / num_frames # 0,1/f, ... f-1/f, F
thetas_dyn = thetas + scale * d_thetas # F
phis_dyn = init_phis + scale * d_phis # F
phis_dyn[phis_dyn < 0] += 2 * np.pi
assert thetas_dyn[0] == thetas[0] and phis_dyn[0] == init_phis[0]
centers = torch.stack([
radius * torch.sin(thetas_dyn) * torch.sin(phis_dyn),
radius * torch.cos(thetas_dyn),
radius * torch.sin(thetas_dyn) * torch.cos(phis_dyn),
], dim=-1) # [B, 3] # F,3
# lookat
forward_vector = safe_normalize(centers - targets)
up_vector = torch.FloatTensor([0, 1, 0]).to(device).unsqueeze(0).repeat(num_frames, 1)
right_vector = safe_normalize(torch.cross(forward_vector, up_vector, dim=-1))
if opt.jitter_pose:
up_noise = torch.randn_like(up_vector) * opt.jitter_up
else:
up_noise = 0
up_vector = safe_normalize(torch.cross(right_vector, forward_vector, dim=-1) + up_noise)
poses = torch.eye(4, dtype=torch.float, device=device).unsqueeze(0).repeat(num_frames, 1, 1)
poses[:, :3, :3] = torch.stack((right_vector, up_vector, forward_vector), dim=-1)
poses[:, :3, 3] = centers
if return_dirs:
dirs = get_view_direction(thetas_dyn, phis_dyn, angle_overhead, angle_front)
else:
dirs = None
# back to degree
thetas_dyn = thetas_dyn / np.pi * 180
phis_dyn = phis_dyn / np.pi * 180
radius = radius.repeat(num_frames)
return poses, dirs, thetas_dyn, phis_dyn, radius
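# Numeric sanity check for the d_theta/d_phi clamping used above (a sketch with
# arbitrary numbers, not values from any config): with theta_range = [0.785, 2.356]
# rad and an initially sampled theta = 0.9 rad, a raw delta of -0.6 rad is clamped to
# max(-0.6, 0.785 - 0.9) = -0.115, so theta + d_theta equals the lower bound 0.785
# and every interpolated frame stays inside the allowed polar range.
#
#     import torch
#     theta_range = (0.785, 2.356)
#     thetas = torch.tensor([0.9])
#     d_thetas = torch.tensor([-0.6]).clamp(theta_range[0] - thetas, theta_range[1] - thetas)
#     assert torch.isclose(thetas + d_thetas, torch.tensor([0.785])).all()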
class DNeRFDataset(NeRFDataset):
def __init__(self, opt, device, type='train', H=256, W=256, size=100):
super().__init__(opt, device, type, H, W, size)
self.num_frames = opt.num_frames
self.num_test_frames = opt.get("num_test_frames", self.num_frames)
self.dynamic_cam_rate = self.opt.dynamic_cam_rate
self.precision = opt.get('precision', 64)
self.zero_precision = opt.get('zero_precision', self.precision)
logger.info(f"Training dataset, random time sampling precision is {self.precision}, zero time sampling precision is {self.zero_precision}")
def get_default_view_data(self):
H = int(self.opt.known_view_scale * self.H)
W = int(self.opt.known_view_scale * self.W)
cx = H / 2
cy = W / 2
radii = torch.FloatTensor(self.opt.ref_radii).to(self.device)
thetas = torch.FloatTensor(self.opt.ref_polars).to(self.device)
phis = torch.FloatTensor(self.opt.ref_azimuths).to(self.device)
poses, dirs = circle_poses(self.device, radius=radii, theta=thetas, phi=phis, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front)
fov = self.opt.default_fovy
focal = H / (2 * np.tan(np.deg2rad(fov) / 2))
intrinsics = np.array([focal, focal, cx, cy])
projection = torch.tensor([
[2*focal/W, 0, 0, 0],
[0, -2*focal/H, 0, 0],
[0, 0, -(self.far+self.near)/(self.far-self.near), -(2*self.far*self.near)/(self.far-self.near)],
[0, 0, -1, 0]
], dtype=torch.float32, device=self.device).unsqueeze(0).repeat(len(radii), 1, 1)
mvp = projection @ torch.inverse(poses) # [B, 4, 4]
# sample a low-resolution but full image
rays = get_rays(poses, intrinsics, H, W, -1)
if rays['rays_o'].size(0):
time = torch.FloatTensor([0]).reshape(rays['rays_o'].size(0), 1)
else:
time = None
data = {
'H': H,
'W': W,
'rays_o': rays['rays_o'],
'rays_d': rays['rays_d'],
'dir': dirs,
'mvp': mvp,
'time': time,
'polar': self.opt.ref_polars,
'azimuth': self.opt.ref_azimuths,
'radius': self.opt.ref_radii,
}
return data
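# Quick sanity check for the pinhole intrinsics assembled above (made-up numbers, not
# values taken from opt): for a 256x256 render with fovy = 60 degrees,
# focal = 256 / (2 * tan(30 deg)) ~= 221.7 px and the principal point is
# (cx, cy) = (128, 128). `projection` maps camera space to clip space, and
# mvp = projection @ inverse(pose) takes world-space points directly to clip space.
#
#     import numpy as np
#     H = W = 256
#     fov = 60.0
#     focal = H / (2 * np.tan(np.deg2rad(fov) / 2))
#     assert abs(focal - 221.70) < 0.01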
def collate(self, index):
B = len(index)
dynamic_cam = False
start_from_zero = False
if self.training:
if np.random.random() < self.dynamic_cam_rate:
dynamic_cam = True
poses, dirs, thetas, phis, radius = rand_poses_trajectory(B, self.device, self.opt, radius_range=self.opt.radius_range, theta_range=self.opt.theta_range, phi_range=self.opt.phi_range, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front, uniform_sphere_rate=self.opt.uniform_sphere_rate)
## poses F,4,4
else:
# random pose on the fly, size 1,4,4
|
logger = logging.getLogger(__name__)
def rand_poses_trajectory(size, device, opt, radius_range=[1, 1.5], theta_range=[0, 120], phi_range=[0, 360], return_dirs=False, angle_overhead=30, angle_front=60, uniform_sphere_rate=0.5, static_view_rate=0.):
''' generate random poses from an orbit camera
Args:
size: batch size of generated poses.
device: where to allocate the output.
radius: camera radius [1.8,1.8]
theta_range: [min, max], should be in [45, 135]
phi_range: [min, max], should be in [-180, 180]
Return:
poses: [size, 4, 4]
'''
assert size == 1 and not opt.jitter_pose
theta_range = np.array(theta_range) / 180 * np.pi # -pi/4 ~ 3pi/4
phi_range = np.array(phi_range) / 180 * np.pi # -pi ~ pi
angle_overhead = angle_overhead / 180 * np.pi # pi/6
angle_front = angle_front / 180 * np.pi # pi/3
radius = torch.rand(size, device=device) * (radius_range[1] - radius_range[0]) + radius_range[0]
if random.random() < uniform_sphere_rate: # 0.5
unit_centers = F.normalize(
torch.stack([
(torch.rand(size, device=device) - 0.5) * 2.0,
torch.rand(size, device=device),
(torch.rand(size, device=device) - 0.5) * 2.0,
], dim=-1), p=2, dim=1
)
thetas = torch.acos(unit_centers[:,1])
phis = torch.atan2(unit_centers[:,0], unit_centers[:,2])
init_phis = phis # init phi can be smaller than 0
phis[phis < 0] += 2 * np.pi
centers = unit_centers * radius.unsqueeze(-1)
else:
thetas = torch.rand(size, device=device) * (theta_range[1] - theta_range[0]) + theta_range[0] # 1
phis = torch.rand(size, device=device) * (phi_range[1] - phi_range[0]) + phi_range[0]
init_phis = phis
phis[phis < 0] += 2 * np.pi
centers = torch.stack([
radius * torch.sin(thetas) * torch.sin(phis),
radius * torch.cos(thetas),
radius * torch.sin(thetas) * torch.cos(phis),
], dim=-1) # [B, 3]
targets = 0
# scale_delta = False
# ipdb.set_trace()
## delta thetas
d_theta_range = [-np.pi/4, np.pi/4]
d_phi_range = [-np.pi/2, np.pi/2]
d_thetas = torch.rand(size, device=device) * (d_theta_range[1] - d_theta_range[0]) + d_theta_range[0] # -np.pi/4, np.pi/4
d_phis = torch.rand(size, device=device) * (d_phi_range[1] - d_phi_range[0]) + d_phi_range[0] # -np.pi/2, np.pi/2
if opt.scale_delta:
## scale delta will make the camera not exceed the range, also, it cannot across the range
# for example, phi from -pi/4 to pi/4 is a reasonable motion but scale delta will make it impossible
d_thetas = d_thetas.clamp(theta_range[0]-thetas, theta_range[1]-thetas) # d_theta + theta in range [theta_range[0], theta_range[1]]
d_phis = d_phis.clamp(phi_range[0]-init_phis, phi_range[1]-init_phis) # d_phi + init_phi in range [phi_range[0], phi_range[1]] # init phi is before convert to 0-2pi
##
num_frames = opt.num_frames
scale = torch.arange(num_frames, device=device) / num_frames # 0,1/f, ... f-1/f, F
thetas_dyn = thetas + scale * d_thetas # F
phis_dyn = init_phis + scale * d_phis # F
phis_dyn[phis_dyn < 0] += 2 * np.pi
assert thetas_dyn[0] == thetas[0] and phis_dyn[0] == init_phis[0]
centers = torch.stack([
radius * torch.sin(thetas_dyn) * torch.sin(phis_dyn),
radius * torch.cos(thetas_dyn),
radius * torch.sin(thetas_dyn) * torch.cos(phis_dyn),
], dim=-1) # [B, 3] # F,3
# lookat
forward_vector = safe_normalize(centers - targets)
up_vector = torch.FloatTensor([0, 1, 0]).to(device).unsqueeze(0).repeat(num_frames, 1)
right_vector = safe_normalize(torch.cross(forward_vector, up_vector, dim=-1))
if opt.jitter_pose:
up_noise = torch.randn_like(up_vector) * opt.jitter_up
else:
up_noise = 0
up_vector = safe_normalize(torch.cross(right_vector, forward_vector, dim=-1) + up_noise)
poses = torch.eye(4, dtype=torch.float, device=device).unsqueeze(0).repeat(num_frames, 1, 1)
poses[:, :3, :3] = torch.stack((right_vector, up_vector, forward_vector), dim=-1)
poses[:, :3, 3] = centers
if return_dirs:
dirs = get_view_direction(thetas_dyn, phis_dyn, angle_overhead, angle_front)
else:
dirs = None
# back to degree
thetas_dyn = thetas_dyn / np.pi * 180
phis_dyn = phis_dyn / np.pi * 180
radius = radius.repeat(num_frames)
return poses, dirs, thetas_dyn, phis_dyn, radius
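# Illustrative call sketch for rand_poses_trajectory() above. The `opt` object here is
# a hypothetical stand-in built with SimpleNamespace; in the project the options come
# from the training config. With opt.num_frames = F the function returns per-frame
# camera-to-world poses of shape [F, 4, 4], view-direction ids of shape [F], and
# thetas/phis (degrees) and radius of shape [F]; frame 0 matches the initially
# sampled (theta, phi).
#
#     from types import SimpleNamespace
#     opt = SimpleNamespace(num_frames=16, scale_delta=True,
#                           jitter_pose=False, jitter_up=0.02)
#     poses, dirs, thetas, phis, radius = rand_poses_trajectory(
#         1, "cuda", opt, radius_range=[3.0, 3.5], theta_range=[45, 105],
#         phi_range=[-180, 180], return_dirs=True)
#     # poses.shape == (16, 4, 4); thetas.shape == phis.shape == radius.shape == (16,)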
class DNeRFDataset(NeRFDataset):
def __init__(self, opt, device, type='train', H=256, W=256, size=100):
super().__init__(opt, device, type, H, W, size)
self.num_frames = opt.num_frames
self.num_test_frames = opt.get("num_test_frames", self.num_frames)
self.dynamic_cam_rate = self.opt.dynamic_cam_rate
self.precision = opt.get('precision', 64)
self.zero_precision = opt.get('zero_precision', self.precision)
logger.info(f"Training dataset, random time sampling precision is {self.precision}, zero time sampling precision is {self.zero_precision}")
def get_default_view_data(self):
H = int(self.opt.known_view_scale * self.H)
W = int(self.opt.known_view_scale * self.W)
cx = H / 2
cy = W / 2
radii = torch.FloatTensor(self.opt.ref_radii).to(self.device)
thetas = torch.FloatTensor(self.opt.ref_polars).to(self.device)
phis = torch.FloatTensor(self.opt.ref_azimuths).to(self.device)
poses, dirs = circle_poses(self.device, radius=radii, theta=thetas, phi=phis, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front)
fov = self.opt.default_fovy
focal = H / (2 * np.tan(np.deg2rad(fov) / 2))
intrinsics = np.array([focal, focal, cx, cy])
projection = torch.tensor([
[2*focal/W, 0, 0, 0],
[0, -2*focal/H, 0, 0],
[0, 0, -(self.far+self.near)/(self.far-self.near), -(2*self.far*self.near)/(self.far-self.near)],
[0, 0, -1, 0]
], dtype=torch.float32, device=self.device).unsqueeze(0).repeat(len(radii), 1, 1)
mvp = projection @ torch.inverse(poses) # [B, 4, 4]
# sample a low-resolution but full image
rays = get_rays(poses, intrinsics, H, W, -1)
if rays['rays_o'].size(0):
time = torch.FloatTensor([0]).reshape(rays['rays_o'].size(0), 1)
else:
time = None
data = {
'H': H,
'W': W,
'rays_o': rays['rays_o'],
'rays_d': rays['rays_d'],
'dir': dirs,
'mvp': mvp,
'time': time,
'polar': self.opt.ref_polars,
'azimuth': self.opt.ref_azimuths,
'radius': self.opt.ref_radii,
}
return data
def collate(self, index):
B = len(index)
dynamic_cam = False
start_from_zero = False
if self.training:
if np.random.random() < self.dynamic_cam_rate:
dynamic_cam = True
poses, dirs, thetas, phis, radius = rand_poses_trajectory(B, self.device, self.opt, radius_range=self.opt.radius_range, theta_range=self.opt.theta_range, phi_range=self.opt.phi_range, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front, uniform_sphere_rate=self.opt.uniform_sphere_rate)
## poses F,4,4
else:
# random pose on the fly, size 1,4,4 | poses, dirs, thetas, phis, radius = rand_poses(B, self.device, self.opt, radius_range=self.opt.radius_range, theta_range=self.opt.theta_range, phi_range=self.opt.phi_range, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front, uniform_sphere_rate=self.opt.uniform_sphere_rate) | 6 | 2023-11-23 10:34:08+00:00 | 12k |
tingxueronghua/ChartLlama-code | llava/model/language_model/mpt/modeling_mpt.py | [
{
"identifier": "attn_bias_shape",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n if (prefix_lm or not causal) or use_sequence_id:\n return (1, n_heads, seq_len, seq_len)\n return (1, n_heads, 1, seq_len)\n elif prefix_lm or use_sequence_id:\n return (1, 1, seq_len, seq_len)\n return None\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "build_attn_bias",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n (device, dtype) = (attn_bias.device, attn_bias.dtype)\n attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))\n return attn_bias\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "MPTBlock",
"path": "llava/model/language_model/mpt/blocks.py",
"snippet": "class MPTBlock(nn.Module):\n\n def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):\n del kwargs\n super().__init__()\n norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]\n attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]\n self.norm_1 = norm_class(d_model, device=device)\n self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)\n self.norm_2 = norm_class(d_model, device=device)\n self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)\n self.resid_attn_dropout = nn.Dropout(resid_pdrop)\n self.resid_ffn_dropout = nn.Dropout(resid_pdrop)\n\n def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:\n a = self.norm_1(x)\n (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)\n x = x + self.resid_attn_dropout(b)\n m = self.norm_2(x)\n n = self.ffn(m)\n x = x + self.resid_ffn_dropout(n)\n return (x, attn_weights, past_key_value)"
},
{
"identifier": "SharedEmbedding",
"path": "llava/model/language_model/mpt/custom_embedding.py",
"snippet": "class SharedEmbedding(nn.Embedding):\n\n def forward(self, input: Tensor, unembed: bool=False) -> Tensor:\n if unembed:\n return F.linear(input, self.weight)\n return super().forward(input)"
},
{
"identifier": "NORM_CLASS_REGISTRY",
"path": "llava/model/language_model/mpt/norm.py",
"snippet": "NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}"
},
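This registry is what resolves the config's norm_type string into a class, e.g. (sketch, hypothetical width):

from llava.model.language_model.mpt.norm import NORM_CLASS_REGISTRY

norm_class = NORM_CLASS_REGISTRY['low_precision_layernorm']
norm_f = norm_class(256)                  # built with d_model, as MPTModel.__init__ does below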
{
"identifier": "MPTConfig",
"path": "llava/model/language_model/mpt/configuration_mpt.py",
"snippet": "class MPTConfig(PretrainedConfig):\n model_type = 'mpt'\n\n def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):\n \"\"\"The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the MLP.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n multiquery_attention (bool): Whether to use multiquery attention implementation.\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. 
Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n \"\"\"\n self.d_model = d_model\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.expansion_ratio = expansion_ratio\n self.max_seq_len = max_seq_len\n self.vocab_size = vocab_size\n self.resid_pdrop = resid_pdrop\n self.emb_pdrop = emb_pdrop\n self.learned_pos_emb = learned_pos_emb\n self.attn_config = attn_config\n self.init_device = init_device\n self.logit_scale = logit_scale\n self.no_bias = no_bias\n self.verbose = verbose\n self.embedding_fraction = embedding_fraction\n self.norm_type = norm_type\n self.use_cache = use_cache\n self.init_config = init_config\n if 'name' in kwargs:\n del kwargs['name']\n if 'loss_fn' in kwargs:\n del kwargs['loss_fn']\n super().__init__(**kwargs)\n self._validate_config()\n\n def _set_config_defaults(self, config, config_defaults):\n for (k, v) in config_defaults.items():\n if k not in config:\n config[k] = v\n return config\n\n def _validate_config(self):\n self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)\n self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)\n if self.d_model % self.n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads')\n if any((prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):\n raise ValueError(\"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1\")\n if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:\n raise ValueError(f\"Unknown attn_impl={self.attn_config['attn_impl']}\")\n if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')\n if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('alibi only implemented with torch and triton attention.')\n if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')\n if self.embedding_fraction > 1 or self.embedding_fraction <= 0:\n raise ValueError('model.embedding_fraction must be between 0 (exclusive) 
and 1 (inclusive)!')\n if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':\n raise ValueError(f\"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.\")\n if self.init_config.get('name', None) is None:\n raise ValueError(f\"self.init_config={self.init_config!r} 'name' needs to be set.\")\n if not self.learned_pos_emb and (not self.attn_config['alibi']):\n raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')"
},
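A deliberately tiny config is enough to exercise _validate_config; every field not passed falls back to the defaults documented above (sketch, assumes transformers is installed):

from llava.model.language_model.mpt.configuration_mpt import MPTConfig

cfg = MPTConfig(d_model=256, n_heads=8, n_layers=4, max_seq_len=512)
assert cfg.d_model % cfg.n_heads == 0     # one of the checks _validate_config enforces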
{
"identifier": "AutoTokenizerForMOD",
"path": "llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "class AutoTokenizerForMOD(AutoTokenizer):\n \"\"\"AutoTokenizer + Adaptation for MOD.\n\n A simple wrapper around AutoTokenizer to make instantiating\n an MOD-adapted tokenizer a bit easier.\n\n MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),\n a padding token, and a property to get the token ids of the\n sentinel tokens.\n \"\"\"\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n \"\"\"See `AutoTokenizer.from_pretrained` docstring.\"\"\"\n tokenizer = super().from_pretrained(*args, **kwargs)\n adapt_tokenizer_for_denoising(tokenizer)\n return tokenizer"
},
{
"identifier": "adapt_tokenizer_for_denoising",
"path": "llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):\n \"\"\"Adds sentinel tokens and padding token (if missing).\n\n Expands the tokenizer vocabulary to include sentinel tokens\n used in mixture-of-denoiser tasks as well as a padding token.\n\n All added tokens are added as special tokens. No tokens are\n added if sentinel tokens and padding token already exist.\n \"\"\"\n sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]\n tokenizer.add_tokens(sentinels_to_add, special_tokens=True)\n if tokenizer.pad_token is None:\n tokenizer.add_tokens('<pad>', special_tokens=True)\n tokenizer.pad_token = '<pad>'\n assert tokenizer.pad_token_id is not None\n sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])\n _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids\n tokenizer.sentinel_token_ids = _sentinel_token_ids"
},
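Together with AutoTokenizerForMOD above, adaptation is a one-liner; the checkpoint name is only illustrative and the call fetches a tokenizer from the Hub (sketch):

from llava.model.language_model.mpt.adapt_tokenizer import AutoTokenizerForMOD

tokenizer = AutoTokenizerForMOD.from_pretrained('gpt2')       # any HF tokenizer name works here
print(tokenizer.pad_token, tokenizer.sentinel_token_ids[:3])  # pad + sentinel ids added by the adapter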
{
"identifier": "add_bidirectional_mask_if_missing",
"path": "llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):\n \"\"\"Attempts to add bidirectional_mask to batch if missing.\n\n Raises:\n KeyError if bidirectional_mask is missing and can't be inferred\n \"\"\"\n if 'bidirectional_mask' not in batch:\n if batch.get('mode', None) == 'icl_task':\n batch['bidirectional_mask'] = batch['attention_mask'].clone()\n for (i, continuation_indices) in enumerate(batch['continuation_indices']):\n batch['bidirectional_mask'][i, continuation_indices] = 0\n elif 'labels' in batch and 'attention_mask' in batch:\n batch['bidirectional_mask'] = torch.logical_and(torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100)).type_as(batch['attention_mask'])\n else:\n raise KeyError('No bidirectional_mask in batch and not sure how to construct one.')"
},
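A sketch with a hypothetical one-sample batch: the prefix positions are exactly those with labels == -100, so the derived mask comes out as [[1, 1, 1, 0, 0]]:

import torch
from llava.model.language_model.mpt.hf_prefixlm_converter import add_bidirectional_mask_if_missing

batch = {
    'attention_mask': torch.tensor([[1, 1, 1, 1, 1]]),
    'labels': torch.tensor([[-100, -100, -100, 42, 43]]),    # first three tokens are the prompt
}
add_bidirectional_mask_if_missing(batch)
print(batch['bidirectional_mask'])                           # tensor([[1, 1, 1, 0, 0]])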
{
"identifier": "convert_hf_causal_lm_to_prefix_lm",
"path": "llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:\n \"\"\"Converts a HuggingFace Causal LM to a Prefix LM.\n\n Supported HuggingFace model classes:\n - `GPT2LMHeadModel`\n - `GPTNeoForCausalLM`\n - `GPTNeoXForCausalLM`\n - `GPTJForCausalLM`\n - `BloomForCausalLM`\n - `OPTForCausalLM`\n\n Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the\n `generate` method and/or select underlying methods depending on the model class.\n\n These changes preserve the model API, but add a new input to `forward`: \"bidirectional_mask\".\n\n Notes on training:\n To actually train the converted model as a Prefix LM, training batches will need to indicate\n the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.\n\n **This is not a standard input and requires custom layers either within or after your dataloader.**\n\n In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`\n such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.\n That is, the prefix portion of the sequence should not generate any loss. Loss should only be\n generated by the target portion of the sequence.\n\n Notes on `GPTNeoForCausalLM`:\n To simplify the implementation, \"global\" and \"local\" attention layers are handled differently.\n For \"global\" layers, we handle conversion as described above. For \"local\" layers, which use a\n causal attention mask within a restricted local window, we do not alter the masking.\n\n Notes on `forward` method conversion:\n After conversion, the `forward` method will handle a new input, `bidirectional_mask`,\n which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions\n belonging to the prefix (prefix tokens can attend to one another bidirectionally), and\n 0 indicates token positions belonging to the target.\n\n The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing\n causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset\n the causal masks before returning the result.\n\n Notes on `generate` method conversion:\n After conversion, the `generate` method will have the same signature but will internally\n convert all causal masks to be purely bidirectional, call the original `generate` method, and\n (where appropriate) reset the causal masks before returning the result.\n\n This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token\n \"prompt\" passed to `generate` (which is treated as the prefix) and then sequentially generates\n each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one\n another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and\n previously-generated tokens (also as expected in a Prefix LM).\n\n To preserve the API, the original methods are renamed to `_original_forward` and\n `_original_generate`, and replaced with new `forward` and `generate` methods that wrap\n them, respectively. Although implementation details vary by model class.\n \"\"\"\n if isinstance(model, _SUPPORTED_GPT_MODELS):\n return _convert_gpt_causal_lm_to_prefix_lm(model)\n elif isinstance(model, BloomForCausalLM):\n return _convert_bloom_causal_lm_to_prefix_lm(model)\n elif isinstance(model, OPTForCausalLM):\n return _convert_opt_causal_lm_to_prefix_lm(model)\n else:\n raise TypeError(f'Cannot convert model to Prefix LM. 
' + f'Model does not belong to set of supported HF models:' + f'\\n{_SUPPORTED_HF_MODELS}')"
},
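Conversion itself is a single call; the checkpoint below is just one example of the supported classes and requires a download (sketch):

from transformers import GPT2LMHeadModel
from llava.model.language_model.mpt.hf_prefixlm_converter import convert_hf_causal_lm_to_prefix_lm

model = GPT2LMHeadModel.from_pretrained('gpt2')
model = convert_hf_causal_lm_to_prefix_lm(model)
# forward()/generate() keep their signatures but now also accept bidirectional_mask=...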
{
"identifier": "init_empty_weights",
"path": "llava/model/language_model/mpt/meta_init_context.py",
"snippet": "@contextmanager\ndef init_empty_weights(include_buffers: bool=False):\n \"\"\"Meta initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the meta device, therefore creating an empty model. Useful when just\n initializing the model would blow the available RAM.\n\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n # Initialize a model with 100 billions parameters in no time and without using any RAM.\n with init_empty_weights():\n tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n ```\n\n <Tip warning={true}>\n\n Any model created under this context manager has no weights. As such you can't do something like\n `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n\n </Tip>\n \"\"\"\n with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:\n yield f"
},
{
"identifier": "MODEL_INIT_REGISTRY",
"path": "llava/model/language_model/mpt/param_init_fns.py",
"snippet": "MODEL_INIT_REGISTRY = {'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_}"
},
{
"identifier": "generic_param_init_fn_",
"path": "llava/model/language_model/mpt/param_init_fns.py",
"snippet": "def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):\n del kwargs\n if verbose > 1:\n warnings.warn(f'If model has bias parameters they are initialized to 0.')\n init_div_is_residual = init_div_is_residual\n if init_div_is_residual is False:\n div_is_residual = 1.0\n elif init_div_is_residual is True:\n div_is_residual = math.sqrt(2 * n_layers)\n elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):\n div_is_residual = init_div_is_residual\n elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():\n div_is_residual = float(init_div_is_residual)\n else:\n div_is_residual = 1.0\n raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')\n if init_div_is_residual is not False:\n if verbose > 1:\n warnings.warn(f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.')\n if isinstance(module, nn.Linear):\n if hasattr(module, '_fused'):\n fused_init_helper_(module, init_fn_)\n else:\n init_fn_(module.weight)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n if init_div_is_residual is not False and getattr(module, '_is_residual', False):\n with torch.no_grad():\n module.weight.div_(div_is_residual)\n elif isinstance(module, nn.Embedding):\n if emb_init_std is not None:\n std = emb_init_std\n if std == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')\n elif emb_init_uniform_lim is not None:\n lim = emb_init_uniform_lim\n if isinstance(lim, Sequence):\n if len(lim) > 2:\n raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')\n if lim[0] == lim[1]:\n warnings.warn(f'Embedding layer initialized to {lim[0]}.')\n else:\n if lim == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n lim = [-lim, lim]\n (a, b) = lim\n emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')\n else:\n emb_init_fn_ = init_fn_\n emb_init_fn_(module.weight)\n elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):\n if verbose > 1:\n warnings.warn(f'Norm weights are set to 1. 
If norm layer has a bias it is initialized to 0.')\n if hasattr(module, 'weight') and module.weight is not None:\n torch.nn.init.ones_(module.weight)\n if hasattr(module, 'bias') and module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.MultiheadAttention):\n if module._qkv_same_embed_dim:\n assert module.in_proj_weight is not None\n assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)\n assert d_model is not None\n _d = d_model\n splits = (0, _d, 2 * _d, 3 * _d)\n for (s, e) in zip(splits[:-1], splits[1:]):\n init_fn_(module.in_proj_weight[s:e])\n else:\n assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)\n assert module.in_proj_weight is None\n init_fn_(module.q_proj_weight)\n init_fn_(module.k_proj_weight)\n init_fn_(module.v_proj_weight)\n if module.in_proj_bias is not None:\n torch.nn.init.zeros_(module.in_proj_bias)\n if module.bias_k is not None:\n torch.nn.init.zeros_(module.bias_k)\n if module.bias_v is not None:\n torch.nn.init.zeros_(module.bias_v)\n init_fn_(module.out_proj.weight)\n if init_div_is_residual is not False and getattr(module.out_proj, '_is_residual', False):\n with torch.no_grad():\n module.out_proj.weight.div_(div_is_residual)\n if module.out_proj.bias is not None:\n torch.nn.init.zeros_(module.out_proj.bias)\n else:\n for _ in module.parameters(recurse=False):\n raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')"
}
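The MODEL_INIT_REGISTRY entries above presumably dispatch into this helper with a concrete init_fn_; calling it directly on a single layer looks like this (sketch, hypothetical sizes):

import torch.nn as nn
from llava.model.language_model.mpt.param_init_fns import generic_param_init_fn_

lin = nn.Linear(64, 64)
lin._is_residual = True                   # residual projections get divided by sqrt(2 * n_layers)
generic_param_init_fn_(lin, nn.init.kaiming_normal_, n_layers=12, d_model=64)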
] | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func | 7,343 | """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype) | self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max) | 1 | 2023-11-26 17:26:00+00:00 | 12k |
eliphatfs/calibur | calibur/ndarray_extension.py | [
{
"identifier": "container_catamorphism",
"path": "calibur/generic_utils.py",
"snippet": "def container_catamorphism(data, func):\n \"\"\"\n Transforms leaf elements in ``list``, ``dict``, ``tuple``, ``set`` with ``func``, aka. *tree-map*.\n Nested containers are also supported.\n \"\"\"\n if isinstance(data, dict):\n return {\n k: container_catamorphism(v, func) for k, v in data.items()\n }\n if isinstance(data, list):\n return [container_catamorphism(x, func) for x in data]\n if isinstance(data, tuple):\n return tuple(container_catamorphism(x, func) for x in data)\n if isinstance(data, set):\n return {container_catamorphism(x, func) for x in data}\n return func(data)"
},
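A short sketch of the tree-map behaviour (values are made up): every leaf is transformed while containers and nesting are preserved:

from calibur.generic_utils import container_catamorphism

nested = {'a': [1, 2, (3, 4)], 'b': {5}}
doubled = container_catamorphism(nested, lambda v: v * 2)
# {'a': [2, 4, (6, 8)], 'b': {10}}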
{
"identifier": "type_match",
"path": "calibur/generic_utils.py",
"snippet": "def type_match(matcher: Union[Type, Sequence[Type]], missing: Literal[\"ignore\", \"error\"] = \"ignore\"):\n \"\"\"\n :meta private:\n \"\"\"\n def decorator(func):\n @wraps(func)\n def wrapper(elem):\n if isinstance(elem, matcher):\n return func(elem)\n if missing == \"ignore\":\n return elem\n else:\n raise TypeError(\"Got unexpected type!\", type(elem), \"Expected\", matcher)\n return wrapper\n return decorator"
}
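Combined with container_catamorphism, the decorator lets a transform touch only one leaf type; a sketch with hypothetical data:

import numpy as np
from calibur.generic_utils import container_catamorphism, type_match

@type_match(np.ndarray)                   # non-array leaves pass through (missing='ignore')
def to_float32(arr):
    return arr.astype(np.float32)

data = {'rgb': np.zeros((4, 4, 3), dtype=np.float64), 'name': 'probe'}
out = container_catamorphism(data, to_float32)   # only the array leaf is cast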
] | import numpy
from typing import Union, TypeVar
from functools import wraps
from .generic_utils import container_catamorphism, type_match | 7,937 | brag: "GraphicsNDArray" = qattr((..., [2, 0, 3, 1]))
brab: "GraphicsNDArray" = qattr((..., [2, 0, 3, 2]))
braa: "GraphicsNDArray" = qattr((..., [2, 0, 3, 3]))
bgrr: "GraphicsNDArray" = qattr((..., [2, 1, 0, 0]))
bgrg: "GraphicsNDArray" = qattr((..., [2, 1, 0, 1]))
bgrb: "GraphicsNDArray" = qattr((..., [2, 1, 0, 2]))
bgra: "GraphicsNDArray" = qattr((..., [2, 1, 0, 3]))
bggr: "GraphicsNDArray" = qattr((..., [2, 1, 1, 0]))
bggg: "GraphicsNDArray" = qattr((..., [2, 1, 1, 1]))
bggb: "GraphicsNDArray" = qattr((..., [2, 1, 1, 2]))
bgga: "GraphicsNDArray" = qattr((..., [2, 1, 1, 3]))
bgbr: "GraphicsNDArray" = qattr((..., [2, 1, 2, 0]))
bgbg: "GraphicsNDArray" = qattr((..., [2, 1, 2, 1]))
bgbb: "GraphicsNDArray" = qattr((..., [2, 1, 2, 2]))
bgba: "GraphicsNDArray" = qattr((..., [2, 1, 2, 3]))
bgar: "GraphicsNDArray" = qattr((..., [2, 1, 3, 0]))
bgag: "GraphicsNDArray" = qattr((..., [2, 1, 3, 1]))
bgab: "GraphicsNDArray" = qattr((..., [2, 1, 3, 2]))
bgaa: "GraphicsNDArray" = qattr((..., [2, 1, 3, 3]))
bbrr: "GraphicsNDArray" = qattr((..., [2, 2, 0, 0]))
bbrg: "GraphicsNDArray" = qattr((..., [2, 2, 0, 1]))
bbrb: "GraphicsNDArray" = qattr((..., [2, 2, 0, 2]))
bbra: "GraphicsNDArray" = qattr((..., [2, 2, 0, 3]))
bbgr: "GraphicsNDArray" = qattr((..., [2, 2, 1, 0]))
bbgg: "GraphicsNDArray" = qattr((..., [2, 2, 1, 1]))
bbgb: "GraphicsNDArray" = qattr((..., [2, 2, 1, 2]))
bbga: "GraphicsNDArray" = qattr((..., [2, 2, 1, 3]))
bbbr: "GraphicsNDArray" = qattr((..., [2, 2, 2, 0]))
bbbg: "GraphicsNDArray" = qattr((..., [2, 2, 2, 1]))
bbbb: "GraphicsNDArray" = qattr((..., [2, 2, 2, 2]))
bbba: "GraphicsNDArray" = qattr((..., [2, 2, 2, 3]))
bbar: "GraphicsNDArray" = qattr((..., [2, 2, 3, 0]))
bbag: "GraphicsNDArray" = qattr((..., [2, 2, 3, 1]))
bbab: "GraphicsNDArray" = qattr((..., [2, 2, 3, 2]))
bbaa: "GraphicsNDArray" = qattr((..., [2, 2, 3, 3]))
barr: "GraphicsNDArray" = qattr((..., [2, 3, 0, 0]))
barg: "GraphicsNDArray" = qattr((..., [2, 3, 0, 1]))
barb: "GraphicsNDArray" = qattr((..., [2, 3, 0, 2]))
bara: "GraphicsNDArray" = qattr((..., [2, 3, 0, 3]))
bagr: "GraphicsNDArray" = qattr((..., [2, 3, 1, 0]))
bagg: "GraphicsNDArray" = qattr((..., [2, 3, 1, 1]))
bagb: "GraphicsNDArray" = qattr((..., [2, 3, 1, 2]))
baga: "GraphicsNDArray" = qattr((..., [2, 3, 1, 3]))
babr: "GraphicsNDArray" = qattr((..., [2, 3, 2, 0]))
babg: "GraphicsNDArray" = qattr((..., [2, 3, 2, 1]))
babb: "GraphicsNDArray" = qattr((..., [2, 3, 2, 2]))
baba: "GraphicsNDArray" = qattr((..., [2, 3, 2, 3]))
baar: "GraphicsNDArray" = qattr((..., [2, 3, 3, 0]))
baag: "GraphicsNDArray" = qattr((..., [2, 3, 3, 1]))
baab: "GraphicsNDArray" = qattr((..., [2, 3, 3, 2]))
baaa: "GraphicsNDArray" = qattr((..., [2, 3, 3, 3]))
arrr: "GraphicsNDArray" = qattr((..., [3, 0, 0, 0]))
arrg: "GraphicsNDArray" = qattr((..., [3, 0, 0, 1]))
arrb: "GraphicsNDArray" = qattr((..., [3, 0, 0, 2]))
arra: "GraphicsNDArray" = qattr((..., [3, 0, 0, 3]))
argr: "GraphicsNDArray" = qattr((..., [3, 0, 1, 0]))
argg: "GraphicsNDArray" = qattr((..., [3, 0, 1, 1]))
argb: "GraphicsNDArray" = qattr((..., [3, 0, 1, 2]))
arga: "GraphicsNDArray" = qattr((..., [3, 0, 1, 3]))
arbr: "GraphicsNDArray" = qattr((..., [3, 0, 2, 0]))
arbg: "GraphicsNDArray" = qattr((..., [3, 0, 2, 1]))
arbb: "GraphicsNDArray" = qattr((..., [3, 0, 2, 2]))
arba: "GraphicsNDArray" = qattr((..., [3, 0, 2, 3]))
arar: "GraphicsNDArray" = qattr((..., [3, 0, 3, 0]))
arag: "GraphicsNDArray" = qattr((..., [3, 0, 3, 1]))
arab: "GraphicsNDArray" = qattr((..., [3, 0, 3, 2]))
araa: "GraphicsNDArray" = qattr((..., [3, 0, 3, 3]))
agrr: "GraphicsNDArray" = qattr((..., [3, 1, 0, 0]))
agrg: "GraphicsNDArray" = qattr((..., [3, 1, 0, 1]))
agrb: "GraphicsNDArray" = qattr((..., [3, 1, 0, 2]))
agra: "GraphicsNDArray" = qattr((..., [3, 1, 0, 3]))
aggr: "GraphicsNDArray" = qattr((..., [3, 1, 1, 0]))
aggg: "GraphicsNDArray" = qattr((..., [3, 1, 1, 1]))
aggb: "GraphicsNDArray" = qattr((..., [3, 1, 1, 2]))
agga: "GraphicsNDArray" = qattr((..., [3, 1, 1, 3]))
agbr: "GraphicsNDArray" = qattr((..., [3, 1, 2, 0]))
agbg: "GraphicsNDArray" = qattr((..., [3, 1, 2, 1]))
agbb: "GraphicsNDArray" = qattr((..., [3, 1, 2, 2]))
agba: "GraphicsNDArray" = qattr((..., [3, 1, 2, 3]))
agar: "GraphicsNDArray" = qattr((..., [3, 1, 3, 0]))
agag: "GraphicsNDArray" = qattr((..., [3, 1, 3, 1]))
agab: "GraphicsNDArray" = qattr((..., [3, 1, 3, 2]))
agaa: "GraphicsNDArray" = qattr((..., [3, 1, 3, 3]))
abrr: "GraphicsNDArray" = qattr((..., [3, 2, 0, 0]))
abrg: "GraphicsNDArray" = qattr((..., [3, 2, 0, 1]))
abrb: "GraphicsNDArray" = qattr((..., [3, 2, 0, 2]))
abra: "GraphicsNDArray" = qattr((..., [3, 2, 0, 3]))
abgr: "GraphicsNDArray" = qattr((..., [3, 2, 1, 0]))
abgg: "GraphicsNDArray" = qattr((..., [3, 2, 1, 1]))
abgb: "GraphicsNDArray" = qattr((..., [3, 2, 1, 2]))
abga: "GraphicsNDArray" = qattr((..., [3, 2, 1, 3]))
abbr: "GraphicsNDArray" = qattr((..., [3, 2, 2, 0]))
abbg: "GraphicsNDArray" = qattr((..., [3, 2, 2, 1]))
abbb: "GraphicsNDArray" = qattr((..., [3, 2, 2, 2]))
abba: "GraphicsNDArray" = qattr((..., [3, 2, 2, 3]))
abar: "GraphicsNDArray" = qattr((..., [3, 2, 3, 0]))
abag: "GraphicsNDArray" = qattr((..., [3, 2, 3, 1]))
abab: "GraphicsNDArray" = qattr((..., [3, 2, 3, 2]))
abaa: "GraphicsNDArray" = qattr((..., [3, 2, 3, 3]))
aarr: "GraphicsNDArray" = qattr((..., [3, 3, 0, 0]))
aarg: "GraphicsNDArray" = qattr((..., [3, 3, 0, 1]))
aarb: "GraphicsNDArray" = qattr((..., [3, 3, 0, 2]))
aara: "GraphicsNDArray" = qattr((..., [3, 3, 0, 3]))
aagr: "GraphicsNDArray" = qattr((..., [3, 3, 1, 0]))
aagg: "GraphicsNDArray" = qattr((..., [3, 3, 1, 1]))
aagb: "GraphicsNDArray" = qattr((..., [3, 3, 1, 2]))
aaga: "GraphicsNDArray" = qattr((..., [3, 3, 1, 3]))
aabr: "GraphicsNDArray" = qattr((..., [3, 3, 2, 0]))
aabg: "GraphicsNDArray" = qattr((..., [3, 3, 2, 1]))
aabb: "GraphicsNDArray" = qattr((..., [3, 3, 2, 2]))
aaba: "GraphicsNDArray" = qattr((..., [3, 3, 2, 3]))
aaar: "GraphicsNDArray" = qattr((..., [3, 3, 3, 0]))
aaag: "GraphicsNDArray" = qattr((..., [3, 3, 3, 1]))
aaab: "GraphicsNDArray" = qattr((..., [3, 3, 3, 2]))
aaaa: "GraphicsNDArray" = qattr((..., [3, 3, 3, 3]))
NDArray = Union[numpy.ndarray, GraphicsNDArray]
T = TypeVar("T") |
USE_QUICK_ATTRIBUTES = True
class GraphicsNDArray(numpy.ndarray):
"""
A subclass of ``numpy.ndarray``.
It adds GLSL-like access to components,
e.g. ``a.xyz / a.w`` is a shorthand for ``a[..., [0, 1, 2]] / a[..., [3]]``.
Note that single-component results retains a singleton ``1`` dimension.
"""
def __new__(cls, input_array):
return numpy.asarray(input_array).view(cls)
def __array_function__(self, func, types, args, kwargs):
result = super().__array_function__(func, types, args, kwargs)
if isinstance(result, numpy.ndarray):
# print("__array_function__ cast")
return GraphicsNDArray(result)
return result
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): # this method is called whenever you use a ufunc
f = {
"reduce": ufunc.reduce,
"accumulate": ufunc.accumulate,
"reduceat": ufunc.reduceat,
"outer": ufunc.outer,
"at": ufunc.at,
"__call__": ufunc,
}
# print("__array_ufunc__ cast")
output = GraphicsNDArray(f[method](*(numpy.asarray(i) for i in inputs), **kwargs))
return output
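# Usage sketch (hypothetical values) for the swizzle properties defined below:
#   v = GraphicsNDArray(numpy.array([[1.0, 2.0, 3.0, 4.0]]))
#   v.xyz            # array([[1., 2., 3.]]) -- same as v[..., [0, 1, 2]]
#   v.xyz / v.w      # perspective divide; single-component w keeps its trailing length-1 axis
#   v.bgr            # array([[3., 2., 1.]]) -- color aliases reuse the same component indices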
def qattr(x: tuple):
return property(lambda self: self[x])
if USE_QUICK_ATTRIBUTES:
x: "GraphicsNDArray" = qattr((..., [0]))
y: "GraphicsNDArray" = qattr((..., [1]))
z: "GraphicsNDArray" = qattr((..., [2]))
w: "GraphicsNDArray" = qattr((..., [3]))
xx: "GraphicsNDArray" = qattr((..., [0, 0]))
xy: "GraphicsNDArray" = qattr((..., [0, 1]))
xz: "GraphicsNDArray" = qattr((..., [0, 2]))
xw: "GraphicsNDArray" = qattr((..., [0, 3]))
yx: "GraphicsNDArray" = qattr((..., [1, 0]))
yy: "GraphicsNDArray" = qattr((..., [1, 1]))
yz: "GraphicsNDArray" = qattr((..., [1, 2]))
yw: "GraphicsNDArray" = qattr((..., [1, 3]))
zx: "GraphicsNDArray" = qattr((..., [2, 0]))
zy: "GraphicsNDArray" = qattr((..., [2, 1]))
zz: "GraphicsNDArray" = qattr((..., [2, 2]))
zw: "GraphicsNDArray" = qattr((..., [2, 3]))
wx: "GraphicsNDArray" = qattr((..., [3, 0]))
wy: "GraphicsNDArray" = qattr((..., [3, 1]))
wz: "GraphicsNDArray" = qattr((..., [3, 2]))
ww: "GraphicsNDArray" = qattr((..., [3, 3]))
xxx: "GraphicsNDArray" = qattr((..., [0, 0, 0]))
xxy: "GraphicsNDArray" = qattr((..., [0, 0, 1]))
xxz: "GraphicsNDArray" = qattr((..., [0, 0, 2]))
xxw: "GraphicsNDArray" = qattr((..., [0, 0, 3]))
xyx: "GraphicsNDArray" = qattr((..., [0, 1, 0]))
xyy: "GraphicsNDArray" = qattr((..., [0, 1, 1]))
xyz: "GraphicsNDArray" = qattr((..., [0, 1, 2]))
xyw: "GraphicsNDArray" = qattr((..., [0, 1, 3]))
xzx: "GraphicsNDArray" = qattr((..., [0, 2, 0]))
xzy: "GraphicsNDArray" = qattr((..., [0, 2, 1]))
xzz: "GraphicsNDArray" = qattr((..., [0, 2, 2]))
xzw: "GraphicsNDArray" = qattr((..., [0, 2, 3]))
xwx: "GraphicsNDArray" = qattr((..., [0, 3, 0]))
xwy: "GraphicsNDArray" = qattr((..., [0, 3, 1]))
xwz: "GraphicsNDArray" = qattr((..., [0, 3, 2]))
xww: "GraphicsNDArray" = qattr((..., [0, 3, 3]))
yxx: "GraphicsNDArray" = qattr((..., [1, 0, 0]))
yxy: "GraphicsNDArray" = qattr((..., [1, 0, 1]))
yxz: "GraphicsNDArray" = qattr((..., [1, 0, 2]))
yxw: "GraphicsNDArray" = qattr((..., [1, 0, 3]))
yyx: "GraphicsNDArray" = qattr((..., [1, 1, 0]))
yyy: "GraphicsNDArray" = qattr((..., [1, 1, 1]))
yyz: "GraphicsNDArray" = qattr((..., [1, 1, 2]))
yyw: "GraphicsNDArray" = qattr((..., [1, 1, 3]))
yzx: "GraphicsNDArray" = qattr((..., [1, 2, 0]))
yzy: "GraphicsNDArray" = qattr((..., [1, 2, 1]))
yzz: "GraphicsNDArray" = qattr((..., [1, 2, 2]))
yzw: "GraphicsNDArray" = qattr((..., [1, 2, 3]))
ywx: "GraphicsNDArray" = qattr((..., [1, 3, 0]))
ywy: "GraphicsNDArray" = qattr((..., [1, 3, 1]))
ywz: "GraphicsNDArray" = qattr((..., [1, 3, 2]))
yww: "GraphicsNDArray" = qattr((..., [1, 3, 3]))
zxx: "GraphicsNDArray" = qattr((..., [2, 0, 0]))
zxy: "GraphicsNDArray" = qattr((..., [2, 0, 1]))
zxz: "GraphicsNDArray" = qattr((..., [2, 0, 2]))
zxw: "GraphicsNDArray" = qattr((..., [2, 0, 3]))
zyx: "GraphicsNDArray" = qattr((..., [2, 1, 0]))
zyy: "GraphicsNDArray" = qattr((..., [2, 1, 1]))
zyz: "GraphicsNDArray" = qattr((..., [2, 1, 2]))
zyw: "GraphicsNDArray" = qattr((..., [2, 1, 3]))
zzx: "GraphicsNDArray" = qattr((..., [2, 2, 0]))
zzy: "GraphicsNDArray" = qattr((..., [2, 2, 1]))
zzz: "GraphicsNDArray" = qattr((..., [2, 2, 2]))
zzw: "GraphicsNDArray" = qattr((..., [2, 2, 3]))
zwx: "GraphicsNDArray" = qattr((..., [2, 3, 0]))
zwy: "GraphicsNDArray" = qattr((..., [2, 3, 1]))
zwz: "GraphicsNDArray" = qattr((..., [2, 3, 2]))
zww: "GraphicsNDArray" = qattr((..., [2, 3, 3]))
wxx: "GraphicsNDArray" = qattr((..., [3, 0, 0]))
wxy: "GraphicsNDArray" = qattr((..., [3, 0, 1]))
wxz: "GraphicsNDArray" = qattr((..., [3, 0, 2]))
wxw: "GraphicsNDArray" = qattr((..., [3, 0, 3]))
wyx: "GraphicsNDArray" = qattr((..., [3, 1, 0]))
wyy: "GraphicsNDArray" = qattr((..., [3, 1, 1]))
wyz: "GraphicsNDArray" = qattr((..., [3, 1, 2]))
wyw: "GraphicsNDArray" = qattr((..., [3, 1, 3]))
wzx: "GraphicsNDArray" = qattr((..., [3, 2, 0]))
wzy: "GraphicsNDArray" = qattr((..., [3, 2, 1]))
wzz: "GraphicsNDArray" = qattr((..., [3, 2, 2]))
wzw: "GraphicsNDArray" = qattr((..., [3, 2, 3]))
wwx: "GraphicsNDArray" = qattr((..., [3, 3, 0]))
wwy: "GraphicsNDArray" = qattr((..., [3, 3, 1]))
wwz: "GraphicsNDArray" = qattr((..., [3, 3, 2]))
www: "GraphicsNDArray" = qattr((..., [3, 3, 3]))
xxxx: "GraphicsNDArray" = qattr((..., [0, 0, 0, 0]))
xxxy: "GraphicsNDArray" = qattr((..., [0, 0, 0, 1]))
xxxz: "GraphicsNDArray" = qattr((..., [0, 0, 0, 2]))
xxxw: "GraphicsNDArray" = qattr((..., [0, 0, 0, 3]))
xxyx: "GraphicsNDArray" = qattr((..., [0, 0, 1, 0]))
xxyy: "GraphicsNDArray" = qattr((..., [0, 0, 1, 1]))
xxyz: "GraphicsNDArray" = qattr((..., [0, 0, 1, 2]))
xxyw: "GraphicsNDArray" = qattr((..., [0, 0, 1, 3]))
xxzx: "GraphicsNDArray" = qattr((..., [0, 0, 2, 0]))
xxzy: "GraphicsNDArray" = qattr((..., [0, 0, 2, 1]))
xxzz: "GraphicsNDArray" = qattr((..., [0, 0, 2, 2]))
xxzw: "GraphicsNDArray" = qattr((..., [0, 0, 2, 3]))
xxwx: "GraphicsNDArray" = qattr((..., [0, 0, 3, 0]))
xxwy: "GraphicsNDArray" = qattr((..., [0, 0, 3, 1]))
xxwz: "GraphicsNDArray" = qattr((..., [0, 0, 3, 2]))
xxww: "GraphicsNDArray" = qattr((..., [0, 0, 3, 3]))
xyxx: "GraphicsNDArray" = qattr((..., [0, 1, 0, 0]))
xyxy: "GraphicsNDArray" = qattr((..., [0, 1, 0, 1]))
xyxz: "GraphicsNDArray" = qattr((..., [0, 1, 0, 2]))
xyxw: "GraphicsNDArray" = qattr((..., [0, 1, 0, 3]))
xyyx: "GraphicsNDArray" = qattr((..., [0, 1, 1, 0]))
xyyy: "GraphicsNDArray" = qattr((..., [0, 1, 1, 1]))
xyyz: "GraphicsNDArray" = qattr((..., [0, 1, 1, 2]))
xyyw: "GraphicsNDArray" = qattr((..., [0, 1, 1, 3]))
xyzx: "GraphicsNDArray" = qattr((..., [0, 1, 2, 0]))
xyzy: "GraphicsNDArray" = qattr((..., [0, 1, 2, 1]))
xyzz: "GraphicsNDArray" = qattr((..., [0, 1, 2, 2]))
xyzw: "GraphicsNDArray" = qattr((..., [0, 1, 2, 3]))
xywx: "GraphicsNDArray" = qattr((..., [0, 1, 3, 0]))
xywy: "GraphicsNDArray" = qattr((..., [0, 1, 3, 1]))
xywz: "GraphicsNDArray" = qattr((..., [0, 1, 3, 2]))
xyww: "GraphicsNDArray" = qattr((..., [0, 1, 3, 3]))
xzxx: "GraphicsNDArray" = qattr((..., [0, 2, 0, 0]))
xzxy: "GraphicsNDArray" = qattr((..., [0, 2, 0, 1]))
xzxz: "GraphicsNDArray" = qattr((..., [0, 2, 0, 2]))
xzxw: "GraphicsNDArray" = qattr((..., [0, 2, 0, 3]))
xzyx: "GraphicsNDArray" = qattr((..., [0, 2, 1, 0]))
xzyy: "GraphicsNDArray" = qattr((..., [0, 2, 1, 1]))
xzyz: "GraphicsNDArray" = qattr((..., [0, 2, 1, 2]))
xzyw: "GraphicsNDArray" = qattr((..., [0, 2, 1, 3]))
xzzx: "GraphicsNDArray" = qattr((..., [0, 2, 2, 0]))
xzzy: "GraphicsNDArray" = qattr((..., [0, 2, 2, 1]))
xzzz: "GraphicsNDArray" = qattr((..., [0, 2, 2, 2]))
xzzw: "GraphicsNDArray" = qattr((..., [0, 2, 2, 3]))
xzwx: "GraphicsNDArray" = qattr((..., [0, 2, 3, 0]))
xzwy: "GraphicsNDArray" = qattr((..., [0, 2, 3, 1]))
xzwz: "GraphicsNDArray" = qattr((..., [0, 2, 3, 2]))
xzww: "GraphicsNDArray" = qattr((..., [0, 2, 3, 3]))
xwxx: "GraphicsNDArray" = qattr((..., [0, 3, 0, 0]))
xwxy: "GraphicsNDArray" = qattr((..., [0, 3, 0, 1]))
xwxz: "GraphicsNDArray" = qattr((..., [0, 3, 0, 2]))
xwxw: "GraphicsNDArray" = qattr((..., [0, 3, 0, 3]))
xwyx: "GraphicsNDArray" = qattr((..., [0, 3, 1, 0]))
xwyy: "GraphicsNDArray" = qattr((..., [0, 3, 1, 1]))
xwyz: "GraphicsNDArray" = qattr((..., [0, 3, 1, 2]))
xwyw: "GraphicsNDArray" = qattr((..., [0, 3, 1, 3]))
xwzx: "GraphicsNDArray" = qattr((..., [0, 3, 2, 0]))
xwzy: "GraphicsNDArray" = qattr((..., [0, 3, 2, 1]))
xwzz: "GraphicsNDArray" = qattr((..., [0, 3, 2, 2]))
xwzw: "GraphicsNDArray" = qattr((..., [0, 3, 2, 3]))
xwwx: "GraphicsNDArray" = qattr((..., [0, 3, 3, 0]))
xwwy: "GraphicsNDArray" = qattr((..., [0, 3, 3, 1]))
xwwz: "GraphicsNDArray" = qattr((..., [0, 3, 3, 2]))
xwww: "GraphicsNDArray" = qattr((..., [0, 3, 3, 3]))
yxxx: "GraphicsNDArray" = qattr((..., [1, 0, 0, 0]))
yxxy: "GraphicsNDArray" = qattr((..., [1, 0, 0, 1]))
yxxz: "GraphicsNDArray" = qattr((..., [1, 0, 0, 2]))
yxxw: "GraphicsNDArray" = qattr((..., [1, 0, 0, 3]))
yxyx: "GraphicsNDArray" = qattr((..., [1, 0, 1, 0]))
yxyy: "GraphicsNDArray" = qattr((..., [1, 0, 1, 1]))
yxyz: "GraphicsNDArray" = qattr((..., [1, 0, 1, 2]))
yxyw: "GraphicsNDArray" = qattr((..., [1, 0, 1, 3]))
yxzx: "GraphicsNDArray" = qattr((..., [1, 0, 2, 0]))
yxzy: "GraphicsNDArray" = qattr((..., [1, 0, 2, 1]))
yxzz: "GraphicsNDArray" = qattr((..., [1, 0, 2, 2]))
yxzw: "GraphicsNDArray" = qattr((..., [1, 0, 2, 3]))
yxwx: "GraphicsNDArray" = qattr((..., [1, 0, 3, 0]))
yxwy: "GraphicsNDArray" = qattr((..., [1, 0, 3, 1]))
yxwz: "GraphicsNDArray" = qattr((..., [1, 0, 3, 2]))
yxww: "GraphicsNDArray" = qattr((..., [1, 0, 3, 3]))
yyxx: "GraphicsNDArray" = qattr((..., [1, 1, 0, 0]))
yyxy: "GraphicsNDArray" = qattr((..., [1, 1, 0, 1]))
yyxz: "GraphicsNDArray" = qattr((..., [1, 1, 0, 2]))
yyxw: "GraphicsNDArray" = qattr((..., [1, 1, 0, 3]))
yyyx: "GraphicsNDArray" = qattr((..., [1, 1, 1, 0]))
yyyy: "GraphicsNDArray" = qattr((..., [1, 1, 1, 1]))
yyyz: "GraphicsNDArray" = qattr((..., [1, 1, 1, 2]))
yyyw: "GraphicsNDArray" = qattr((..., [1, 1, 1, 3]))
yyzx: "GraphicsNDArray" = qattr((..., [1, 1, 2, 0]))
yyzy: "GraphicsNDArray" = qattr((..., [1, 1, 2, 1]))
yyzz: "GraphicsNDArray" = qattr((..., [1, 1, 2, 2]))
yyzw: "GraphicsNDArray" = qattr((..., [1, 1, 2, 3]))
yywx: "GraphicsNDArray" = qattr((..., [1, 1, 3, 0]))
yywy: "GraphicsNDArray" = qattr((..., [1, 1, 3, 1]))
yywz: "GraphicsNDArray" = qattr((..., [1, 1, 3, 2]))
yyww: "GraphicsNDArray" = qattr((..., [1, 1, 3, 3]))
yzxx: "GraphicsNDArray" = qattr((..., [1, 2, 0, 0]))
yzxy: "GraphicsNDArray" = qattr((..., [1, 2, 0, 1]))
yzxz: "GraphicsNDArray" = qattr((..., [1, 2, 0, 2]))
yzxw: "GraphicsNDArray" = qattr((..., [1, 2, 0, 3]))
yzyx: "GraphicsNDArray" = qattr((..., [1, 2, 1, 0]))
yzyy: "GraphicsNDArray" = qattr((..., [1, 2, 1, 1]))
yzyz: "GraphicsNDArray" = qattr((..., [1, 2, 1, 2]))
yzyw: "GraphicsNDArray" = qattr((..., [1, 2, 1, 3]))
yzzx: "GraphicsNDArray" = qattr((..., [1, 2, 2, 0]))
yzzy: "GraphicsNDArray" = qattr((..., [1, 2, 2, 1]))
yzzz: "GraphicsNDArray" = qattr((..., [1, 2, 2, 2]))
yzzw: "GraphicsNDArray" = qattr((..., [1, 2, 2, 3]))
yzwx: "GraphicsNDArray" = qattr((..., [1, 2, 3, 0]))
yzwy: "GraphicsNDArray" = qattr((..., [1, 2, 3, 1]))
yzwz: "GraphicsNDArray" = qattr((..., [1, 2, 3, 2]))
yzww: "GraphicsNDArray" = qattr((..., [1, 2, 3, 3]))
ywxx: "GraphicsNDArray" = qattr((..., [1, 3, 0, 0]))
ywxy: "GraphicsNDArray" = qattr((..., [1, 3, 0, 1]))
ywxz: "GraphicsNDArray" = qattr((..., [1, 3, 0, 2]))
ywxw: "GraphicsNDArray" = qattr((..., [1, 3, 0, 3]))
ywyx: "GraphicsNDArray" = qattr((..., [1, 3, 1, 0]))
ywyy: "GraphicsNDArray" = qattr((..., [1, 3, 1, 1]))
ywyz: "GraphicsNDArray" = qattr((..., [1, 3, 1, 2]))
ywyw: "GraphicsNDArray" = qattr((..., [1, 3, 1, 3]))
ywzx: "GraphicsNDArray" = qattr((..., [1, 3, 2, 0]))
ywzy: "GraphicsNDArray" = qattr((..., [1, 3, 2, 1]))
ywzz: "GraphicsNDArray" = qattr((..., [1, 3, 2, 2]))
ywzw: "GraphicsNDArray" = qattr((..., [1, 3, 2, 3]))
ywwx: "GraphicsNDArray" = qattr((..., [1, 3, 3, 0]))
ywwy: "GraphicsNDArray" = qattr((..., [1, 3, 3, 1]))
ywwz: "GraphicsNDArray" = qattr((..., [1, 3, 3, 2]))
ywww: "GraphicsNDArray" = qattr((..., [1, 3, 3, 3]))
zxxx: "GraphicsNDArray" = qattr((..., [2, 0, 0, 0]))
zxxy: "GraphicsNDArray" = qattr((..., [2, 0, 0, 1]))
zxxz: "GraphicsNDArray" = qattr((..., [2, 0, 0, 2]))
zxxw: "GraphicsNDArray" = qattr((..., [2, 0, 0, 3]))
zxyx: "GraphicsNDArray" = qattr((..., [2, 0, 1, 0]))
zxyy: "GraphicsNDArray" = qattr((..., [2, 0, 1, 1]))
zxyz: "GraphicsNDArray" = qattr((..., [2, 0, 1, 2]))
zxyw: "GraphicsNDArray" = qattr((..., [2, 0, 1, 3]))
zxzx: "GraphicsNDArray" = qattr((..., [2, 0, 2, 0]))
zxzy: "GraphicsNDArray" = qattr((..., [2, 0, 2, 1]))
zxzz: "GraphicsNDArray" = qattr((..., [2, 0, 2, 2]))
zxzw: "GraphicsNDArray" = qattr((..., [2, 0, 2, 3]))
zxwx: "GraphicsNDArray" = qattr((..., [2, 0, 3, 0]))
zxwy: "GraphicsNDArray" = qattr((..., [2, 0, 3, 1]))
zxwz: "GraphicsNDArray" = qattr((..., [2, 0, 3, 2]))
zxww: "GraphicsNDArray" = qattr((..., [2, 0, 3, 3]))
zyxx: "GraphicsNDArray" = qattr((..., [2, 1, 0, 0]))
zyxy: "GraphicsNDArray" = qattr((..., [2, 1, 0, 1]))
zyxz: "GraphicsNDArray" = qattr((..., [2, 1, 0, 2]))
zyxw: "GraphicsNDArray" = qattr((..., [2, 1, 0, 3]))
zyyx: "GraphicsNDArray" = qattr((..., [2, 1, 1, 0]))
zyyy: "GraphicsNDArray" = qattr((..., [2, 1, 1, 1]))
zyyz: "GraphicsNDArray" = qattr((..., [2, 1, 1, 2]))
zyyw: "GraphicsNDArray" = qattr((..., [2, 1, 1, 3]))
zyzx: "GraphicsNDArray" = qattr((..., [2, 1, 2, 0]))
zyzy: "GraphicsNDArray" = qattr((..., [2, 1, 2, 1]))
zyzz: "GraphicsNDArray" = qattr((..., [2, 1, 2, 2]))
zyzw: "GraphicsNDArray" = qattr((..., [2, 1, 2, 3]))
zywx: "GraphicsNDArray" = qattr((..., [2, 1, 3, 0]))
zywy: "GraphicsNDArray" = qattr((..., [2, 1, 3, 1]))
zywz: "GraphicsNDArray" = qattr((..., [2, 1, 3, 2]))
zyww: "GraphicsNDArray" = qattr((..., [2, 1, 3, 3]))
zzxx: "GraphicsNDArray" = qattr((..., [2, 2, 0, 0]))
zzxy: "GraphicsNDArray" = qattr((..., [2, 2, 0, 1]))
zzxz: "GraphicsNDArray" = qattr((..., [2, 2, 0, 2]))
zzxw: "GraphicsNDArray" = qattr((..., [2, 2, 0, 3]))
zzyx: "GraphicsNDArray" = qattr((..., [2, 2, 1, 0]))
zzyy: "GraphicsNDArray" = qattr((..., [2, 2, 1, 1]))
zzyz: "GraphicsNDArray" = qattr((..., [2, 2, 1, 2]))
zzyw: "GraphicsNDArray" = qattr((..., [2, 2, 1, 3]))
zzzx: "GraphicsNDArray" = qattr((..., [2, 2, 2, 0]))
zzzy: "GraphicsNDArray" = qattr((..., [2, 2, 2, 1]))
zzzz: "GraphicsNDArray" = qattr((..., [2, 2, 2, 2]))
zzzw: "GraphicsNDArray" = qattr((..., [2, 2, 2, 3]))
zzwx: "GraphicsNDArray" = qattr((..., [2, 2, 3, 0]))
zzwy: "GraphicsNDArray" = qattr((..., [2, 2, 3, 1]))
zzwz: "GraphicsNDArray" = qattr((..., [2, 2, 3, 2]))
zzww: "GraphicsNDArray" = qattr((..., [2, 2, 3, 3]))
zwxx: "GraphicsNDArray" = qattr((..., [2, 3, 0, 0]))
zwxy: "GraphicsNDArray" = qattr((..., [2, 3, 0, 1]))
zwxz: "GraphicsNDArray" = qattr((..., [2, 3, 0, 2]))
zwxw: "GraphicsNDArray" = qattr((..., [2, 3, 0, 3]))
zwyx: "GraphicsNDArray" = qattr((..., [2, 3, 1, 0]))
zwyy: "GraphicsNDArray" = qattr((..., [2, 3, 1, 1]))
zwyz: "GraphicsNDArray" = qattr((..., [2, 3, 1, 2]))
zwyw: "GraphicsNDArray" = qattr((..., [2, 3, 1, 3]))
zwzx: "GraphicsNDArray" = qattr((..., [2, 3, 2, 0]))
zwzy: "GraphicsNDArray" = qattr((..., [2, 3, 2, 1]))
zwzz: "GraphicsNDArray" = qattr((..., [2, 3, 2, 2]))
zwzw: "GraphicsNDArray" = qattr((..., [2, 3, 2, 3]))
zwwx: "GraphicsNDArray" = qattr((..., [2, 3, 3, 0]))
zwwy: "GraphicsNDArray" = qattr((..., [2, 3, 3, 1]))
zwwz: "GraphicsNDArray" = qattr((..., [2, 3, 3, 2]))
zwww: "GraphicsNDArray" = qattr((..., [2, 3, 3, 3]))
wxxx: "GraphicsNDArray" = qattr((..., [3, 0, 0, 0]))
wxxy: "GraphicsNDArray" = qattr((..., [3, 0, 0, 1]))
wxxz: "GraphicsNDArray" = qattr((..., [3, 0, 0, 2]))
wxxw: "GraphicsNDArray" = qattr((..., [3, 0, 0, 3]))
wxyx: "GraphicsNDArray" = qattr((..., [3, 0, 1, 0]))
wxyy: "GraphicsNDArray" = qattr((..., [3, 0, 1, 1]))
wxyz: "GraphicsNDArray" = qattr((..., [3, 0, 1, 2]))
wxyw: "GraphicsNDArray" = qattr((..., [3, 0, 1, 3]))
wxzx: "GraphicsNDArray" = qattr((..., [3, 0, 2, 0]))
wxzy: "GraphicsNDArray" = qattr((..., [3, 0, 2, 1]))
wxzz: "GraphicsNDArray" = qattr((..., [3, 0, 2, 2]))
wxzw: "GraphicsNDArray" = qattr((..., [3, 0, 2, 3]))
wxwx: "GraphicsNDArray" = qattr((..., [3, 0, 3, 0]))
wxwy: "GraphicsNDArray" = qattr((..., [3, 0, 3, 1]))
wxwz: "GraphicsNDArray" = qattr((..., [3, 0, 3, 2]))
wxww: "GraphicsNDArray" = qattr((..., [3, 0, 3, 3]))
wyxx: "GraphicsNDArray" = qattr((..., [3, 1, 0, 0]))
wyxy: "GraphicsNDArray" = qattr((..., [3, 1, 0, 1]))
wyxz: "GraphicsNDArray" = qattr((..., [3, 1, 0, 2]))
wyxw: "GraphicsNDArray" = qattr((..., [3, 1, 0, 3]))
wyyx: "GraphicsNDArray" = qattr((..., [3, 1, 1, 0]))
wyyy: "GraphicsNDArray" = qattr((..., [3, 1, 1, 1]))
wyyz: "GraphicsNDArray" = qattr((..., [3, 1, 1, 2]))
wyyw: "GraphicsNDArray" = qattr((..., [3, 1, 1, 3]))
wyzx: "GraphicsNDArray" = qattr((..., [3, 1, 2, 0]))
wyzy: "GraphicsNDArray" = qattr((..., [3, 1, 2, 1]))
wyzz: "GraphicsNDArray" = qattr((..., [3, 1, 2, 2]))
wyzw: "GraphicsNDArray" = qattr((..., [3, 1, 2, 3]))
wywx: "GraphicsNDArray" = qattr((..., [3, 1, 3, 0]))
wywy: "GraphicsNDArray" = qattr((..., [3, 1, 3, 1]))
wywz: "GraphicsNDArray" = qattr((..., [3, 1, 3, 2]))
wyww: "GraphicsNDArray" = qattr((..., [3, 1, 3, 3]))
wzxx: "GraphicsNDArray" = qattr((..., [3, 2, 0, 0]))
wzxy: "GraphicsNDArray" = qattr((..., [3, 2, 0, 1]))
wzxz: "GraphicsNDArray" = qattr((..., [3, 2, 0, 2]))
wzxw: "GraphicsNDArray" = qattr((..., [3, 2, 0, 3]))
wzyx: "GraphicsNDArray" = qattr((..., [3, 2, 1, 0]))
wzyy: "GraphicsNDArray" = qattr((..., [3, 2, 1, 1]))
wzyz: "GraphicsNDArray" = qattr((..., [3, 2, 1, 2]))
wzyw: "GraphicsNDArray" = qattr((..., [3, 2, 1, 3]))
wzzx: "GraphicsNDArray" = qattr((..., [3, 2, 2, 0]))
wzzy: "GraphicsNDArray" = qattr((..., [3, 2, 2, 1]))
wzzz: "GraphicsNDArray" = qattr((..., [3, 2, 2, 2]))
wzzw: "GraphicsNDArray" = qattr((..., [3, 2, 2, 3]))
wzwx: "GraphicsNDArray" = qattr((..., [3, 2, 3, 0]))
wzwy: "GraphicsNDArray" = qattr((..., [3, 2, 3, 1]))
wzwz: "GraphicsNDArray" = qattr((..., [3, 2, 3, 2]))
wzww: "GraphicsNDArray" = qattr((..., [3, 2, 3, 3]))
wwxx: "GraphicsNDArray" = qattr((..., [3, 3, 0, 0]))
wwxy: "GraphicsNDArray" = qattr((..., [3, 3, 0, 1]))
wwxz: "GraphicsNDArray" = qattr((..., [3, 3, 0, 2]))
wwxw: "GraphicsNDArray" = qattr((..., [3, 3, 0, 3]))
wwyx: "GraphicsNDArray" = qattr((..., [3, 3, 1, 0]))
wwyy: "GraphicsNDArray" = qattr((..., [3, 3, 1, 1]))
wwyz: "GraphicsNDArray" = qattr((..., [3, 3, 1, 2]))
wwyw: "GraphicsNDArray" = qattr((..., [3, 3, 1, 3]))
wwzx: "GraphicsNDArray" = qattr((..., [3, 3, 2, 0]))
wwzy: "GraphicsNDArray" = qattr((..., [3, 3, 2, 1]))
wwzz: "GraphicsNDArray" = qattr((..., [3, 3, 2, 2]))
wwzw: "GraphicsNDArray" = qattr((..., [3, 3, 2, 3]))
wwwx: "GraphicsNDArray" = qattr((..., [3, 3, 3, 0]))
wwwy: "GraphicsNDArray" = qattr((..., [3, 3, 3, 1]))
wwwz: "GraphicsNDArray" = qattr((..., [3, 3, 3, 2]))
wwww: "GraphicsNDArray" = qattr((..., [3, 3, 3, 3]))
r: "GraphicsNDArray" = qattr((..., [0]))
g: "GraphicsNDArray" = qattr((..., [1]))
b: "GraphicsNDArray" = qattr((..., [2]))
a: "GraphicsNDArray" = qattr((..., [3]))
rr: "GraphicsNDArray" = qattr((..., [0, 0]))
rg: "GraphicsNDArray" = qattr((..., [0, 1]))
rb: "GraphicsNDArray" = qattr((..., [0, 2]))
ra: "GraphicsNDArray" = qattr((..., [0, 3]))
gr: "GraphicsNDArray" = qattr((..., [1, 0]))
gg: "GraphicsNDArray" = qattr((..., [1, 1]))
gb: "GraphicsNDArray" = qattr((..., [1, 2]))
ga: "GraphicsNDArray" = qattr((..., [1, 3]))
br: "GraphicsNDArray" = qattr((..., [2, 0]))
bg: "GraphicsNDArray" = qattr((..., [2, 1]))
bb: "GraphicsNDArray" = qattr((..., [2, 2]))
ba: "GraphicsNDArray" = qattr((..., [2, 3]))
ar: "GraphicsNDArray" = qattr((..., [3, 0]))
ag: "GraphicsNDArray" = qattr((..., [3, 1]))
ab: "GraphicsNDArray" = qattr((..., [3, 2]))
aa: "GraphicsNDArray" = qattr((..., [3, 3]))
rrr: "GraphicsNDArray" = qattr((..., [0, 0, 0]))
rrg: "GraphicsNDArray" = qattr((..., [0, 0, 1]))
rrb: "GraphicsNDArray" = qattr((..., [0, 0, 2]))
rra: "GraphicsNDArray" = qattr((..., [0, 0, 3]))
rgr: "GraphicsNDArray" = qattr((..., [0, 1, 0]))
rgg: "GraphicsNDArray" = qattr((..., [0, 1, 1]))
rgb: "GraphicsNDArray" = qattr((..., [0, 1, 2]))
rga: "GraphicsNDArray" = qattr((..., [0, 1, 3]))
rbr: "GraphicsNDArray" = qattr((..., [0, 2, 0]))
rbg: "GraphicsNDArray" = qattr((..., [0, 2, 1]))
rbb: "GraphicsNDArray" = qattr((..., [0, 2, 2]))
rba: "GraphicsNDArray" = qattr((..., [0, 2, 3]))
rar: "GraphicsNDArray" = qattr((..., [0, 3, 0]))
rag: "GraphicsNDArray" = qattr((..., [0, 3, 1]))
rab: "GraphicsNDArray" = qattr((..., [0, 3, 2]))
raa: "GraphicsNDArray" = qattr((..., [0, 3, 3]))
grr: "GraphicsNDArray" = qattr((..., [1, 0, 0]))
grg: "GraphicsNDArray" = qattr((..., [1, 0, 1]))
grb: "GraphicsNDArray" = qattr((..., [1, 0, 2]))
gra: "GraphicsNDArray" = qattr((..., [1, 0, 3]))
ggr: "GraphicsNDArray" = qattr((..., [1, 1, 0]))
ggg: "GraphicsNDArray" = qattr((..., [1, 1, 1]))
ggb: "GraphicsNDArray" = qattr((..., [1, 1, 2]))
gga: "GraphicsNDArray" = qattr((..., [1, 1, 3]))
gbr: "GraphicsNDArray" = qattr((..., [1, 2, 0]))
gbg: "GraphicsNDArray" = qattr((..., [1, 2, 1]))
gbb: "GraphicsNDArray" = qattr((..., [1, 2, 2]))
gba: "GraphicsNDArray" = qattr((..., [1, 2, 3]))
gar: "GraphicsNDArray" = qattr((..., [1, 3, 0]))
gag: "GraphicsNDArray" = qattr((..., [1, 3, 1]))
gab: "GraphicsNDArray" = qattr((..., [1, 3, 2]))
gaa: "GraphicsNDArray" = qattr((..., [1, 3, 3]))
brr: "GraphicsNDArray" = qattr((..., [2, 0, 0]))
brg: "GraphicsNDArray" = qattr((..., [2, 0, 1]))
brb: "GraphicsNDArray" = qattr((..., [2, 0, 2]))
bra: "GraphicsNDArray" = qattr((..., [2, 0, 3]))
bgr: "GraphicsNDArray" = qattr((..., [2, 1, 0]))
bgg: "GraphicsNDArray" = qattr((..., [2, 1, 1]))
bgb: "GraphicsNDArray" = qattr((..., [2, 1, 2]))
bga: "GraphicsNDArray" = qattr((..., [2, 1, 3]))
bbr: "GraphicsNDArray" = qattr((..., [2, 2, 0]))
bbg: "GraphicsNDArray" = qattr((..., [2, 2, 1]))
bbb: "GraphicsNDArray" = qattr((..., [2, 2, 2]))
bba: "GraphicsNDArray" = qattr((..., [2, 2, 3]))
bar: "GraphicsNDArray" = qattr((..., [2, 3, 0]))
bag: "GraphicsNDArray" = qattr((..., [2, 3, 1]))
bab: "GraphicsNDArray" = qattr((..., [2, 3, 2]))
baa: "GraphicsNDArray" = qattr((..., [2, 3, 3]))
arr: "GraphicsNDArray" = qattr((..., [3, 0, 0]))
arg: "GraphicsNDArray" = qattr((..., [3, 0, 1]))
arb: "GraphicsNDArray" = qattr((..., [3, 0, 2]))
ara: "GraphicsNDArray" = qattr((..., [3, 0, 3]))
agr: "GraphicsNDArray" = qattr((..., [3, 1, 0]))
agg: "GraphicsNDArray" = qattr((..., [3, 1, 1]))
agb: "GraphicsNDArray" = qattr((..., [3, 1, 2]))
aga: "GraphicsNDArray" = qattr((..., [3, 1, 3]))
abr: "GraphicsNDArray" = qattr((..., [3, 2, 0]))
abg: "GraphicsNDArray" = qattr((..., [3, 2, 1]))
abb: "GraphicsNDArray" = qattr((..., [3, 2, 2]))
aba: "GraphicsNDArray" = qattr((..., [3, 2, 3]))
aar: "GraphicsNDArray" = qattr((..., [3, 3, 0]))
aag: "GraphicsNDArray" = qattr((..., [3, 3, 1]))
aab: "GraphicsNDArray" = qattr((..., [3, 3, 2]))
aaa: "GraphicsNDArray" = qattr((..., [3, 3, 3]))
rrrr: "GraphicsNDArray" = qattr((..., [0, 0, 0, 0]))
rrrg: "GraphicsNDArray" = qattr((..., [0, 0, 0, 1]))
rrrb: "GraphicsNDArray" = qattr((..., [0, 0, 0, 2]))
rrra: "GraphicsNDArray" = qattr((..., [0, 0, 0, 3]))
rrgr: "GraphicsNDArray" = qattr((..., [0, 0, 1, 0]))
rrgg: "GraphicsNDArray" = qattr((..., [0, 0, 1, 1]))
rrgb: "GraphicsNDArray" = qattr((..., [0, 0, 1, 2]))
rrga: "GraphicsNDArray" = qattr((..., [0, 0, 1, 3]))
rrbr: "GraphicsNDArray" = qattr((..., [0, 0, 2, 0]))
rrbg: "GraphicsNDArray" = qattr((..., [0, 0, 2, 1]))
rrbb: "GraphicsNDArray" = qattr((..., [0, 0, 2, 2]))
rrba: "GraphicsNDArray" = qattr((..., [0, 0, 2, 3]))
rrar: "GraphicsNDArray" = qattr((..., [0, 0, 3, 0]))
rrag: "GraphicsNDArray" = qattr((..., [0, 0, 3, 1]))
rrab: "GraphicsNDArray" = qattr((..., [0, 0, 3, 2]))
rraa: "GraphicsNDArray" = qattr((..., [0, 0, 3, 3]))
rgrr: "GraphicsNDArray" = qattr((..., [0, 1, 0, 0]))
rgrg: "GraphicsNDArray" = qattr((..., [0, 1, 0, 1]))
rgrb: "GraphicsNDArray" = qattr((..., [0, 1, 0, 2]))
rgra: "GraphicsNDArray" = qattr((..., [0, 1, 0, 3]))
rggr: "GraphicsNDArray" = qattr((..., [0, 1, 1, 0]))
rggg: "GraphicsNDArray" = qattr((..., [0, 1, 1, 1]))
rggb: "GraphicsNDArray" = qattr((..., [0, 1, 1, 2]))
rgga: "GraphicsNDArray" = qattr((..., [0, 1, 1, 3]))
rgbr: "GraphicsNDArray" = qattr((..., [0, 1, 2, 0]))
rgbg: "GraphicsNDArray" = qattr((..., [0, 1, 2, 1]))
rgbb: "GraphicsNDArray" = qattr((..., [0, 1, 2, 2]))
rgba: "GraphicsNDArray" = qattr((..., [0, 1, 2, 3]))
rgar: "GraphicsNDArray" = qattr((..., [0, 1, 3, 0]))
rgag: "GraphicsNDArray" = qattr((..., [0, 1, 3, 1]))
rgab: "GraphicsNDArray" = qattr((..., [0, 1, 3, 2]))
rgaa: "GraphicsNDArray" = qattr((..., [0, 1, 3, 3]))
rbrr: "GraphicsNDArray" = qattr((..., [0, 2, 0, 0]))
rbrg: "GraphicsNDArray" = qattr((..., [0, 2, 0, 1]))
rbrb: "GraphicsNDArray" = qattr((..., [0, 2, 0, 2]))
rbra: "GraphicsNDArray" = qattr((..., [0, 2, 0, 3]))
rbgr: "GraphicsNDArray" = qattr((..., [0, 2, 1, 0]))
rbgg: "GraphicsNDArray" = qattr((..., [0, 2, 1, 1]))
rbgb: "GraphicsNDArray" = qattr((..., [0, 2, 1, 2]))
rbga: "GraphicsNDArray" = qattr((..., [0, 2, 1, 3]))
rbbr: "GraphicsNDArray" = qattr((..., [0, 2, 2, 0]))
rbbg: "GraphicsNDArray" = qattr((..., [0, 2, 2, 1]))
rbbb: "GraphicsNDArray" = qattr((..., [0, 2, 2, 2]))
rbba: "GraphicsNDArray" = qattr((..., [0, 2, 2, 3]))
rbar: "GraphicsNDArray" = qattr((..., [0, 2, 3, 0]))
rbag: "GraphicsNDArray" = qattr((..., [0, 2, 3, 1]))
rbab: "GraphicsNDArray" = qattr((..., [0, 2, 3, 2]))
rbaa: "GraphicsNDArray" = qattr((..., [0, 2, 3, 3]))
rarr: "GraphicsNDArray" = qattr((..., [0, 3, 0, 0]))
rarg: "GraphicsNDArray" = qattr((..., [0, 3, 0, 1]))
rarb: "GraphicsNDArray" = qattr((..., [0, 3, 0, 2]))
rara: "GraphicsNDArray" = qattr((..., [0, 3, 0, 3]))
ragr: "GraphicsNDArray" = qattr((..., [0, 3, 1, 0]))
ragg: "GraphicsNDArray" = qattr((..., [0, 3, 1, 1]))
ragb: "GraphicsNDArray" = qattr((..., [0, 3, 1, 2]))
raga: "GraphicsNDArray" = qattr((..., [0, 3, 1, 3]))
rabr: "GraphicsNDArray" = qattr((..., [0, 3, 2, 0]))
rabg: "GraphicsNDArray" = qattr((..., [0, 3, 2, 1]))
rabb: "GraphicsNDArray" = qattr((..., [0, 3, 2, 2]))
raba: "GraphicsNDArray" = qattr((..., [0, 3, 2, 3]))
raar: "GraphicsNDArray" = qattr((..., [0, 3, 3, 0]))
raag: "GraphicsNDArray" = qattr((..., [0, 3, 3, 1]))
raab: "GraphicsNDArray" = qattr((..., [0, 3, 3, 2]))
raaa: "GraphicsNDArray" = qattr((..., [0, 3, 3, 3]))
grrr: "GraphicsNDArray" = qattr((..., [1, 0, 0, 0]))
grrg: "GraphicsNDArray" = qattr((..., [1, 0, 0, 1]))
grrb: "GraphicsNDArray" = qattr((..., [1, 0, 0, 2]))
grra: "GraphicsNDArray" = qattr((..., [1, 0, 0, 3]))
grgr: "GraphicsNDArray" = qattr((..., [1, 0, 1, 0]))
grgg: "GraphicsNDArray" = qattr((..., [1, 0, 1, 1]))
grgb: "GraphicsNDArray" = qattr((..., [1, 0, 1, 2]))
grga: "GraphicsNDArray" = qattr((..., [1, 0, 1, 3]))
grbr: "GraphicsNDArray" = qattr((..., [1, 0, 2, 0]))
grbg: "GraphicsNDArray" = qattr((..., [1, 0, 2, 1]))
grbb: "GraphicsNDArray" = qattr((..., [1, 0, 2, 2]))
grba: "GraphicsNDArray" = qattr((..., [1, 0, 2, 3]))
grar: "GraphicsNDArray" = qattr((..., [1, 0, 3, 0]))
grag: "GraphicsNDArray" = qattr((..., [1, 0, 3, 1]))
grab: "GraphicsNDArray" = qattr((..., [1, 0, 3, 2]))
graa: "GraphicsNDArray" = qattr((..., [1, 0, 3, 3]))
ggrr: "GraphicsNDArray" = qattr((..., [1, 1, 0, 0]))
ggrg: "GraphicsNDArray" = qattr((..., [1, 1, 0, 1]))
ggrb: "GraphicsNDArray" = qattr((..., [1, 1, 0, 2]))
ggra: "GraphicsNDArray" = qattr((..., [1, 1, 0, 3]))
gggr: "GraphicsNDArray" = qattr((..., [1, 1, 1, 0]))
gggg: "GraphicsNDArray" = qattr((..., [1, 1, 1, 1]))
gggb: "GraphicsNDArray" = qattr((..., [1, 1, 1, 2]))
ggga: "GraphicsNDArray" = qattr((..., [1, 1, 1, 3]))
ggbr: "GraphicsNDArray" = qattr((..., [1, 1, 2, 0]))
ggbg: "GraphicsNDArray" = qattr((..., [1, 1, 2, 1]))
ggbb: "GraphicsNDArray" = qattr((..., [1, 1, 2, 2]))
ggba: "GraphicsNDArray" = qattr((..., [1, 1, 2, 3]))
ggar: "GraphicsNDArray" = qattr((..., [1, 1, 3, 0]))
ggag: "GraphicsNDArray" = qattr((..., [1, 1, 3, 1]))
ggab: "GraphicsNDArray" = qattr((..., [1, 1, 3, 2]))
ggaa: "GraphicsNDArray" = qattr((..., [1, 1, 3, 3]))
gbrr: "GraphicsNDArray" = qattr((..., [1, 2, 0, 0]))
gbrg: "GraphicsNDArray" = qattr((..., [1, 2, 0, 1]))
gbrb: "GraphicsNDArray" = qattr((..., [1, 2, 0, 2]))
gbra: "GraphicsNDArray" = qattr((..., [1, 2, 0, 3]))
gbgr: "GraphicsNDArray" = qattr((..., [1, 2, 1, 0]))
gbgg: "GraphicsNDArray" = qattr((..., [1, 2, 1, 1]))
gbgb: "GraphicsNDArray" = qattr((..., [1, 2, 1, 2]))
gbga: "GraphicsNDArray" = qattr((..., [1, 2, 1, 3]))
gbbr: "GraphicsNDArray" = qattr((..., [1, 2, 2, 0]))
gbbg: "GraphicsNDArray" = qattr((..., [1, 2, 2, 1]))
gbbb: "GraphicsNDArray" = qattr((..., [1, 2, 2, 2]))
gbba: "GraphicsNDArray" = qattr((..., [1, 2, 2, 3]))
gbar: "GraphicsNDArray" = qattr((..., [1, 2, 3, 0]))
gbag: "GraphicsNDArray" = qattr((..., [1, 2, 3, 1]))
gbab: "GraphicsNDArray" = qattr((..., [1, 2, 3, 2]))
gbaa: "GraphicsNDArray" = qattr((..., [1, 2, 3, 3]))
garr: "GraphicsNDArray" = qattr((..., [1, 3, 0, 0]))
garg: "GraphicsNDArray" = qattr((..., [1, 3, 0, 1]))
garb: "GraphicsNDArray" = qattr((..., [1, 3, 0, 2]))
gara: "GraphicsNDArray" = qattr((..., [1, 3, 0, 3]))
gagr: "GraphicsNDArray" = qattr((..., [1, 3, 1, 0]))
gagg: "GraphicsNDArray" = qattr((..., [1, 3, 1, 1]))
gagb: "GraphicsNDArray" = qattr((..., [1, 3, 1, 2]))
gaga: "GraphicsNDArray" = qattr((..., [1, 3, 1, 3]))
gabr: "GraphicsNDArray" = qattr((..., [1, 3, 2, 0]))
gabg: "GraphicsNDArray" = qattr((..., [1, 3, 2, 1]))
gabb: "GraphicsNDArray" = qattr((..., [1, 3, 2, 2]))
gaba: "GraphicsNDArray" = qattr((..., [1, 3, 2, 3]))
gaar: "GraphicsNDArray" = qattr((..., [1, 3, 3, 0]))
gaag: "GraphicsNDArray" = qattr((..., [1, 3, 3, 1]))
gaab: "GraphicsNDArray" = qattr((..., [1, 3, 3, 2]))
gaaa: "GraphicsNDArray" = qattr((..., [1, 3, 3, 3]))
brrr: "GraphicsNDArray" = qattr((..., [2, 0, 0, 0]))
brrg: "GraphicsNDArray" = qattr((..., [2, 0, 0, 1]))
brrb: "GraphicsNDArray" = qattr((..., [2, 0, 0, 2]))
brra: "GraphicsNDArray" = qattr((..., [2, 0, 0, 3]))
brgr: "GraphicsNDArray" = qattr((..., [2, 0, 1, 0]))
brgg: "GraphicsNDArray" = qattr((..., [2, 0, 1, 1]))
brgb: "GraphicsNDArray" = qattr((..., [2, 0, 1, 2]))
brga: "GraphicsNDArray" = qattr((..., [2, 0, 1, 3]))
brbr: "GraphicsNDArray" = qattr((..., [2, 0, 2, 0]))
brbg: "GraphicsNDArray" = qattr((..., [2, 0, 2, 1]))
brbb: "GraphicsNDArray" = qattr((..., [2, 0, 2, 2]))
brba: "GraphicsNDArray" = qattr((..., [2, 0, 2, 3]))
brar: "GraphicsNDArray" = qattr((..., [2, 0, 3, 0]))
brag: "GraphicsNDArray" = qattr((..., [2, 0, 3, 1]))
brab: "GraphicsNDArray" = qattr((..., [2, 0, 3, 2]))
braa: "GraphicsNDArray" = qattr((..., [2, 0, 3, 3]))
bgrr: "GraphicsNDArray" = qattr((..., [2, 1, 0, 0]))
bgrg: "GraphicsNDArray" = qattr((..., [2, 1, 0, 1]))
bgrb: "GraphicsNDArray" = qattr((..., [2, 1, 0, 2]))
bgra: "GraphicsNDArray" = qattr((..., [2, 1, 0, 3]))
bggr: "GraphicsNDArray" = qattr((..., [2, 1, 1, 0]))
bggg: "GraphicsNDArray" = qattr((..., [2, 1, 1, 1]))
bggb: "GraphicsNDArray" = qattr((..., [2, 1, 1, 2]))
bgga: "GraphicsNDArray" = qattr((..., [2, 1, 1, 3]))
bgbr: "GraphicsNDArray" = qattr((..., [2, 1, 2, 0]))
bgbg: "GraphicsNDArray" = qattr((..., [2, 1, 2, 1]))
bgbb: "GraphicsNDArray" = qattr((..., [2, 1, 2, 2]))
bgba: "GraphicsNDArray" = qattr((..., [2, 1, 2, 3]))
bgar: "GraphicsNDArray" = qattr((..., [2, 1, 3, 0]))
bgag: "GraphicsNDArray" = qattr((..., [2, 1, 3, 1]))
bgab: "GraphicsNDArray" = qattr((..., [2, 1, 3, 2]))
bgaa: "GraphicsNDArray" = qattr((..., [2, 1, 3, 3]))
bbrr: "GraphicsNDArray" = qattr((..., [2, 2, 0, 0]))
bbrg: "GraphicsNDArray" = qattr((..., [2, 2, 0, 1]))
bbrb: "GraphicsNDArray" = qattr((..., [2, 2, 0, 2]))
bbra: "GraphicsNDArray" = qattr((..., [2, 2, 0, 3]))
bbgr: "GraphicsNDArray" = qattr((..., [2, 2, 1, 0]))
bbgg: "GraphicsNDArray" = qattr((..., [2, 2, 1, 1]))
bbgb: "GraphicsNDArray" = qattr((..., [2, 2, 1, 2]))
bbga: "GraphicsNDArray" = qattr((..., [2, 2, 1, 3]))
bbbr: "GraphicsNDArray" = qattr((..., [2, 2, 2, 0]))
bbbg: "GraphicsNDArray" = qattr((..., [2, 2, 2, 1]))
bbbb: "GraphicsNDArray" = qattr((..., [2, 2, 2, 2]))
bbba: "GraphicsNDArray" = qattr((..., [2, 2, 2, 3]))
bbar: "GraphicsNDArray" = qattr((..., [2, 2, 3, 0]))
bbag: "GraphicsNDArray" = qattr((..., [2, 2, 3, 1]))
bbab: "GraphicsNDArray" = qattr((..., [2, 2, 3, 2]))
bbaa: "GraphicsNDArray" = qattr((..., [2, 2, 3, 3]))
barr: "GraphicsNDArray" = qattr((..., [2, 3, 0, 0]))
barg: "GraphicsNDArray" = qattr((..., [2, 3, 0, 1]))
barb: "GraphicsNDArray" = qattr((..., [2, 3, 0, 2]))
bara: "GraphicsNDArray" = qattr((..., [2, 3, 0, 3]))
bagr: "GraphicsNDArray" = qattr((..., [2, 3, 1, 0]))
bagg: "GraphicsNDArray" = qattr((..., [2, 3, 1, 1]))
bagb: "GraphicsNDArray" = qattr((..., [2, 3, 1, 2]))
baga: "GraphicsNDArray" = qattr((..., [2, 3, 1, 3]))
babr: "GraphicsNDArray" = qattr((..., [2, 3, 2, 0]))
babg: "GraphicsNDArray" = qattr((..., [2, 3, 2, 1]))
babb: "GraphicsNDArray" = qattr((..., [2, 3, 2, 2]))
baba: "GraphicsNDArray" = qattr((..., [2, 3, 2, 3]))
baar: "GraphicsNDArray" = qattr((..., [2, 3, 3, 0]))
baag: "GraphicsNDArray" = qattr((..., [2, 3, 3, 1]))
baab: "GraphicsNDArray" = qattr((..., [2, 3, 3, 2]))
baaa: "GraphicsNDArray" = qattr((..., [2, 3, 3, 3]))
arrr: "GraphicsNDArray" = qattr((..., [3, 0, 0, 0]))
arrg: "GraphicsNDArray" = qattr((..., [3, 0, 0, 1]))
arrb: "GraphicsNDArray" = qattr((..., [3, 0, 0, 2]))
arra: "GraphicsNDArray" = qattr((..., [3, 0, 0, 3]))
argr: "GraphicsNDArray" = qattr((..., [3, 0, 1, 0]))
argg: "GraphicsNDArray" = qattr((..., [3, 0, 1, 1]))
argb: "GraphicsNDArray" = qattr((..., [3, 0, 1, 2]))
arga: "GraphicsNDArray" = qattr((..., [3, 0, 1, 3]))
arbr: "GraphicsNDArray" = qattr((..., [3, 0, 2, 0]))
arbg: "GraphicsNDArray" = qattr((..., [3, 0, 2, 1]))
arbb: "GraphicsNDArray" = qattr((..., [3, 0, 2, 2]))
arba: "GraphicsNDArray" = qattr((..., [3, 0, 2, 3]))
arar: "GraphicsNDArray" = qattr((..., [3, 0, 3, 0]))
arag: "GraphicsNDArray" = qattr((..., [3, 0, 3, 1]))
arab: "GraphicsNDArray" = qattr((..., [3, 0, 3, 2]))
araa: "GraphicsNDArray" = qattr((..., [3, 0, 3, 3]))
agrr: "GraphicsNDArray" = qattr((..., [3, 1, 0, 0]))
agrg: "GraphicsNDArray" = qattr((..., [3, 1, 0, 1]))
agrb: "GraphicsNDArray" = qattr((..., [3, 1, 0, 2]))
agra: "GraphicsNDArray" = qattr((..., [3, 1, 0, 3]))
aggr: "GraphicsNDArray" = qattr((..., [3, 1, 1, 0]))
aggg: "GraphicsNDArray" = qattr((..., [3, 1, 1, 1]))
aggb: "GraphicsNDArray" = qattr((..., [3, 1, 1, 2]))
agga: "GraphicsNDArray" = qattr((..., [3, 1, 1, 3]))
agbr: "GraphicsNDArray" = qattr((..., [3, 1, 2, 0]))
agbg: "GraphicsNDArray" = qattr((..., [3, 1, 2, 1]))
agbb: "GraphicsNDArray" = qattr((..., [3, 1, 2, 2]))
agba: "GraphicsNDArray" = qattr((..., [3, 1, 2, 3]))
agar: "GraphicsNDArray" = qattr((..., [3, 1, 3, 0]))
agag: "GraphicsNDArray" = qattr((..., [3, 1, 3, 1]))
agab: "GraphicsNDArray" = qattr((..., [3, 1, 3, 2]))
agaa: "GraphicsNDArray" = qattr((..., [3, 1, 3, 3]))
abrr: "GraphicsNDArray" = qattr((..., [3, 2, 0, 0]))
abrg: "GraphicsNDArray" = qattr((..., [3, 2, 0, 1]))
abrb: "GraphicsNDArray" = qattr((..., [3, 2, 0, 2]))
abra: "GraphicsNDArray" = qattr((..., [3, 2, 0, 3]))
abgr: "GraphicsNDArray" = qattr((..., [3, 2, 1, 0]))
abgg: "GraphicsNDArray" = qattr((..., [3, 2, 1, 1]))
abgb: "GraphicsNDArray" = qattr((..., [3, 2, 1, 2]))
abga: "GraphicsNDArray" = qattr((..., [3, 2, 1, 3]))
abbr: "GraphicsNDArray" = qattr((..., [3, 2, 2, 0]))
abbg: "GraphicsNDArray" = qattr((..., [3, 2, 2, 1]))
abbb: "GraphicsNDArray" = qattr((..., [3, 2, 2, 2]))
abba: "GraphicsNDArray" = qattr((..., [3, 2, 2, 3]))
abar: "GraphicsNDArray" = qattr((..., [3, 2, 3, 0]))
abag: "GraphicsNDArray" = qattr((..., [3, 2, 3, 1]))
abab: "GraphicsNDArray" = qattr((..., [3, 2, 3, 2]))
abaa: "GraphicsNDArray" = qattr((..., [3, 2, 3, 3]))
aarr: "GraphicsNDArray" = qattr((..., [3, 3, 0, 0]))
aarg: "GraphicsNDArray" = qattr((..., [3, 3, 0, 1]))
aarb: "GraphicsNDArray" = qattr((..., [3, 3, 0, 2]))
aara: "GraphicsNDArray" = qattr((..., [3, 3, 0, 3]))
aagr: "GraphicsNDArray" = qattr((..., [3, 3, 1, 0]))
aagg: "GraphicsNDArray" = qattr((..., [3, 3, 1, 1]))
aagb: "GraphicsNDArray" = qattr((..., [3, 3, 1, 2]))
aaga: "GraphicsNDArray" = qattr((..., [3, 3, 1, 3]))
aabr: "GraphicsNDArray" = qattr((..., [3, 3, 2, 0]))
aabg: "GraphicsNDArray" = qattr((..., [3, 3, 2, 1]))
aabb: "GraphicsNDArray" = qattr((..., [3, 3, 2, 2]))
aaba: "GraphicsNDArray" = qattr((..., [3, 3, 2, 3]))
aaar: "GraphicsNDArray" = qattr((..., [3, 3, 3, 0]))
aaag: "GraphicsNDArray" = qattr((..., [3, 3, 3, 1]))
aaab: "GraphicsNDArray" = qattr((..., [3, 3, 3, 2]))
aaaa: "GraphicsNDArray" = qattr((..., [3, 3, 3, 3]))
NDArray = Union[numpy.ndarray, GraphicsNDArray]
T = TypeVar("T")
| @type_match(numpy.ndarray) | 1 | 2023-11-21 22:44:49+00:00 | 12k |
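The long run of swizzle attributes above follows a single pattern: every name (x/y/z/w and r/g/b/a style, in all 1- to 4-component combinations) is bound to qattr((..., [i, j, ...])), i.e. a fixed fancy index over the last axis of the array. The qattr helper itself is not shown in this excerpt, so the following is only a minimal sketch of the assumed behaviour — a read-only property that applies the stored index — and the class name TinySwizzleArray is hypothetical, not the library's GraphicsNDArray.

import numpy

def qattr(index):
    # Assumed behaviour: return a read-only property that applies the fixed
    # fancy index, e.g. (..., [2, 1, 0]), to the array it is accessed on.
    def getter(self):
        return self[index]
    return property(getter)

class TinySwizzleArray(numpy.ndarray):
    # Two representative swizzles; the real class defines every xyzw/rgba combination.
    rgb = qattr((..., [0, 1, 2]))
    bgr = qattr((..., [2, 1, 0]))

pixels = numpy.arange(12, dtype=numpy.float32).reshape(3, 4).view(TinySwizzleArray)
print(pixels.bgr)  # the last-axis components of each row reordered as b, g, r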
yk7333/d3po | scripts/train_with_rm.py | [
{
"identifier": "pipeline_with_logprob",
"path": "d3po_pytorch/diffusers_patch/pipeline_with_logprob.py",
"snippet": "@torch.no_grad()\ndef pipeline_with_logprob(\n self: StableDiffusionPipeline,\n prompt: Union[str, List[str]] = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n guidance_rescale: float = 0.0,\n):\n r\"\"\"\n Function invoked when calling the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.\n instead.\n height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The height in pixels of the generated image.\n width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The width in pixels of the generated image.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,\n usually at the expense of lower image quality.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. If not defined, one has to pass\n `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is\n less than `1`).\n num_images_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n eta (`float`, *optional*, defaults to 0.0):\n Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to\n [`schedulers.DDIMScheduler`], will be ignored for others.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)\n to make generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor will ge generated by sampling using the supplied random `generator`.\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not\n provided, text embeddings will be generated from `prompt` input argument.\n negative_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt\n weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input\n argument.\n output_type (`str`, *optional*, defaults to `\"pil\"`):\n The output format of the generate image. Choose between\n [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a\n plain tuple.\n callback (`Callable`, *optional*):\n A function that will be called every `callback_steps` steps during inference. The function will be\n called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function will be called. If not specified, the callback will be\n called at every step.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n guidance_rescale (`float`, *optional*, defaults to 0.7):\n Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are\n Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of\n [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).\n Guidance rescale factor should fix overexposure when using zero terminal SNR.\n\n Examples:\n\n Returns:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.\n When returning a tuple, the first element is a list with the generated images, and the second element is a\n list of `bool`s denoting whether the corresponding generated image likely represents \"not-safe-for-work\"\n (nsfw) content, according to the `safety_checker`.\n \"\"\"\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # 1. Check inputs. Raise error if not correct\n self.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds)\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # 3. Encode input prompt\n text_encoder_lora_scale = cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\n prompt_embeds = self._encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n lora_scale=text_encoder_lora_scale,\n )\n\n # 4. 
Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # 5. Prepare latent variables\n num_channels_latents = self.unet.config.in_channels\n latents = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n )\n\n # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 7. Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n all_latents = [latents]\n all_log_probs = []\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # predict the noise residual\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n return_dict=False,\n )[0]\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n if do_classifier_free_guidance and guidance_rescale > 0.0:\n # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf\n noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents, log_prob = ddim_step_with_logprob(self.scheduler, noise_pred, t, latents, **extra_step_kwargs)\n\n all_latents.append(latents)\n all_log_probs.append(log_prob)\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n if not output_type == \"latent\":\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]\n image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)\n else:\n image = latents\n has_nsfw_concept = None\n\n if has_nsfw_concept is None:\n do_denormalize = [True] * image.shape[0]\n else:\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\n\n image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)\n\n # Offload last model to CPU\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.final_offload_hook.offload()\n\n return image, has_nsfw_concept, all_latents, all_log_probs"
},
{
"identifier": "ddim_step_with_logprob",
"path": "d3po_pytorch/diffusers_patch/ddim_with_logprob.py",
"snippet": "def ddim_step_with_logprob(\n self: DDIMScheduler,\n model_output: torch.FloatTensor,\n timestep: int,\n sample: torch.FloatTensor,\n eta: float = 0.0,\n use_clipped_model_output: bool = False,\n generator=None,\n prev_sample: Optional[torch.FloatTensor] = None,\n) -> Union[DDIMSchedulerOutput, Tuple]:\n \"\"\"\n Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion\n process from the learned model outputs (most often the predicted noise).\n\n Args:\n model_output (`torch.FloatTensor`): direct output from learned diffusion model.\n timestep (`int`): current discrete timestep in the diffusion chain.\n sample (`torch.FloatTensor`):\n current instance of sample being created by diffusion process.\n eta (`float`): weight of noise for added noise in diffusion step.\n use_clipped_model_output (`bool`): if `True`, compute \"corrected\" `model_output` from the clipped\n predicted original sample. Necessary because predicted original sample is clipped to [-1, 1] when\n `self.config.clip_sample` is `True`. If no clipping has happened, \"corrected\" `model_output` would\n coincide with the one provided as input and `use_clipped_model_output` will have not effect.\n generator: random number generator.\n variance_noise (`torch.FloatTensor`): instead of generating noise for the variance using `generator`, we\n can directly provide the noise for the variance itself. This is useful for methods such as\n CycleDiffusion. (https://arxiv.org/abs/2210.05559)\n return_dict (`bool`): option for returning tuple rather than DDIMSchedulerOutput class\n\n Returns:\n [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] or `tuple`:\n [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When\n returning a tuple, the first element is the sample tensor.\n\n \"\"\"\n assert isinstance(self, DDIMScheduler)\n if self.num_inference_steps is None:\n raise ValueError(\n \"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler\"\n )\n\n # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf\n # Ideally, read DDIM paper in-detail understanding\n\n # Notation (<variable name> -> <name in paper>\n # - pred_noise_t -> e_theta(x_t, t)\n # - pred_original_sample -> f_theta(x_t, t) or x_0\n # - std_dev_t -> sigma_t\n # - eta -> η\n # - pred_sample_direction -> \"direction pointing to x_t\"\n # - pred_prev_sample -> \"x_t-1\"\n\n # 1. get previous step value (=t-1)\n prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps\n # to prevent OOB on gather\n prev_timestep = torch.clamp(prev_timestep, 0, self.config.num_train_timesteps - 1)\n\n # 2. compute alphas, betas\n alpha_prod_t = self.alphas_cumprod.gather(0, timestep.cpu())\n alpha_prod_t_prev = torch.where(\n prev_timestep.cpu() >= 0, self.alphas_cumprod.gather(0, prev_timestep.cpu()), self.final_alpha_cumprod\n )\n alpha_prod_t = _left_broadcast(alpha_prod_t, sample.shape).to(sample.device)\n alpha_prod_t_prev = _left_broadcast(alpha_prod_t_prev, sample.shape).to(sample.device)\n\n beta_prod_t = 1 - alpha_prod_t\n\n # 3. 
compute predicted original sample from predicted noise also called\n # \"predicted x_0\" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf\n if self.config.prediction_type == \"epsilon\":\n pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)\n pred_epsilon = model_output\n elif self.config.prediction_type == \"sample\":\n pred_original_sample = model_output\n pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)\n elif self.config.prediction_type == \"v_prediction\":\n pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output\n pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample\n else:\n raise ValueError(\n f\"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or\"\n \" `v_prediction`\"\n )\n\n # 4. Clip or threshold \"predicted x_0\"\n if self.config.thresholding:\n pred_original_sample = self._threshold_sample(pred_original_sample)\n elif self.config.clip_sample:\n pred_original_sample = pred_original_sample.clamp(\n -self.config.clip_sample_range, self.config.clip_sample_range\n )\n\n # 5. compute variance: \"sigma_t(η)\" -> see formula (16)\n # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)\n variance = _get_variance(self, timestep, prev_timestep)\n std_dev_t = eta * variance ** (0.5)\n std_dev_t = _left_broadcast(std_dev_t, sample.shape).to(sample.device)\n\n if use_clipped_model_output:\n # the pred_epsilon is always re-derived from the clipped x_0 in Glide\n pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)\n\n # 6. compute \"direction pointing to x_t\" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf\n pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon\n\n # 7. compute x_t without \"random noise\" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf\n prev_sample_mean = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction\n\n if prev_sample is not None and generator is not None:\n raise ValueError(\n \"Cannot pass both generator and prev_sample. Please make sure that either `generator` or\"\n \" `prev_sample` stays `None`.\"\n )\n\n if prev_sample is None:\n variance_noise = randn_tensor(\n model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype\n )\n prev_sample = prev_sample_mean + std_dev_t * variance_noise\n\n # log prob of prev_sample given prev_sample_mean and std_dev_t\n log_prob = (\n -((prev_sample.detach() - prev_sample_mean) ** 2) / (2 * (std_dev_t**2))\n - torch.log(std_dev_t)\n - torch.log(torch.sqrt(2 * torch.as_tensor(math.pi)))\n )\n # mean along all but batch dimension\n log_prob = log_prob.mean(dim=tuple(range(1, log_prob.ndim)))\n\n return prev_sample.type(sample.dtype), log_prob"
}
] | from collections import defaultdict
from concurrent import futures
from absl import app, flags
from ml_collections import config_flags
from accelerate import Accelerator
from accelerate.utils import set_seed, ProjectConfiguration
from accelerate.logging import get_logger
from diffusers import StableDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import LoRAAttnProcessor
from d3po_pytorch.diffusers_patch.pipeline_with_logprob import pipeline_with_logprob
from d3po_pytorch.diffusers_patch.ddim_with_logprob import ddim_step_with_logprob
from functools import partial
from PIL import Image
import contextlib
import os
import copy
import datetime
import time
import sys
import numpy as np
import d3po_pytorch.prompts
import d3po_pytorch.rewards
import torch
import wandb
import tqdm
import tempfile
import bitsandbytes as bnb | 7,614 | accelerator.register_save_state_pre_hook(save_model_hook)
accelerator.register_load_state_pre_hook(load_model_hook)
# Enable TF32 for faster training on Ampere GPUs,
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
if config.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
# Initialize the optimizer
if config.train.use_8bit_adam:
        try:
            import bitsandbytes as bnb
        except ImportError:
raise ImportError(
"Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
)
optimizer_cls = bnb.optim.AdamW8bit
else:
optimizer_cls = torch.optim.AdamW
optimizer = optimizer_cls(
trainable_layers.parameters(),
lr=config.train.learning_rate,
betas=(config.train.adam_beta1, config.train.adam_beta2),
weight_decay=config.train.adam_weight_decay,
eps=config.train.adam_epsilon,
)
# prepare prompt and reward fn
prompt_fn = getattr(d3po_pytorch.prompts, config.prompt_fn)
reward_fn = getattr(d3po_pytorch.rewards, config.reward_fn)()
# generate negative prompt embeddings
neg_prompt_embed = pipeline.text_encoder(
pipeline.tokenizer(
[""],
return_tensors="pt",
padding="max_length",
truncation=True,
max_length=pipeline.tokenizer.model_max_length,
).input_ids.to(accelerator.device)
)[0]
sample_neg_prompt_embeds = neg_prompt_embed.repeat(config.sample.batch_size, 1, 1)
train_neg_prompt_embeds = neg_prompt_embed.repeat(config.train.batch_size, 1, 1)
    # for some reason, autocast is necessary for non-LoRA training; for LoRA training it isn't necessary
    # and only increases memory usage
autocast = contextlib.nullcontext if config.use_lora else accelerator.autocast
# Prepare everything with our `accelerator`.
trainable_layers, optimizer = accelerator.prepare(trainable_layers, optimizer)
# executor to perform callbacks asynchronously.
executor = futures.ThreadPoolExecutor(max_workers=2)
# Train!
samples_per_epoch = config.sample.batch_size * accelerator.num_processes * config.sample.num_batches_per_epoch
total_train_batch_size = (
config.train.batch_size * accelerator.num_processes * config.train.gradient_accumulation_steps
)
logger.info("***** Running training *****")
logger.info(f" Num Epochs = {config.num_epochs}")
logger.info(f" Sample batch size per device = {config.sample.batch_size}")
logger.info(f" Train batch size per device = {config.train.batch_size}")
logger.info(f" Gradient Accumulation steps = {config.train.gradient_accumulation_steps}")
logger.info("")
logger.info(f" Total number of samples per epoch = {samples_per_epoch}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Number of gradient updates per inner epoch = {samples_per_epoch // total_train_batch_size}")
logger.info(f" Number of inner epochs = {config.train.num_inner_epochs}")
assert config.sample.batch_size >= config.train.batch_size
assert config.sample.batch_size % config.train.batch_size == 0
assert samples_per_epoch % total_train_batch_size == 0
if config.resume_from:
logger.info(f"Resuming from {config.resume_from}")
accelerator.load_state(config.resume_from)
first_epoch = int(config.resume_from.split("_")[-1]) + 1
else:
first_epoch = 0
global_step = 0
for epoch in range(first_epoch, config.num_epochs):
#################### SAMPLING ####################
pipeline.unet.eval()
samples = []
prompt_metadata = None
for i in tqdm(
range(config.sample.num_batches_per_epoch),
desc=f"Epoch {epoch}: sampling",
disable=not accelerator.is_local_main_process,
position=0,
):
# generate prompts
prompts1, prompt_metadata = zip(
*[prompt_fn(**config.prompt_fn_kwargs) for _ in range(config.sample.batch_size)]
)
prompts2 = prompts1
# encode prompts
prompt_ids1 = pipeline.tokenizer(
prompts1,
return_tensors="pt",
padding="max_length",
truncation=True,
max_length=pipeline.tokenizer.model_max_length,
).input_ids.to(accelerator.device)
prompt_ids2 = pipeline.tokenizer(
prompts2,
return_tensors="pt",
padding="max_length",
truncation=True,
max_length=pipeline.tokenizer.model_max_length,
).input_ids.to(accelerator.device)
prompt_embeds1 = pipeline.text_encoder(prompt_ids1)[0]
prompt_embeds2 = pipeline.text_encoder(prompt_ids2)[0]
# sample
with autocast():
| script_path = os.path.abspath(__file__)
sys.path.append(os.path.dirname(os.path.dirname(script_path)))
tqdm = partial(tqdm.tqdm, dynamic_ncols=True)
FLAGS = flags.FLAGS
config_flags.DEFINE_config_file("config", "config/base.py", "Training configuration.")
logger = get_logger(__name__)
def main(_):
# basic Accelerate and logging setup
config = FLAGS.config
unique_id = datetime.datetime.now().strftime("%Y.%m.%d_%H.%M.%S")
if not config.run_name:
config.run_name = unique_id
else:
config.run_name += "_" + unique_id
if config.resume_from:
config.resume_from = os.path.normpath(os.path.expanduser(config.resume_from))
if "checkpoint_" not in os.path.basename(config.resume_from):
# get the most recent checkpoint in this directory
checkpoints = list(filter(lambda x: "checkpoint_" in x, os.listdir(config.resume_from)))
if len(checkpoints) == 0:
raise ValueError(f"No checkpoints found in {config.resume_from}")
config.resume_from = os.path.join(
config.resume_from,
sorted(checkpoints, key=lambda x: int(x.split("_")[-1]))[-1],
)
# number of timesteps within each trajectory to train on
num_train_timesteps = int(config.sample.num_steps * config.train.timestep_fraction)
accelerator_config = ProjectConfiguration(
project_dir=os.path.join(config.logdir, config.run_name),
automatic_checkpoint_naming=True,
total_limit=config.num_checkpoint_limit,
)
accelerator = Accelerator(
log_with="wandb",
mixed_precision=config.mixed_precision,
project_config=accelerator_config,
gradient_accumulation_steps=config.train.gradient_accumulation_steps * num_train_timesteps,
)
if accelerator.is_main_process:
accelerator.init_trackers(
project_name="d3po-pytorch", config=config.to_dict(), init_kwargs={"wandb": {"name": config.run_name}}
)
logger.info(f"\n{config}")
    random_seed = np.random.randint(0, 100000)
    set_seed(random_seed, device_specific=True)
# load scheduler, tokenizer and models.
pipeline = StableDiffusionPipeline.from_pretrained(config.pretrained.model, torch_dtype=torch.float16)
if config.use_xformers:
pipeline.enable_xformers_memory_efficient_attention()
# freeze parameters of models to save more memory
pipeline.vae.requires_grad_(False)
pipeline.text_encoder.requires_grad_(False)
pipeline.unet.requires_grad_(not config.use_lora)
if not config.use_lora and config.train.activation_checkpointing:
pipeline.unet.enable_gradient_checkpointing()
# disable safety checker
pipeline.safety_checker = None
# make the progress bar nicer
pipeline.set_progress_bar_config(
position=1,
disable=not accelerator.is_local_main_process,
leave=False,
desc="Timestep",
dynamic_ncols=True,
)
# switch to DDIM scheduler
pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
    # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
# as these weights are only used for inference, keeping weights in full precision is not required.
inference_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
inference_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
inference_dtype = torch.bfloat16
# Move unet, vae and text_encoder to device and cast to inference_dtype
pipeline.vae.to(accelerator.device, dtype=inference_dtype)
pipeline.text_encoder.to(accelerator.device, dtype=inference_dtype)
pipeline.unet.to(accelerator.device, dtype=inference_dtype)
ref = copy.deepcopy(pipeline.unet)
for param in ref.parameters():
param.requires_grad = False
if config.use_lora:
# Set correct lora layers
lora_attn_procs = {}
for name in pipeline.unet.attn_processors.keys():
cross_attention_dim = (
None if name.endswith("attn1.processor") else pipeline.unet.config.cross_attention_dim
)
if name.startswith("mid_block"):
hidden_size = pipeline.unet.config.block_out_channels[-1]
elif name.startswith("up_blocks"):
block_id = int(name[len("up_blocks.")])
hidden_size = list(reversed(pipeline.unet.config.block_out_channels))[block_id]
elif name.startswith("down_blocks"):
block_id = int(name[len("down_blocks.")])
hidden_size = pipeline.unet.config.block_out_channels[block_id]
lora_attn_procs[name] = LoRAAttnProcessor(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim)
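            # e.g. for name = "up_blocks.1.attentions.0.transformer_blocks.0.attn1.processor":
            #   the name ends with "attn1.processor" (self-attention), so cross_attention_dim is None,
            #   block_id = 1, and hidden_size = list(reversed(block_out_channels))[1]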
pipeline.unet.set_attn_processor(lora_attn_procs)
trainable_layers = AttnProcsLayers(pipeline.unet.attn_processors)
else:
trainable_layers = pipeline.unet
# set up diffusers-friendly checkpoint saving with Accelerate
def save_model_hook(models, weights, output_dir):
assert len(models) == 1
if config.use_lora and isinstance(models[0], AttnProcsLayers):
pipeline.unet.save_attn_procs(output_dir)
elif not config.use_lora and isinstance(models[0], UNet2DConditionModel):
models[0].save_pretrained(os.path.join(output_dir, "unet"))
else:
raise ValueError(f"Unknown model type {type(models[0])}")
weights.pop() # ensures that accelerate doesn't try to handle saving of the model
def load_model_hook(models, input_dir):
assert len(models) == 1
if config.use_lora and isinstance(models[0], AttnProcsLayers):
tmp_unet = UNet2DConditionModel.from_pretrained(
config.pretrained.model, revision=config.pretrained.revision, subfolder="unet"
)
tmp_unet.load_attn_procs(input_dir)
models[0].load_state_dict(AttnProcsLayers(tmp_unet.attn_processors).state_dict())
del tmp_unet
elif not config.use_lora and isinstance(models[0], UNet2DConditionModel):
load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet")
models[0].register_to_config(**load_model.config)
models[0].load_state_dict(load_model.state_dict())
del load_model
else:
raise ValueError(f"Unknown model type {type(models[0])}")
models.pop() # ensures that accelerate doesn't try to handle loading of the model
    # Support multi-dimensional comparison. The default dimension is 1. You can use several rewards instead of a single one to judge the preference between images.
    # For example: A: clipscore 30, blipscore 10, LAION aesthetic score 6.0; B: 20, 8, 5.0 -> then A is preferred over B.
    # If C: 40, 4, 4.0, then since C[0] = 40 > A[0] but C[1] = 4 < A[1] = 10, neither C is preferred over A nor A over C.
def compare(a, b):
assert isinstance(a, torch.Tensor) and isinstance(b, torch.Tensor)
if len(a.shape)==1:
a = a[...,None]
b = b[...,None]
a_dominates = torch.logical_and(torch.all(a <= b, dim=1), torch.any(a < b, dim=1))
b_dominates = torch.logical_and(torch.all(b <= a, dim=1), torch.any(b < a, dim=1))
c = torch.zeros([a.shape[0],2],dtype=torch.float,device=a.device)
c[a_dominates] = torch.tensor([-1., 1.],device=a.device)
c[b_dominates] = torch.tensor([1., -1.],device=a.device)
return c
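    # Worked example of the convention above:
    #   a = torch.tensor([[30., 10., 6.0]]); b = torch.tensor([[20., 8., 5.0]])
    #   compare(a, b) -> tensor([[1., -1.]])  because b <= a holds element-wise (b_dominates is True)
    #   with c_ = torch.tensor([[40., 4., 4.0]]), compare(a, c_) -> tensor([[0., 0.]]) since neither
    #   sample is better on every reward.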
accelerator.register_save_state_pre_hook(save_model_hook)
accelerator.register_load_state_pre_hook(load_model_hook)
# Enable TF32 for faster training on Ampere GPUs,
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
if config.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
# Initialize the optimizer
if config.train.use_8bit_adam:
        try:
            import bitsandbytes as bnb
        except ImportError:
raise ImportError(
"Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
)
optimizer_cls = bnb.optim.AdamW8bit
else:
optimizer_cls = torch.optim.AdamW
optimizer = optimizer_cls(
trainable_layers.parameters(),
lr=config.train.learning_rate,
betas=(config.train.adam_beta1, config.train.adam_beta2),
weight_decay=config.train.adam_weight_decay,
eps=config.train.adam_epsilon,
)
# prepare prompt and reward fn
prompt_fn = getattr(d3po_pytorch.prompts, config.prompt_fn)
reward_fn = getattr(d3po_pytorch.rewards, config.reward_fn)()
# generate negative prompt embeddings
neg_prompt_embed = pipeline.text_encoder(
pipeline.tokenizer(
[""],
return_tensors="pt",
padding="max_length",
truncation=True,
max_length=pipeline.tokenizer.model_max_length,
).input_ids.to(accelerator.device)
)[0]
sample_neg_prompt_embeds = neg_prompt_embed.repeat(config.sample.batch_size, 1, 1)
train_neg_prompt_embeds = neg_prompt_embed.repeat(config.train.batch_size, 1, 1)
    # for some reason, autocast is necessary for non-LoRA training; for LoRA training it isn't necessary
    # and only increases memory usage
autocast = contextlib.nullcontext if config.use_lora else accelerator.autocast
# Prepare everything with our `accelerator`.
trainable_layers, optimizer = accelerator.prepare(trainable_layers, optimizer)
# executor to perform callbacks asynchronously.
executor = futures.ThreadPoolExecutor(max_workers=2)
# Train!
samples_per_epoch = config.sample.batch_size * accelerator.num_processes * config.sample.num_batches_per_epoch
total_train_batch_size = (
config.train.batch_size * accelerator.num_processes * config.train.gradient_accumulation_steps
)
logger.info("***** Running training *****")
logger.info(f" Num Epochs = {config.num_epochs}")
logger.info(f" Sample batch size per device = {config.sample.batch_size}")
logger.info(f" Train batch size per device = {config.train.batch_size}")
logger.info(f" Gradient Accumulation steps = {config.train.gradient_accumulation_steps}")
logger.info("")
logger.info(f" Total number of samples per epoch = {samples_per_epoch}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Number of gradient updates per inner epoch = {samples_per_epoch // total_train_batch_size}")
logger.info(f" Number of inner epochs = {config.train.num_inner_epochs}")
assert config.sample.batch_size >= config.train.batch_size
assert config.sample.batch_size % config.train.batch_size == 0
assert samples_per_epoch % total_train_batch_size == 0
if config.resume_from:
logger.info(f"Resuming from {config.resume_from}")
accelerator.load_state(config.resume_from)
first_epoch = int(config.resume_from.split("_")[-1]) + 1
else:
first_epoch = 0
global_step = 0
for epoch in range(first_epoch, config.num_epochs):
#################### SAMPLING ####################
pipeline.unet.eval()
samples = []
prompt_metadata = None
for i in tqdm(
range(config.sample.num_batches_per_epoch),
desc=f"Epoch {epoch}: sampling",
disable=not accelerator.is_local_main_process,
position=0,
):
# generate prompts
prompts1, prompt_metadata = zip(
*[prompt_fn(**config.prompt_fn_kwargs) for _ in range(config.sample.batch_size)]
)
prompts2 = prompts1
# encode prompts
prompt_ids1 = pipeline.tokenizer(
prompts1,
return_tensors="pt",
padding="max_length",
truncation=True,
max_length=pipeline.tokenizer.model_max_length,
).input_ids.to(accelerator.device)
prompt_ids2 = pipeline.tokenizer(
prompts2,
return_tensors="pt",
padding="max_length",
truncation=True,
max_length=pipeline.tokenizer.model_max_length,
).input_ids.to(accelerator.device)
prompt_embeds1 = pipeline.text_encoder(prompt_ids1)[0]
prompt_embeds2 = pipeline.text_encoder(prompt_ids2)[0]
# sample
with autocast(): | images1, _, latents1, log_probs1 = pipeline_with_logprob( | 0 | 2023-11-23 08:08:20+00:00 | 12k |
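For this example the next_line target, images1, _, latents1, log_probs1 = pipeline_with_logprob(, continues the sampling block above, and the gold snippet (index 0) is the pipeline_with_logprob function from the retrieved context, which returns image, has_nsfw_concept, all_latents, all_log_probs. A plausible continuation of the call, using only names defined in the script, is sketched below; the config.sample.guidance_scale and config.sample.eta fields are assumptions, since they are not shown in this excerpt.

images1, _, latents1, log_probs1 = pipeline_with_logprob(
    pipeline,                                        # the StableDiffusionPipeline loaded earlier
    prompt_embeds=prompt_embeds1,
    negative_prompt_embeds=sample_neg_prompt_embeds,
    num_inference_steps=config.sample.num_steps,
    guidance_scale=config.sample.guidance_scale,     # assumed config field
    eta=config.sample.eta,                           # assumed config field
    output_type="pt",
)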
alexzhou907/DreamPropeller | threestudio/models/guidance/zero123_unified_guidance.py | [
{
"identifier": "Zero123Pipeline",
"path": "extern/zero123.py",
"snippet": "class Zero123Pipeline(DiffusionPipeline):\n r\"\"\"\n Pipeline to generate variations from an input image using Stable Diffusion.\n\n This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the\n library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)\n\n Args:\n vae ([`AutoencoderKL`]):\n Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.\n image_encoder ([`CLIPVisionModelWithProjection`]):\n Frozen CLIP image-encoder. Stable Diffusion Image Variation uses the vision portion of\n [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPVisionModelWithProjection),\n specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.\n unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.\n scheduler ([`SchedulerMixin`]):\n A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of\n [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].\n safety_checker ([`StableDiffusionSafetyChecker`]):\n Classification module that estimates whether generated images could be considered offensive or harmful.\n Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.\n feature_extractor ([`CLIPImageProcessor`]):\n Model that extracts features from generated images to be used as inputs for the `safety_checker`.\n \"\"\"\n # TODO: feature_extractor is required to encode images (if they are in PIL format),\n # we should give a descriptive message if the pipeline doesn't have one.\n _optional_components = [\"safety_checker\"]\n\n def __init__(\n self,\n vae: AutoencoderKL,\n image_encoder: CLIPVisionModelWithProjection,\n unet: UNet2DConditionModel,\n scheduler: KarrasDiffusionSchedulers,\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPImageProcessor,\n clip_camera_projection: CLIPCameraProjection,\n requires_safety_checker: bool = True,\n ):\n super().__init__()\n\n if safety_checker is None and requires_safety_checker:\n logger.warn(\n f\"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure\"\n \" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered\"\n \" results in services or applications open to the public. Both the diffusers team and Hugging Face\"\n \" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling\"\n \" it only for use-cases that involve analyzing network behavior or auditing its results. For more\"\n \" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .\"\n )\n\n if safety_checker is not None and feature_extractor is None:\n raise ValueError(\n \"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety\"\n \" checker. 
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.\"\n )\n\n is_unet_version_less_0_9_0 = hasattr(\n unet.config, \"_diffusers_version\"\n ) and version.parse(\n version.parse(unet.config._diffusers_version).base_version\n ) < version.parse(\n \"0.9.0.dev0\"\n )\n is_unet_sample_size_less_64 = (\n hasattr(unet.config, \"sample_size\") and unet.config.sample_size < 64\n )\n if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:\n deprecation_message = (\n \"The configuration file of the unet has set the default `sample_size` to smaller than\"\n \" 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the\"\n \" following: \\n- CompVis/stable-diffusion-v1-4 \\n- CompVis/stable-diffusion-v1-3 \\n-\"\n \" CompVis/stable-diffusion-v1-2 \\n- CompVis/stable-diffusion-v1-1 \\n- runwayml/stable-diffusion-v1-5\"\n \" \\n- runwayml/stable-diffusion-inpainting \\n you should change 'sample_size' to 64 in the\"\n \" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`\"\n \" in the config might lead to incorrect results in future versions. If you have downloaded this\"\n \" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for\"\n \" the `unet/config.json` file\"\n )\n deprecate(\n \"sample_size<64\", \"1.0.0\", deprecation_message, standard_warn=False\n )\n new_config = dict(unet.config)\n new_config[\"sample_size\"] = 64\n unet._internal_dict = FrozenDict(new_config)\n\n self.register_modules(\n vae=vae,\n image_encoder=image_encoder,\n unet=unet,\n scheduler=scheduler,\n safety_checker=safety_checker,\n feature_extractor=feature_extractor,\n clip_camera_projection=clip_camera_projection,\n )\n self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)\n self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)\n self.register_to_config(requires_safety_checker=requires_safety_checker)\n\n def enable_sequential_cpu_offload(self, gpu_id=0):\n r\"\"\"\n Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,\n text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a\n `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called.\n \"\"\"\n if is_accelerate_available():\n from accelerate import cpu_offload\n else:\n raise ImportError(\"Please install accelerate via `pip install accelerate`\")\n\n device = torch.device(f\"cuda:{gpu_id}\")\n\n for cpu_offloaded_model in [\n self.unet,\n self.image_encoder,\n self.vae,\n self.safety_checker,\n ]:\n if cpu_offloaded_model is not None:\n cpu_offload(cpu_offloaded_model, device)\n\n @property\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device\n def _execution_device(self):\n r\"\"\"\n Returns the device on which the pipeline's models will be executed. 
After calling\n `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module\n hooks.\n \"\"\"\n if not hasattr(self.unet, \"_hf_hook\"):\n return self.device\n for module in self.unet.modules():\n if (\n hasattr(module, \"_hf_hook\")\n and hasattr(module._hf_hook, \"execution_device\")\n and module._hf_hook.execution_device is not None\n ):\n return torch.device(module._hf_hook.execution_device)\n return self.device\n\n def _encode_image(\n self,\n image,\n elevation,\n azimuth,\n distance,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n clip_image_embeddings=None,\n image_camera_embeddings=None,\n ):\n dtype = next(self.image_encoder.parameters()).dtype\n\n if image_camera_embeddings is None:\n if image is None:\n assert clip_image_embeddings is not None\n image_embeddings = clip_image_embeddings.to(device=device, dtype=dtype)\n else:\n if not isinstance(image, torch.Tensor):\n image = self.feature_extractor(\n images=image, return_tensors=\"pt\"\n ).pixel_values\n\n image = image.to(device=device, dtype=dtype)\n image_embeddings = self.image_encoder(image).image_embeds\n image_embeddings = image_embeddings.unsqueeze(1)\n\n bs_embed, seq_len, _ = image_embeddings.shape\n\n if isinstance(elevation, float):\n elevation = torch.as_tensor(\n [elevation] * bs_embed, dtype=dtype, device=device\n )\n if isinstance(azimuth, float):\n azimuth = torch.as_tensor(\n [azimuth] * bs_embed, dtype=dtype, device=device\n )\n if isinstance(distance, float):\n distance = torch.as_tensor(\n [distance] * bs_embed, dtype=dtype, device=device\n )\n\n camera_embeddings = torch.stack(\n [\n torch.deg2rad(elevation),\n torch.sin(torch.deg2rad(azimuth)),\n torch.cos(torch.deg2rad(azimuth)),\n distance,\n ],\n dim=-1,\n )[:, None, :]\n\n image_embeddings = torch.cat([image_embeddings, camera_embeddings], dim=-1)\n\n # project (image, camera) embeddings to the same dimension as clip embeddings\n image_embeddings = self.clip_camera_projection(image_embeddings)\n else:\n image_embeddings = image_camera_embeddings.to(device=device, dtype=dtype)\n bs_embed, seq_len, _ = image_embeddings.shape\n\n # duplicate image embeddings for each generation per prompt, using mps friendly method\n image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1)\n image_embeddings = image_embeddings.view(\n bs_embed * num_images_per_prompt, seq_len, -1\n )\n\n if do_classifier_free_guidance:\n negative_prompt_embeds = torch.zeros_like(image_embeddings)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings])\n\n return image_embeddings\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker\n def run_safety_checker(self, image, device, dtype):\n if self.safety_checker is None:\n has_nsfw_concept = None\n else:\n if torch.is_tensor(image):\n feature_extractor_input = self.image_processor.postprocess(\n image, output_type=\"pil\"\n )\n else:\n feature_extractor_input = self.image_processor.numpy_to_pil(image)\n safety_checker_input = self.feature_extractor(\n feature_extractor_input, return_tensors=\"pt\"\n ).to(device)\n image, has_nsfw_concept = self.safety_checker(\n images=image, clip_input=safety_checker_input.pixel_values.to(dtype)\n )\n return image, has_nsfw_concept\n\n # Copied from 
diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents\n def decode_latents(self, latents):\n warnings.warn(\n \"The decode_latents method is deprecated and will be removed in a future version. Please\"\n \" use VaeImageProcessor instead\",\n FutureWarning,\n )\n latents = 1 / self.vae.config.scaling_factor * latents\n image = self.vae.decode(latents, return_dict=False)[0]\n image = (image / 2 + 0.5).clamp(0, 1)\n # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16\n image = image.cpu().permute(0, 2, 3, 1).float().numpy()\n return image\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs\n def prepare_extra_step_kwargs(self, generator, eta):\n # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature\n # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.\n # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502\n # and should be between [0, 1]\n\n accepts_eta = \"eta\" in set(\n inspect.signature(self.scheduler.step).parameters.keys()\n )\n extra_step_kwargs = {}\n if accepts_eta:\n extra_step_kwargs[\"eta\"] = eta\n\n # check if the scheduler accepts generator\n accepts_generator = \"generator\" in set(\n inspect.signature(self.scheduler.step).parameters.keys()\n )\n if accepts_generator:\n extra_step_kwargs[\"generator\"] = generator\n return extra_step_kwargs\n\n def check_inputs(self, image, height, width, callback_steps):\n # TODO: check image size or adjust image size to (height, width)\n\n if height % 8 != 0 or width % 8 != 0:\n raise ValueError(\n f\"`height` and `width` have to be divisible by 8 but are {height} and {width}.\"\n )\n\n if (callback_steps is None) or (\n callback_steps is not None\n and (not isinstance(callback_steps, int) or callback_steps <= 0)\n ):\n raise ValueError(\n f\"`callback_steps` has to be a positive integer but is {callback_steps} of type\"\n f\" {type(callback_steps)}.\"\n )\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents\n def prepare_latents(\n self,\n batch_size,\n num_channels_latents,\n height,\n width,\n dtype,\n device,\n generator,\n latents=None,\n ):\n shape = (\n batch_size,\n num_channels_latents,\n height // self.vae_scale_factor,\n width // self.vae_scale_factor,\n )\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. 
Make sure the batch size matches the length of the generators.\"\n )\n\n if latents is None:\n latents = randn_tensor(\n shape, generator=generator, device=device, dtype=dtype\n )\n else:\n latents = latents.to(device)\n\n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n return latents\n\n def _get_latent_model_input(\n self,\n latents: torch.FloatTensor,\n image: Optional[\n Union[PIL.Image.Image, List[PIL.Image.Image], torch.FloatTensor]\n ],\n num_images_per_prompt: int,\n do_classifier_free_guidance: bool,\n image_latents: Optional[torch.FloatTensor] = None,\n ):\n if isinstance(image, PIL.Image.Image):\n image_pt = TF.to_tensor(image).unsqueeze(0).to(latents)\n elif isinstance(image, list):\n image_pt = torch.stack([TF.to_tensor(img) for img in image], dim=0).to(\n latents\n )\n elif isinstance(image, torch.Tensor):\n image_pt = image\n else:\n image_pt = None\n\n if image_pt is None:\n assert image_latents is not None\n image_pt = image_latents.repeat_interleave(num_images_per_prompt, dim=0)\n else:\n image_pt = image_pt * 2.0 - 1.0 # scale to [-1, 1]\n # FIXME: encoded latents should be multiplied with self.vae.config.scaling_factor\n # but zero123 was not trained this way\n image_pt = self.vae.encode(image_pt).latent_dist.mode()\n image_pt = image_pt.repeat_interleave(num_images_per_prompt, dim=0)\n if do_classifier_free_guidance:\n latent_model_input = torch.cat(\n [\n torch.cat([latents, latents], dim=0),\n torch.cat([torch.zeros_like(image_pt), image_pt], dim=0),\n ],\n dim=1,\n )\n else:\n latent_model_input = torch.cat([latents, image_pt], dim=1)\n\n return latent_model_input\n\n @torch.no_grad()\n def __call__(\n self,\n image: Optional[\n Union[PIL.Image.Image, List[PIL.Image.Image], torch.FloatTensor]\n ] = None,\n elevation: Optional[Union[float, torch.FloatTensor]] = None,\n azimuth: Optional[Union[float, torch.FloatTensor]] = None,\n distance: Optional[Union[float, torch.FloatTensor]] = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_inference_steps: int = 50,\n guidance_scale: float = 3.0,\n num_images_per_prompt: int = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n clip_image_embeddings: Optional[torch.FloatTensor] = None,\n image_camera_embeddings: Optional[torch.FloatTensor] = None,\n image_latents: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n ):\n r\"\"\"\n Function invoked when calling the pipeline for generation.\n\n Args:\n image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`):\n The image or images to guide the image generation. 
If you provide a tensor, it needs to comply with the\n configuration of\n [this](https://huggingface.co/lambdalabs/sd-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json)\n `CLIPImageProcessor`\n height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The height in pixels of the generated image.\n width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The width in pixels of the generated image.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,\n usually at the expense of lower image quality.\n num_images_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n eta (`float`, *optional*, defaults to 0.0):\n Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to\n [`schedulers.DDIMScheduler`], will be ignored for others.\n generator (`torch.Generator`, *optional*):\n One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)\n to make generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor will ge generated by sampling using the supplied random `generator`.\n output_type (`str`, *optional*, defaults to `\"pil\"`):\n The output format of the generate image. Choose between\n [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a\n plain tuple.\n callback (`Callable`, *optional*):\n A function that will be called every `callback_steps` steps during inference. The function will be\n called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function will be called. If not specified, the callback will be\n called at every step.\n\n Returns:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.\n When returning a tuple, the first element is a list with the generated images, and the second element is a\n list of `bool`s denoting whether the corresponding generated image likely represents \"not-safe-for-work\"\n (nsfw) content, according to the `safety_checker`.\n \"\"\"\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # 1. Check inputs. 
Raise error if not correct\n # TODO: check input elevation, azimuth, and distance\n # TODO: check image, clip_image_embeddings, image_latents\n self.check_inputs(image, height, width, callback_steps)\n\n # 2. Define call parameters\n if isinstance(image, PIL.Image.Image):\n batch_size = 1\n elif isinstance(image, list):\n batch_size = len(image)\n elif isinstance(image, torch.Tensor):\n batch_size = image.shape[0]\n else:\n assert image_latents is not None\n assert (\n clip_image_embeddings is not None or image_camera_embeddings is not None\n )\n batch_size = image_latents.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # 3. Encode input image\n if isinstance(image, PIL.Image.Image) or isinstance(image, list):\n pil_image = image\n elif isinstance(image, torch.Tensor):\n pil_image = [TF.to_pil_image(image[i]) for i in range(image.shape[0])]\n else:\n pil_image = None\n image_embeddings = self._encode_image(\n pil_image,\n elevation,\n azimuth,\n distance,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n clip_image_embeddings,\n image_camera_embeddings,\n )\n\n # 4. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # 5. Prepare latent variables\n # num_channels_latents = self.unet.config.in_channels\n num_channels_latents = 4 # FIXME: hard-coded\n latents = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n image_embeddings.dtype,\n device,\n generator,\n latents,\n )\n\n # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 7. 
Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = self._get_latent_model_input(\n latents,\n image,\n num_images_per_prompt,\n do_classifier_free_guidance,\n image_latents,\n )\n latent_model_input = self.scheduler.scale_model_input(\n latent_model_input, t\n )\n\n # predict the noise residual\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=image_embeddings,\n cross_attention_kwargs=cross_attention_kwargs,\n ).sample\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (\n noise_pred_text - noise_pred_uncond\n )\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(\n noise_pred, t, latents, **extra_step_kwargs\n ).prev_sample\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or (\n (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0\n ):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n if not output_type == \"latent\":\n image = self.vae.decode(\n latents / self.vae.config.scaling_factor, return_dict=False\n )[0]\n image, has_nsfw_concept = self.run_safety_checker(\n image, device, image_embeddings.dtype\n )\n else:\n image = latents\n has_nsfw_concept = None\n\n if has_nsfw_concept is None:\n do_denormalize = [True] * image.shape[0]\n else:\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\n\n image = self.image_processor.postprocess(\n image, output_type=output_type, do_denormalize=do_denormalize\n )\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return StableDiffusionPipelineOutput(\n images=image, nsfw_content_detected=has_nsfw_concept\n )"
},
{
"identifier": "ToDTypeWrapper",
"path": "threestudio/models/networks.py",
"snippet": "class ToDTypeWrapper(nn.Module):\n def __init__(self, module: nn.Module, dtype: torch.dtype):\n super().__init__()\n self.module = module\n self.dtype = dtype\n\n def forward(self, x: Float[Tensor, \"...\"]) -> Float[Tensor, \"...\"]:\n return self.module(x).to(self.dtype)"
},
{
"identifier": "PromptProcessorOutput",
"path": "threestudio/models/prompt_processors/base.py",
"snippet": "class PromptProcessorOutput:\n text_embeddings: Float[Tensor, \"N Nf\"]\n uncond_text_embeddings: Float[Tensor, \"N Nf\"]\n text_embeddings_vd: Float[Tensor, \"Nv N Nf\"]\n uncond_text_embeddings_vd: Float[Tensor, \"Nv N Nf\"]\n directions: List[DirectionConfig]\n direction2idx: Dict[str, int]\n use_perp_neg: bool\n perp_neg_f_sb: Tuple[float, float, float]\n perp_neg_f_fsb: Tuple[float, float, float]\n perp_neg_f_fs: Tuple[float, float, float]\n perp_neg_f_sf: Tuple[float, float, float]\n\n # def __init__(self, text_embeddings, uncond_text_embeddings, \n # text_embeddings_vd, uncond_text_embeddings_vd, directions,\n # direction2idx, use_perp_neg, perp_neg_f_sb, perp_neg_f_fsb, perp_neg_f_fs, perp_neg_f_sf, num_gpus=1):\n \n # self.text_embeddings = text_embeddings\n # self.uncond_text_embeddings = uncond_text_embeddings\n # self.text_embeddings_vd = text_embeddings_vd\n # self.uncond_text_embeddings_vd = uncond_text_embeddings_vd\n # self.directions = directions\n # self.direction2idx = direction2idx\n # self.use_perp_neg = use_perp_neg\n # self.perp_neg_f_sb = perp_neg_f_sb\n # self.perp_neg_f_fsb = perp_neg_f_fsb\n # self.perp_neg_f_fs = perp_neg_f_fs\n # self.perp_neg_f_sf = perp_neg_f_sf\n\n\n def get_text_embeddings(\n self,\n elevation: Float[Tensor, \"B\"],\n azimuth: Float[Tensor, \"B\"],\n camera_distances: Float[Tensor, \"B\"],\n view_dependent_prompting: bool = True,\n ) -> Float[Tensor, \"BB N Nf\"]:\n batch_size = elevation.shape[0]\n\n if view_dependent_prompting:\n # Get direction\n direction_idx = torch.zeros_like(elevation, dtype=torch.long)\n for d in self.directions:\n direction_idx[\n d.condition(elevation, azimuth, camera_distances)\n ] = self.direction2idx[d.name]\n\n # Get text embeddings\n text_embeddings = self.text_embeddings_vd[direction_idx] # type: ignore\n uncond_text_embeddings = self.uncond_text_embeddings_vd[direction_idx] # type: ignore\n else:\n text_embeddings = self.text_embeddings.expand(batch_size, -1, -1) # type: ignore\n uncond_text_embeddings = self.uncond_text_embeddings.expand( # type: ignore\n batch_size, -1, -1\n )\n\n # IMPORTANT: we return (cond, uncond), which is in different order than other implementations!\n return torch.cat([text_embeddings, uncond_text_embeddings], dim=0)\n\n def get_text_embeddings_perp_neg(\n self,\n elevation: Float[Tensor, \"B\"],\n azimuth: Float[Tensor, \"B\"],\n camera_distances: Float[Tensor, \"B\"],\n view_dependent_prompting: bool = True,\n ) -> Tuple[Float[Tensor, \"BBBB N Nf\"], Float[Tensor, \"B 2\"]]:\n assert (\n view_dependent_prompting\n ), \"Perp-Neg only works with view-dependent prompting\"\n\n batch_size = elevation.shape[0]\n\n direction_idx = torch.zeros_like(elevation, dtype=torch.long)\n for d in self.directions:\n direction_idx[\n d.condition(elevation, azimuth, camera_distances)\n ] = self.direction2idx[d.name]\n # 0 - side view\n # 1 - front view\n # 2 - back view\n # 3 - overhead view\n\n pos_text_embeddings = []\n neg_text_embeddings = []\n neg_guidance_weights = []\n uncond_text_embeddings = []\n\n side_emb = self.text_embeddings_vd[0]\n front_emb = self.text_embeddings_vd[1]\n back_emb = self.text_embeddings_vd[2]\n overhead_emb = self.text_embeddings_vd[3]\n\n for idx, ele, azi, dis in zip(\n direction_idx, elevation, azimuth, camera_distances\n ):\n azi = shift_azimuth_deg(azi) # to (-180, 180)\n uncond_text_embeddings.append(\n self.uncond_text_embeddings_vd[idx]\n ) # should be \"\"\n if idx.item() == 3: # overhead view\n pos_text_embeddings.append(overhead_emb) # side 
view\n # dummy\n neg_text_embeddings += [\n self.uncond_text_embeddings_vd[idx],\n self.uncond_text_embeddings_vd[idx],\n ]\n neg_guidance_weights += [0.0, 0.0]\n else: # interpolating views\n if torch.abs(azi) < 90:\n # front-side interpolation\n # 0 - complete side, 1 - complete front\n r_inter = 1 - torch.abs(azi) / 90\n pos_text_embeddings.append(\n r_inter * front_emb + (1 - r_inter) * side_emb\n )\n neg_text_embeddings += [front_emb, side_emb]\n neg_guidance_weights += [\n -shifted_expotional_decay(*self.perp_neg_f_fs, r_inter),\n -shifted_expotional_decay(*self.perp_neg_f_sf, 1 - r_inter),\n ]\n else:\n # side-back interpolation\n # 0 - complete back, 1 - complete side\n r_inter = 2.0 - torch.abs(azi) / 90\n pos_text_embeddings.append(\n r_inter * side_emb + (1 - r_inter) * back_emb\n )\n neg_text_embeddings += [side_emb, front_emb]\n neg_guidance_weights += [\n -shifted_expotional_decay(*self.perp_neg_f_sb, r_inter),\n -shifted_expotional_decay(*self.perp_neg_f_fsb, r_inter),\n ]\n\n text_embeddings = torch.cat(\n [\n torch.stack(pos_text_embeddings, dim=0),\n torch.stack(uncond_text_embeddings, dim=0),\n torch.stack(neg_text_embeddings, dim=0),\n ],\n dim=0,\n )\n\n return text_embeddings, torch.as_tensor(\n neg_guidance_weights, device=elevation.device\n ).reshape(batch_size, 2)"
},
{
"identifier": "BaseModule",
"path": "threestudio/utils/base.py",
"snippet": "class BaseModule(nn.Module, Updateable):\n @dataclass\n class Config:\n weights: Optional[str] = None\n\n cfg: Config # add this to every subclass of BaseModule to enable static type checking\n\n def __init__(\n self, cfg: Optional[Union[dict, DictConfig]] = None, device=get_device(), *args, **kwargs\n ) -> None:\n super().__init__()\n self.cfg = parse_structured(self.Config, cfg)\n self.device = device\n self.configure(*args, device=device, **kwargs)\n if self.cfg.weights is not None:\n # format: path/to/weights:module_name\n weights_path, module_name = self.cfg.weights.split(\":\")\n state_dict, epoch, global_step = load_module_weights(\n weights_path, module_name=module_name, map_location=\"cpu\"\n )\n self.load_state_dict(state_dict)\n self.do_update_step(\n epoch, global_step, on_load_weights=True\n ) # restore states\n # dummy tensor to indicate model state\n self._dummy: Float[Tensor, \"...\"]\n self.register_buffer(\"_dummy\", torch.zeros(0).float(), persistent=False)\n\n def configure(self, *args, **kwargs) -> None:\n pass"
},
{
"identifier": "C",
"path": "threestudio/utils/misc.py",
"snippet": "def C(value: Any, epoch: int, global_step: int) -> float:\n if isinstance(value, int) or isinstance(value, float):\n pass\n else:\n value = config_to_primitive(value)\n if not isinstance(value, list):\n raise TypeError(\"Scalar specification only supports list, got\", type(value))\n if len(value) == 3:\n value = [0] + value\n assert len(value) == 4\n start_step, start_value, end_value, end_step = value\n if isinstance(end_step, int):\n current_step = global_step\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n elif isinstance(end_step, float):\n current_step = epoch\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n return value"
},
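The C() helper above is how scheduled scalars such as min_step_percent and max_step_percent in this record get resolved per training step. A minimal sketch of the linear ramp it performs; the [start_step, start_value, end_value, end_step] numbers and the query step are made-up assumptions:

# Linear ramp as implemented by C(): the value moves from start_value to end_value
# between start_step and end_step (an integer end_step keys the ramp on global_step).
start_step, start_value, end_value, end_step = 0, 0.98, 0.02, 5000
global_step = 2500
ratio = max(min(1.0, (global_step - start_step) / (end_step - start_step)), 0.0)
resolved = start_value + (end_value - start_value) * ratio  # 0.5 at the halfway point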
{
"identifier": "cleanup",
"path": "threestudio/utils/misc.py",
"snippet": "def cleanup():\n gc.collect()\n torch.cuda.empty_cache()\n tcnn.free_temporary_memory()"
},
{
"identifier": "enable_gradient",
"path": "threestudio/utils/misc.py",
"snippet": "def enable_gradient(model, enabled: bool = True) -> None:\n for param in model.parameters():\n param.requires_grad_(enabled)"
},
{
"identifier": "parse_version",
"path": "threestudio/utils/misc.py",
"snippet": "def parse_version(ver: str):\n return version.parse(ver)"
}
] | import os
import random
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms.functional as TF
import threestudio
from contextlib import contextmanager
from dataclasses import dataclass, field
from diffusers import (
AutoencoderKL,
DDPMScheduler,
DPMSolverSinglestepScheduler,
UNet2DConditionModel,
)
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import LoRAAttnProcessor
from diffusers.models.embeddings import TimestepEmbedding
from PIL import Image
from tqdm import tqdm
from extern.zero123 import Zero123Pipeline
from threestudio.models.networks import ToDTypeWrapper
from threestudio.models.prompt_processors.base import PromptProcessorOutput
from threestudio.utils.base import BaseModule
from threestudio.utils.misc import C, cleanup, enable_gradient, parse_version
from threestudio.utils.typing import * | 10,515 |
# need to make sure the pipeline file is in path
sys.path.append("extern/")
pipe_kwargs = {
"safety_checker": None,
"requires_safety_checker": False,
"variant": "fp16" if self.cfg.half_precision_weights else None,
"torch_dtype": self.weights_dtype,
}
pipe = Zero123Pipeline.from_pretrained(
self.cfg.pretrained_model_name_or_path,
**pipe_kwargs,
).to(self.device)
self.prepare_pipe(pipe)
# phi network for VSD
# introduce two trainable modules:
# - self.camera_embedding
# - self.lora_layers
pipe_phi = None
# if the phi network shares the same unet with the pretrain network
# we need to pass additional cross attention kwargs to the unet
self.vsd_share_model = (
self.cfg.guidance_type == "vsd"
and self.cfg.vsd_phi_model_name_or_path is None
)
if self.cfg.guidance_type == "vsd":
if self.cfg.vsd_phi_model_name_or_path is None:
pipe_phi = pipe
else:
pipe_phi = Zero123Pipeline.from_pretrained(
self.cfg.vsd_phi_model_name_or_path,
**pipe_kwargs,
).to(self.device)
self.prepare_pipe(pipe_phi)
# set up camera embedding
if self.cfg.vsd_use_camera_condition:
if self.cfg.vsd_camera_condition_type in ["extrinsics", "mvp"]:
self.camera_embedding_dim = 16
elif self.cfg.vsd_camera_condition_type == "spherical":
self.camera_embedding_dim = 4
else:
raise ValueError("Invalid camera condition type!")
# FIXME: hard-coded output dim
self.camera_embedding = ToDTypeWrapper(
TimestepEmbedding(self.camera_embedding_dim, 1280),
self.weights_dtype,
).to(self.device)
pipe_phi.unet.class_embedding = self.camera_embedding
if self.cfg.vsd_use_lora:
# set up LoRA layers
lora_attn_procs = {}
for name in pipe_phi.unet.attn_processors.keys():
cross_attention_dim = (
None
if name.endswith("attn1.processor")
else pipe_phi.unet.config.cross_attention_dim
)
if name.startswith("mid_block"):
hidden_size = pipe_phi.unet.config.block_out_channels[-1]
elif name.startswith("up_blocks"):
block_id = int(name[len("up_blocks.")])
hidden_size = list(
reversed(pipe_phi.unet.config.block_out_channels)
)[block_id]
elif name.startswith("down_blocks"):
block_id = int(name[len("down_blocks.")])
hidden_size = pipe_phi.unet.config.block_out_channels[block_id]
lora_attn_procs[name] = LoRAAttnProcessor(
hidden_size=hidden_size, cross_attention_dim=cross_attention_dim
)
pipe_phi.unet.set_attn_processor(lora_attn_procs)
self.lora_layers = AttnProcsLayers(pipe_phi.unet.attn_processors).to(
self.device
)
self.lora_layers._load_state_dict_pre_hooks.clear()
self.lora_layers._state_dict_hooks.clear()
threestudio.info(f"Loaded Stable Diffusion!")
self.scheduler = DDPMScheduler.from_config(pipe.scheduler.config)
self.num_train_timesteps = self.scheduler.config.num_train_timesteps
# q(z_t|x) = N(alpha_t x, sigma_t^2 I)
# in DDPM, alpha_t = sqrt(alphas_cumprod_t), sigma_t^2 = 1 - alphas_cumprod_t
self.alphas_cumprod: Float[Tensor, "T"] = self.scheduler.alphas_cumprod.to(
self.device
)
self.alphas: Float[Tensor, "T"] = self.alphas_cumprod**0.5
self.sigmas: Float[Tensor, "T"] = (1 - self.alphas_cumprod) ** 0.5
# log SNR
self.lambdas: Float[Tensor, "T"] = self.sigmas / self.alphas
self._non_trainable_modules = NonTrainableModules(
pipe=pipe,
pipe_phi=pipe_phi,
)
# self.clip_image_embeddings and self.image_latents
self.prepare_image_embeddings()
@property
def pipe(self) -> Zero123Pipeline:
return self._non_trainable_modules.pipe
@property
def pipe_phi(self) -> Zero123Pipeline:
if self._non_trainable_modules.pipe_phi is None:
raise RuntimeError("phi model is not available.")
return self._non_trainable_modules.pipe_phi
def prepare_pipe(self, pipe: Zero123Pipeline):
|
@threestudio.register("zero123-unified-guidance")
class Zero123UnifiedGuidance(BaseModule):
@dataclass
class Config(BaseModule.Config):
# guidance type, in ["sds", "vsd"]
guidance_type: str = "sds"
pretrained_model_name_or_path: str = "bennyguo/zero123-diffusers"
guidance_scale: float = 5.0
weighting_strategy: str = "dreamfusion"
min_step_percent: Any = 0.02
max_step_percent: Any = 0.98
grad_clip: Optional[Any] = None
return_rgb_1step_orig: bool = False
return_rgb_multistep_orig: bool = False
n_rgb_multistep_orig_steps: int = 4
cond_image_path: str = ""
cond_elevation_deg: float = 0.0
cond_azimuth_deg: float = 0.0
cond_camera_distance: float = 1.2
# efficiency-related configurations
half_precision_weights: bool = True
# VSD configurations, only used when guidance_type is "vsd"
vsd_phi_model_name_or_path: Optional[str] = None
vsd_guidance_scale_phi: float = 1.0
vsd_use_lora: bool = True
vsd_lora_cfg_training: bool = False
vsd_lora_n_timestamp_samples: int = 1
vsd_use_camera_condition: bool = True
# camera condition type, in ["extrinsics", "mvp", "spherical"]
vsd_camera_condition_type: Optional[str] = "extrinsics"
cfg: Config
def configure(self) -> None:
self.min_step: Optional[int] = None
self.max_step: Optional[int] = None
self.grad_clip_val: Optional[float] = None
@dataclass
class NonTrainableModules:
pipe: Zero123Pipeline
pipe_phi: Optional[Zero123Pipeline] = None
self.weights_dtype = (
torch.float16 if self.cfg.half_precision_weights else torch.float32
)
threestudio.info(f"Loading Zero123 ...")
# need to make sure the pipeline file is in path
sys.path.append("extern/")
pipe_kwargs = {
"safety_checker": None,
"requires_safety_checker": False,
"variant": "fp16" if self.cfg.half_precision_weights else None,
"torch_dtype": self.weights_dtype,
}
pipe = Zero123Pipeline.from_pretrained(
self.cfg.pretrained_model_name_or_path,
**pipe_kwargs,
).to(self.device)
self.prepare_pipe(pipe)
# phi network for VSD
# introduce two trainable modules:
# - self.camera_embedding
# - self.lora_layers
pipe_phi = None
# if the phi network shares the same unet with the pretrain network
# we need to pass additional cross attention kwargs to the unet
self.vsd_share_model = (
self.cfg.guidance_type == "vsd"
and self.cfg.vsd_phi_model_name_or_path is None
)
if self.cfg.guidance_type == "vsd":
if self.cfg.vsd_phi_model_name_or_path is None:
pipe_phi = pipe
else:
pipe_phi = Zero123Pipeline.from_pretrained(
self.cfg.vsd_phi_model_name_or_path,
**pipe_kwargs,
).to(self.device)
self.prepare_pipe(pipe_phi)
# set up camera embedding
if self.cfg.vsd_use_camera_condition:
if self.cfg.vsd_camera_condition_type in ["extrinsics", "mvp"]:
self.camera_embedding_dim = 16
elif self.cfg.vsd_camera_condition_type == "spherical":
self.camera_embedding_dim = 4
else:
raise ValueError("Invalid camera condition type!")
# FIXME: hard-coded output dim
self.camera_embedding = ToDTypeWrapper(
TimestepEmbedding(self.camera_embedding_dim, 1280),
self.weights_dtype,
).to(self.device)
pipe_phi.unet.class_embedding = self.camera_embedding
if self.cfg.vsd_use_lora:
# set up LoRA layers
lora_attn_procs = {}
for name in pipe_phi.unet.attn_processors.keys():
cross_attention_dim = (
None
if name.endswith("attn1.processor")
else pipe_phi.unet.config.cross_attention_dim
)
if name.startswith("mid_block"):
hidden_size = pipe_phi.unet.config.block_out_channels[-1]
elif name.startswith("up_blocks"):
block_id = int(name[len("up_blocks.")])
hidden_size = list(
reversed(pipe_phi.unet.config.block_out_channels)
)[block_id]
elif name.startswith("down_blocks"):
block_id = int(name[len("down_blocks.")])
hidden_size = pipe_phi.unet.config.block_out_channels[block_id]
lora_attn_procs[name] = LoRAAttnProcessor(
hidden_size=hidden_size, cross_attention_dim=cross_attention_dim
)
pipe_phi.unet.set_attn_processor(lora_attn_procs)
self.lora_layers = AttnProcsLayers(pipe_phi.unet.attn_processors).to(
self.device
)
self.lora_layers._load_state_dict_pre_hooks.clear()
self.lora_layers._state_dict_hooks.clear()
threestudio.info(f"Loaded Stable Diffusion!")
self.scheduler = DDPMScheduler.from_config(pipe.scheduler.config)
self.num_train_timesteps = self.scheduler.config.num_train_timesteps
# q(z_t|x) = N(alpha_t x, sigma_t^2 I)
# in DDPM, alpha_t = sqrt(alphas_cumprod_t), sigma_t^2 = 1 - alphas_cumprod_t
self.alphas_cumprod: Float[Tensor, "T"] = self.scheduler.alphas_cumprod.to(
self.device
)
self.alphas: Float[Tensor, "T"] = self.alphas_cumprod**0.5
self.sigmas: Float[Tensor, "T"] = (1 - self.alphas_cumprod) ** 0.5
# log SNR
self.lambdas: Float[Tensor, "T"] = self.sigmas / self.alphas
self._non_trainable_modules = NonTrainableModules(
pipe=pipe,
pipe_phi=pipe_phi,
)
# self.clip_image_embeddings and self.image_latents
self.prepare_image_embeddings()
@property
def pipe(self) -> Zero123Pipeline:
return self._non_trainable_modules.pipe
@property
def pipe_phi(self) -> Zero123Pipeline:
if self._non_trainable_modules.pipe_phi is None:
raise RuntimeError("phi model is not available.")
return self._non_trainable_modules.pipe_phi
def prepare_pipe(self, pipe: Zero123Pipeline): | cleanup() | 5 | 2023-11-27 23:39:49+00:00 | 12k |
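The record above caches alphas = alphas_cumprod ** 0.5 and sigmas = (1 - alphas_cumprod) ** 0.5 for the forward process q(z_t|x) = N(alpha_t x, sigma_t^2 I). A self-contained sketch of how such terms are typically used to noise a latent; the beta schedule, tensor shapes and timesteps are illustrative assumptions rather than the real DDPMScheduler values:

import torch

num_train_timesteps = 1000
betas = torch.linspace(1e-4, 2e-2, num_train_timesteps)   # toy schedule, not the trained one
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
alphas = alphas_cumprod ** 0.5                             # alpha_t
sigmas = (1 - alphas_cumprod) ** 0.5                       # sigma_t

latents = torch.randn(2, 4, 32, 32)                        # assumed latent shape
t = torch.randint(0, num_train_timesteps, (2,))
noise = torch.randn_like(latents)
# z_t = alpha_t * x + sigma_t * eps
latents_noisy = alphas[t].view(-1, 1, 1, 1) * latents + sigmas[t].view(-1, 1, 1, 1) * noise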
DAMO-NLP-SG/VCD | experiments/llava/model/language_model/mpt/modeling_mpt.py | [
{
"identifier": "attn_bias_shape",
"path": "experiments/llava/model/language_model/mpt/attention.py",
"snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n if (prefix_lm or not causal) or use_sequence_id:\n return (1, n_heads, seq_len, seq_len)\n return (1, n_heads, 1, seq_len)\n elif prefix_lm or use_sequence_id:\n return (1, 1, seq_len, seq_len)\n return None\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "build_attn_bias",
"path": "experiments/llava/model/language_model/mpt/attention.py",
"snippet": "def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n (device, dtype) = (attn_bias.device, attn_bias.dtype)\n attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))\n return attn_bias\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
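build_attn_bias above produces an additive bias that is summed into the attention logits before the softmax (with ALiBi it also carries per-head distance penalties via build_alibi_bias). A small sketch of that mechanism; the head count, sequence length and head dimension are assumptions:

import torch
import torch.nn.functional as F

n_heads, seq_len, d_head = 4, 8, 16
q = torch.randn(1, n_heads, seq_len, d_head)
k = torch.randn(1, n_heads, seq_len, d_head)

# Causal additive bias of shape (1, 1, seq_len, seq_len): the upper triangle is masked out.
mask = torch.triu(torch.ones(seq_len, seq_len), diagonal=1).bool()
attn_bias = torch.zeros(1, 1, seq_len, seq_len).masked_fill(mask, float('-inf'))

scores = q @ k.transpose(-2, -1) / d_head ** 0.5 + attn_bias
weights = F.softmax(scores, dim=-1)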
{
"identifier": "MPTBlock",
"path": "experiments/llava/model/language_model/mpt/blocks.py",
"snippet": "class MPTBlock(nn.Module):\n\n def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):\n del kwargs\n super().__init__()\n norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]\n attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]\n self.norm_1 = norm_class(d_model, device=device)\n self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)\n self.norm_2 = norm_class(d_model, device=device)\n self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)\n self.resid_attn_dropout = nn.Dropout(resid_pdrop)\n self.resid_ffn_dropout = nn.Dropout(resid_pdrop)\n\n def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:\n a = self.norm_1(x)\n (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)\n x = x + self.resid_attn_dropout(b)\n m = self.norm_2(x)\n n = self.ffn(m)\n x = x + self.resid_ffn_dropout(n)\n return (x, attn_weights, past_key_value)"
},
{
"identifier": "SharedEmbedding",
"path": "experiments/llava/model/language_model/mpt/custom_embedding.py",
"snippet": "class SharedEmbedding(nn.Embedding):\n\n def forward(self, input: Tensor, unembed: bool=False) -> Tensor:\n if unembed:\n return F.linear(input, self.weight)\n return super().forward(input)"
},
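SharedEmbedding's unembed=True branch reuses the embedding matrix as the output projection (weight tying). A usage sketch, assuming the class defined just above is in scope and using arbitrary sizes:

import torch

vocab_size, d_model = 50368, 2048
wte = SharedEmbedding(vocab_size, d_model)       # class shown above
tokens = torch.randint(0, vocab_size, (1, 8))
hidden = wte(tokens)                             # (1, 8, d_model) embedding lookup
logits = wte(hidden, unembed=True)               # (1, 8, vocab_size) via F.linear(hidden, wte.weight)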
{
"identifier": "NORM_CLASS_REGISTRY",
"path": "experiments/llava/model/language_model/mpt/norm.py",
"snippet": "NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}"
},
{
"identifier": "MPTConfig",
"path": "experiments/llava/model/language_model/mpt/configuration_mpt.py",
"snippet": "class MPTConfig(PretrainedConfig):\n model_type = 'mpt'\n\n def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):\n \"\"\"The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the MLP.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n multiquery_attention (bool): Whether to use multiquery attention implementation.\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. 
Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n \"\"\"\n self.d_model = d_model\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.expansion_ratio = expansion_ratio\n self.max_seq_len = max_seq_len\n self.vocab_size = vocab_size\n self.resid_pdrop = resid_pdrop\n self.emb_pdrop = emb_pdrop\n self.learned_pos_emb = learned_pos_emb\n self.attn_config = attn_config\n self.init_device = init_device\n self.logit_scale = logit_scale\n self.no_bias = no_bias\n self.verbose = verbose\n self.embedding_fraction = embedding_fraction\n self.norm_type = norm_type\n self.use_cache = use_cache\n self.init_config = init_config\n if 'name' in kwargs:\n del kwargs['name']\n if 'loss_fn' in kwargs:\n del kwargs['loss_fn']\n super().__init__(**kwargs)\n self._validate_config()\n\n def _set_config_defaults(self, config, config_defaults):\n for (k, v) in config_defaults.items():\n if k not in config:\n config[k] = v\n return config\n\n def _validate_config(self):\n self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)\n self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)\n if self.d_model % self.n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads')\n if any((prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):\n raise ValueError(\"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1\")\n if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:\n raise ValueError(f\"Unknown attn_impl={self.attn_config['attn_impl']}\")\n if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')\n if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('alibi only implemented with torch and triton attention.')\n if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')\n if self.embedding_fraction > 1 or self.embedding_fraction <= 0:\n raise ValueError('model.embedding_fraction must be between 0 (exclusive) 
and 1 (inclusive)!')\n if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':\n raise ValueError(f\"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.\")\n if self.init_config.get('name', None) is None:\n raise ValueError(f\"self.init_config={self.init_config!r} 'name' needs to be set.\")\n if not self.learned_pos_emb and (not self.attn_config['alibi']):\n raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')"
},
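A hedged example of instantiating the configuration documented above. Every keyword appears in the docstring, the numeric values are arbitrary, and a partial attn_config is merged with attn_config_defaults inside _validate_config:

# Assumes MPTConfig (defined above) is importable in the current package.
config = MPTConfig(
    d_model=1024,                                   # must be divisible by n_heads
    n_heads=16,
    n_layers=12,
    max_seq_len=2048,
    vocab_size=50368,
    attn_config={'attn_impl': 'torch', 'alibi': True},
    norm_type='low_precision_layernorm',
    no_bias=True,
)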
{
"identifier": "AutoTokenizerForMOD",
"path": "experiments/llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "class AutoTokenizerForMOD(AutoTokenizer):\n \"\"\"AutoTokenizer + Adaptation for MOD.\n\n A simple wrapper around AutoTokenizer to make instantiating\n an MOD-adapted tokenizer a bit easier.\n\n MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),\n a padding token, and a property to get the token ids of the\n sentinel tokens.\n \"\"\"\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n \"\"\"See `AutoTokenizer.from_pretrained` docstring.\"\"\"\n tokenizer = super().from_pretrained(*args, **kwargs)\n adapt_tokenizer_for_denoising(tokenizer)\n return tokenizer"
},
{
"identifier": "adapt_tokenizer_for_denoising",
"path": "experiments/llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):\n \"\"\"Adds sentinel tokens and padding token (if missing).\n\n Expands the tokenizer vocabulary to include sentinel tokens\n used in mixture-of-denoiser tasks as well as a padding token.\n\n All added tokens are added as special tokens. No tokens are\n added if sentinel tokens and padding token already exist.\n \"\"\"\n sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]\n tokenizer.add_tokens(sentinels_to_add, special_tokens=True)\n if tokenizer.pad_token is None:\n tokenizer.add_tokens('<pad>', special_tokens=True)\n tokenizer.pad_token = '<pad>'\n assert tokenizer.pad_token_id is not None\n sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])\n _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids\n tokenizer.sentinel_token_ids = _sentinel_token_ids"
},
{
"identifier": "add_bidirectional_mask_if_missing",
"path": "experiments/llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):\n \"\"\"Attempts to add bidirectional_mask to batch if missing.\n\n Raises:\n KeyError if bidirectional_mask is missing and can't be inferred\n \"\"\"\n if 'bidirectional_mask' not in batch:\n if batch.get('mode', None) == 'icl_task':\n batch['bidirectional_mask'] = batch['attention_mask'].clone()\n for (i, continuation_indices) in enumerate(batch['continuation_indices']):\n batch['bidirectional_mask'][i, continuation_indices] = 0\n elif 'labels' in batch and 'attention_mask' in batch:\n batch['bidirectional_mask'] = torch.logical_and(torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100)).type_as(batch['attention_mask'])\n else:\n raise KeyError('No bidirectional_mask in batch and not sure how to construct one.')"
},
{
"identifier": "convert_hf_causal_lm_to_prefix_lm",
"path": "experiments/llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:\n \"\"\"Converts a HuggingFace Causal LM to a Prefix LM.\n\n Supported HuggingFace model classes:\n - `GPT2LMHeadModel`\n - `GPTNeoForCausalLM`\n - `GPTNeoXForCausalLM`\n - `GPTJForCausalLM`\n - `BloomForCausalLM`\n - `OPTForCausalLM`\n\n Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the\n `generate` method and/or select underlying methods depending on the model class.\n\n These changes preserve the model API, but add a new input to `forward`: \"bidirectional_mask\".\n\n Notes on training:\n To actually train the converted model as a Prefix LM, training batches will need to indicate\n the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.\n\n **This is not a standard input and requires custom layers either within or after your dataloader.**\n\n In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`\n such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.\n That is, the prefix portion of the sequence should not generate any loss. Loss should only be\n generated by the target portion of the sequence.\n\n Notes on `GPTNeoForCausalLM`:\n To simplify the implementation, \"global\" and \"local\" attention layers are handled differently.\n For \"global\" layers, we handle conversion as described above. For \"local\" layers, which use a\n causal attention mask within a restricted local window, we do not alter the masking.\n\n Notes on `forward` method conversion:\n After conversion, the `forward` method will handle a new input, `bidirectional_mask`,\n which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions\n belonging to the prefix (prefix tokens can attend to one another bidirectionally), and\n 0 indicates token positions belonging to the target.\n\n The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing\n causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset\n the causal masks before returning the result.\n\n Notes on `generate` method conversion:\n After conversion, the `generate` method will have the same signature but will internally\n convert all causal masks to be purely bidirectional, call the original `generate` method, and\n (where appropriate) reset the causal masks before returning the result.\n\n This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token\n \"prompt\" passed to `generate` (which is treated as the prefix) and then sequentially generates\n each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one\n another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and\n previously-generated tokens (also as expected in a Prefix LM).\n\n To preserve the API, the original methods are renamed to `_original_forward` and\n `_original_generate`, and replaced with new `forward` and `generate` methods that wrap\n them, respectively. Although implementation details vary by model class.\n \"\"\"\n if isinstance(model, _SUPPORTED_GPT_MODELS):\n return _convert_gpt_causal_lm_to_prefix_lm(model)\n elif isinstance(model, BloomForCausalLM):\n return _convert_bloom_causal_lm_to_prefix_lm(model)\n elif isinstance(model, OPTForCausalLM):\n return _convert_opt_causal_lm_to_prefix_lm(model)\n else:\n raise TypeError(f'Cannot convert model to Prefix LM. 
' + f'Model does not belong to set of supported HF models:' + f'\\n{_SUPPORTED_HF_MODELS}')"
},
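A usage sketch for the converter documented above, following its docstring: after conversion, forward accepts an extra bidirectional_mask where 1 marks prefix positions (bidirectional attention) and 0 marks target positions. The checkpoint name and the half-and-half prefix split are placeholders:

import torch
from transformers import GPT2LMHeadModel, GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained('gpt2')    # any supported class from the docstring works
model = convert_hf_causal_lm_to_prefix_lm(GPT2LMHeadModel.from_pretrained('gpt2'))

inputs = tokenizer('a short prefix followed by a target', return_tensors='pt')
seq_len = inputs['input_ids'].shape[1]
bidirectional_mask = torch.zeros(1, seq_len, dtype=torch.long)
bidirectional_mask[:, : seq_len // 2] = 1                # first half treated as the prefix
outputs = model(**inputs, bidirectional_mask=bidirectional_mask)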
{
"identifier": "init_empty_weights",
"path": "experiments/llava/model/language_model/mpt/meta_init_context.py",
"snippet": "@contextmanager\ndef init_empty_weights(include_buffers: bool=False):\n \"\"\"Meta initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the meta device, therefore creating an empty model. Useful when just\n initializing the model would blow the available RAM.\n\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n # Initialize a model with 100 billions parameters in no time and without using any RAM.\n with init_empty_weights():\n tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n ```\n\n <Tip warning={true}>\n\n Any model created under this context manager has no weights. As such you can't do something like\n `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n\n </Tip>\n \"\"\"\n with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:\n yield f"
},
{
"identifier": "MODEL_INIT_REGISTRY",
"path": "experiments/llava/model/language_model/mpt/param_init_fns.py",
"snippet": "MODEL_INIT_REGISTRY = {'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_}"
},
{
"identifier": "generic_param_init_fn_",
"path": "experiments/llava/model/language_model/mpt/param_init_fns.py",
"snippet": "def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):\n del kwargs\n if verbose > 1:\n warnings.warn(f'If model has bias parameters they are initialized to 0.')\n init_div_is_residual = init_div_is_residual\n if init_div_is_residual is False:\n div_is_residual = 1.0\n elif init_div_is_residual is True:\n div_is_residual = math.sqrt(2 * n_layers)\n elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):\n div_is_residual = init_div_is_residual\n elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():\n div_is_residual = float(init_div_is_residual)\n else:\n div_is_residual = 1.0\n raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')\n if init_div_is_residual is not False:\n if verbose > 1:\n warnings.warn(f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.')\n if isinstance(module, nn.Linear):\n if hasattr(module, '_fused'):\n fused_init_helper_(module, init_fn_)\n else:\n init_fn_(module.weight)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n if init_div_is_residual is not False and getattr(module, '_is_residual', False):\n with torch.no_grad():\n module.weight.div_(div_is_residual)\n elif isinstance(module, nn.Embedding):\n if emb_init_std is not None:\n std = emb_init_std\n if std == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')\n elif emb_init_uniform_lim is not None:\n lim = emb_init_uniform_lim\n if isinstance(lim, Sequence):\n if len(lim) > 2:\n raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')\n if lim[0] == lim[1]:\n warnings.warn(f'Embedding layer initialized to {lim[0]}.')\n else:\n if lim == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n lim = [-lim, lim]\n (a, b) = lim\n emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')\n else:\n emb_init_fn_ = init_fn_\n emb_init_fn_(module.weight)\n elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):\n if verbose > 1:\n warnings.warn(f'Norm weights are set to 1. 
If norm layer has a bias it is initialized to 0.')\n if hasattr(module, 'weight') and module.weight is not None:\n torch.nn.init.ones_(module.weight)\n if hasattr(module, 'bias') and module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.MultiheadAttention):\n if module._qkv_same_embed_dim:\n assert module.in_proj_weight is not None\n assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)\n assert d_model is not None\n _d = d_model\n splits = (0, _d, 2 * _d, 3 * _d)\n for (s, e) in zip(splits[:-1], splits[1:]):\n init_fn_(module.in_proj_weight[s:e])\n else:\n assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)\n assert module.in_proj_weight is None\n init_fn_(module.q_proj_weight)\n init_fn_(module.k_proj_weight)\n init_fn_(module.v_proj_weight)\n if module.in_proj_bias is not None:\n torch.nn.init.zeros_(module.in_proj_bias)\n if module.bias_k is not None:\n torch.nn.init.zeros_(module.bias_k)\n if module.bias_v is not None:\n torch.nn.init.zeros_(module.bias_v)\n init_fn_(module.out_proj.weight)\n if init_div_is_residual is not False and getattr(module.out_proj, '_is_residual', False):\n with torch.no_grad():\n module.out_proj.weight.div_(div_is_residual)\n if module.out_proj.bias is not None:\n torch.nn.init.zeros_(module.out_proj.bias)\n else:\n for _ in module.parameters(recurse=False):\n raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')"
}
] | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func | 7,371 | """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype) | self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max) | 1 | 2023-11-26 12:34:31+00:00 | 12k |
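Both copies of the MPT code above rely on the same lazy attention-bias pattern: __init__ only records attn_bias_shape, and _attn_bias allocates the tensor on the first call with the caller's device and dtype. A stripped-down, generic illustration of that caching pattern (a simplified sketch, not MPT's actual helpers or signatures):

import torch
import torch.nn as nn

class LazyBiasModule(nn.Module):
    """Illustration only: allocate an additive attention bias lazily, on first use."""

    def __init__(self, bias_shape):
        super().__init__()
        self.bias_shape = bias_shape        # e.g. (1, n_heads, 1, max_seq_len)
        self._bias_initialized = False
        self.attn_bias = None               # plain cached tensor, not a Parameter

    @torch.no_grad()
    def _attn_bias(self, device, dtype):
        if not self._bias_initialized:
            # allocate on the caller's device/dtype; a real model would now fill it
            # (ALiBi slopes, causal / prefix-LM masks, ...)
            self.attn_bias = torch.zeros(self.bias_shape, device=device, dtype=dtype)
            self._bias_initialized = True
        return self.attn_bias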
CineMingle/CineMingle | Movie_Data_Capture.py | [
{
"identifier": "get_data_from_json",
"path": "scraper.py",
"snippet": "def get_data_from_json(\n file_number: str,\n open_cc: opencc.OpenCC,\n specified_source: str, specified_url: str) -> typing.Optional[dict]:\n \n # iterate through all services and fetch the data 从网站上查询片名解析JSON返回元数据\n # :param file_number: 影片名称\n # :param open_cc: 简繁转换器\n # :param specified_source: 指定的媒体数据源\n # :param specified_url: 指定的数据查询地址, 目前未使用\n # :return 给定影片名称的具体信息\n \n try:\n actor_mapping_data = etree.parse(str(Path.home() / '.local' / 'share' / 'mdc' / 'mapping_actor.xml'))\n info_mapping_data = etree.parse(str(Path.home() / '.local' / 'share' / 'mdc' / 'mapping_info.xml'))\n except:\n actor_mapping_data = etree.fromstring(\"<html></html>\", etree.HTMLParser())\n info_mapping_data = etree.fromstring(\"<html></html>\", etree.HTMLParser())\n\n conf = config.getInstance()\n # default fetch order list, from the beginning to the end\n sources = conf.sources()\n\n # TODO 准备参数\n # - 清理 ADC_function, webcrawler\n proxies: dict = None\n config_proxy = conf.proxy()\n if config_proxy.enable:\n proxies = config_proxy.proxies()\n\n # javdb website logic\n # javdb have suffix\n javdb_sites = conf.javdb_sites().split(',')\n for i in javdb_sites:\n javdb_sites[javdb_sites.index(i)] = \"javdb\" + i\n javdb_sites.append(\"javdb\")\n # 不加载过期的cookie,javdb登录界面显示为7天免登录,故假定cookie有效期为7天\n has_valid_cookie = False\n for cj in javdb_sites:\n javdb_site = cj\n cookie_json = javdb_site + '.json'\n cookies_dict, cookies_filepath = load_cookies(cookie_json)\n if isinstance(cookies_dict, dict) and isinstance(cookies_filepath, str):\n cdays = file_modification_days(cookies_filepath)\n if cdays < 7:\n javdb_cookies = cookies_dict\n has_valid_cookie = True\n break\n elif cdays != 9999:\n print(\n f'[!]Cookies file {cookies_filepath} was updated {cdays} days ago, it will not be used for HTTP requests.')\n if not has_valid_cookie:\n # get real random site from javdb_sites, because random is not really random when the seed value is known\n # 已经是没有这些随机数了\n # javdb_site = secrets.choice(javdb_sites)\n javdb_site = None\n javdb_cookies = None\n\n ca_cert = None\n if conf.cacert_file():\n ca_cert = conf.cacert_file()\n\n json_data = search(file_number, sources, proxies=proxies, verify=ca_cert,\n dbsite=javdb_site, dbcookies=javdb_cookies,\n morestoryline=conf.is_storyline(),\n specifiedSource=specified_source, specifiedUrl=specified_url,\n debug = conf.debug())\n # Return if data not found in all sources\n if not json_data:\n print('[-]Movie Number not found!')\n return None\n\n # 增加number严格判断,避免提交任何number,总是返回\"本橋実来 ADZ335\",这种返回number不一致的数据源故障\n # 目前选用number命名规则是javdb.com Domain Creation Date: 2013-06-19T18:34:27Z\n # 然而也可以跟进关注其它命名规则例如airav.wiki Domain Creation Date: 2019-08-28T07:18:42.0Z\n # 如果将来javdb.com命名规则下不同Studio出现同名碰撞导致无法区分,可考虑更换规则,更新相应的number分析和抓取代码。\n if str(json_data.get('number')).upper() != file_number.upper():\n try:\n if json_data.get('allow_number_change'):\n pass\n except:\n print('[-]Movie number has changed! 
[{}]->[{}]'.format(file_number, str(json_data.get('number'))))\n return None\n\n # ================================================网站规则添加结束================================================\n\n if json_data.get('title') == '':\n print('[-]Movie Number or Title not found!')\n return None\n\n title = json_data.get('title')\n actor_list = str(json_data.get('actor')).strip(\"[ ]\").replace(\"'\", '').split(',') # 字符串转列表\n actor_list = [actor.strip() for actor in actor_list] # 去除空白\n director = json_data.get('director')\n release = json_data.get('release')\n number = json_data.get('number')\n studio = json_data.get('studio')\n source = json_data.get('source')\n runtime = json_data.get('runtime')\n outline = json_data.get('outline')\n label = json_data.get('label')\n series = json_data.get('series')\n year = json_data.get('year')\n\n if json_data.get('cover_small'):\n cover_small = json_data.get('cover_small')\n else:\n cover_small = ''\n\n if json_data.get('trailer'):\n trailer = json_data.get('trailer')\n else:\n trailer = ''\n\n if json_data.get('extrafanart'):\n extrafanart = json_data.get('extrafanart')\n else:\n extrafanart = ''\n\n imagecut = json_data.get('imagecut')\n tag = str(json_data.get('tag')).strip(\"[ ]\").replace(\"'\", '').replace(\" \", '').split(',') # 字符串转列表 @\n while 'XXXX' in tag:\n tag.remove('XXXX')\n while 'xxx' in tag:\n tag.remove('xxx')\n if json_data['source'] =='pissplay': # pissplay actor为英文名,不用去除空格\n actor = str(actor_list).strip(\"[ ]\").replace(\"'\", '')\n else:\n actor = str(actor_list).strip(\"[ ]\").replace(\"'\", '').replace(\" \", '')\n\n # if imagecut == '3':\n # DownloadFileWithFilename()\n\n # ====================处理异常字符====================== #\\/:*?\"<>|\n actor = special_characters_replacement(actor)\n actor_list = [special_characters_replacement(a) for a in actor_list]\n title = special_characters_replacement(title)\n label = special_characters_replacement(label)\n outline = special_characters_replacement(outline)\n series = special_characters_replacement(series)\n studio = special_characters_replacement(studio)\n director = special_characters_replacement(director)\n tag = [special_characters_replacement(t) for t in tag]\n release = release.replace('/', '-')\n tmpArr = cover_small.split(',')\n if len(tmpArr) > 0:\n cover_small = tmpArr[0].strip('\\\"').strip('\\'')\n # ====================处理异常字符 END================== #\\/:*?\"<>|\n\n # 返回处理后的json_data\n json_data['title'] = title\n json_data['original_title'] = title\n json_data['actor'] = actor\n json_data['release'] = release\n json_data['cover_small'] = cover_small\n json_data['tag'] = tag\n json_data['year'] = year\n json_data['actor_list'] = actor_list\n json_data['trailer'] = trailer\n json_data['extrafanart'] = extrafanart\n json_data['label'] = label\n json_data['outline'] = outline\n json_data['series'] = series\n json_data['studio'] = studio\n json_data['director'] = director\n\n if conf.is_translate():\n translate_values = conf.translate_values().split(\",\")\n for translate_value in translate_values:\n if json_data[translate_value] == \"\":\n continue\n if translate_value == \"title\":\n title_dict = json.loads(\n (Path.home() / '.local' / 'share' / 'mdc' / 'c_number.json').read_text(encoding=\"utf-8\"))\n try:\n json_data[translate_value] = title_dict[number]\n continue\n except:\n pass\n if conf.get_translate_engine() == \"azure\":\n t = translate(\n json_data[translate_value],\n target_language=\"zh-Hans\",\n engine=conf.get_translate_engine(),\n key=conf.get_translate_key(),\n )\n 
else:\n if len(json_data[translate_value]):\n if type(json_data[translate_value]) == str:\n json_data[translate_value] = special_characters_replacement(json_data[translate_value])\n json_data[translate_value] = translate(json_data[translate_value])\n else:\n for i in range(len(json_data[translate_value])):\n json_data[translate_value][i] = special_characters_replacement(\n json_data[translate_value][i])\n list_in_str = \",\".join(json_data[translate_value])\n json_data[translate_value] = translate(list_in_str).split(',')\n\n if open_cc:\n cc_vars = conf.cc_convert_vars().split(\",\")\n ccm = conf.cc_convert_mode()\n\n def convert_list(mapping_data, language, vars):\n total = []\n for i in vars:\n if len(mapping_data.xpath('a[contains(@keyword, $name)]/@' + language, name=f\",{i},\")) != 0:\n i = mapping_data.xpath('a[contains(@keyword, $name)]/@' + language, name=f\",{i},\")[0]\n total.append(i)\n return total\n\n def convert(mapping_data, language, vars):\n if len(mapping_data.xpath('a[contains(@keyword, $name)]/@' + language, name=vars)) != 0:\n return mapping_data.xpath('a[contains(@keyword, $name)]/@' + language, name=vars)[0]\n else:\n raise IndexError('keyword not found')\n\n for cc in cc_vars:\n if json_data[cc] == \"\" or len(json_data[cc]) == 0:\n continue\n if cc == \"actor\":\n try:\n if ccm == 1:\n json_data['actor_list'] = convert_list(actor_mapping_data, \"zh_cn\", json_data['actor_list'])\n json_data['actor'] = convert(actor_mapping_data, \"zh_cn\", json_data['actor'])\n elif ccm == 2:\n json_data['actor_list'] = convert_list(actor_mapping_data, \"zh_tw\", json_data['actor_list'])\n json_data['actor'] = convert(actor_mapping_data, \"zh_tw\", json_data['actor'])\n elif ccm == 3:\n json_data['actor_list'] = convert_list(actor_mapping_data, \"jp\", json_data['actor_list'])\n json_data['actor'] = convert(actor_mapping_data, \"jp\", json_data['actor'])\n except:\n json_data['actor_list'] = [open_cc.convert(aa) for aa in json_data['actor_list']]\n json_data['actor'] = open_cc.convert(json_data['actor'])\n elif cc == \"tag\":\n try:\n if ccm == 1:\n json_data[cc] = convert_list(info_mapping_data, \"zh_cn\", json_data[cc])\n json_data[cc] = delete_all_elements_in_list(\"删除\", json_data[cc])\n elif ccm == 2:\n json_data[cc] = convert_list(info_mapping_data, \"zh_tw\", json_data[cc])\n json_data[cc] = delete_all_elements_in_list(\"删除\", json_data[cc])\n elif ccm == 3:\n json_data[cc] = convert_list(info_mapping_data, \"jp\", json_data[cc])\n json_data[cc] = delete_all_elements_in_list(\"删除\", json_data[cc])\n except:\n json_data[cc] = [open_cc.convert(t) for t in json_data[cc]]\n else:\n try:\n if ccm == 1:\n json_data[cc] = convert(info_mapping_data, \"zh_cn\", json_data[cc])\n json_data[cc] = delete_all_elements_in_str(\"删除\", json_data[cc])\n elif ccm == 2:\n json_data[cc] = convert(info_mapping_data, \"zh_tw\", json_data[cc])\n json_data[cc] = delete_all_elements_in_str(\"删除\", json_data[cc])\n elif ccm == 3:\n json_data[cc] = convert(info_mapping_data, \"jp\", json_data[cc])\n json_data[cc] = delete_all_elements_in_str(\"删除\", json_data[cc])\n except IndexError:\n json_data[cc] = open_cc.convert(json_data[cc])\n except:\n pass\n\n naming_rule = \"\"\n original_naming_rule = \"\"\n for i in conf.naming_rule().split(\"+\"):\n if i not in json_data:\n naming_rule += i.strip(\"'\").strip('\"')\n original_naming_rule += i.strip(\"'\").strip('\"')\n else:\n item = json_data.get(i)\n naming_rule += item if type(item) is not list else \"&\".join(item)\n # 
PATCH:处理[title]存在翻译的情况,后续NFO文件的original_name只会直接沿用naming_rule,这导致original_name非原始名\n # 理应在翻译处处理 naming_rule和original_naming_rule\n if i == 'title':\n item = json_data.get('original_title')\n original_naming_rule += item if type(item) is not list else \"&\".join(item)\n\n json_data['naming_rule'] = naming_rule\n json_data['original_naming_rule'] = original_naming_rule\n return json_data"
},
{
"identifier": "file_modification_days",
"path": "ADC_function.py",
"snippet": "def file_modification_days(filename: str) -> int:\n \"\"\"\n 文件修改时间距此时的天数\n \"\"\"\n mfile = Path(filename)\n if not mfile.is_file():\n return 9999\n mtime = int(mfile.stat().st_mtime)\n now = int(time.time())\n days = int((now - mtime) / (24 * 60 * 60))\n if days < 0:\n return 9999\n return days"
},
{
"identifier": "get_html",
"path": "ADC_function.py",
"snippet": "def get_html(url, cookies: dict = None, ua: str = None, return_type: str = None, encoding: str = None, json_headers=None):\n \"\"\"\n 网页请求核心函数\n \"\"\"\n verify = config.getInstance().cacert_file()\n config_proxy = config.getInstance().proxy()\n errors = \"\"\n\n headers = {\"User-Agent\": ua or G_USER_AGENT} # noqa\n if json_headers is not None:\n headers.update(json_headers)\n\n for i in range(config_proxy.retry):\n try:\n if config_proxy.enable:\n proxies = config_proxy.proxies()\n result = requests.get(str(url), headers=headers, timeout=config_proxy.timeout, proxies=proxies,\n verify=verify,\n cookies=cookies)\n else:\n result = requests.get(str(url), headers=headers, timeout=config_proxy.timeout, cookies=cookies)\n\n if return_type == \"object\":\n return result\n elif return_type == \"content\":\n return result.content\n else:\n result.encoding = encoding or result.apparent_encoding\n return result.text\n except Exception as e:\n print(\"[-]Connect retry {}/{}\".format(i + 1, config_proxy.retry))\n errors = str(e)\n if \"getaddrinfo failed\" in errors:\n print(\"[-]Connect Failed! Please Check your proxy config\")\n debug = config.getInstance().debug()\n if debug:\n print(\"[-]\" + errors)\n else:\n print(\"[-]\" + errors)\n print('[-]Connect Failed! Please check your Proxy or Network!')\n raise Exception('Connect Failed')"
},
{
"identifier": "parallel_download_files",
"path": "ADC_function.py",
"snippet": "def parallel_download_files(dn_list: typing.Iterable[typing.Sequence], parallel: int = 0, json_headers=None):\n \"\"\"\n download files in parallel 多线程下载文件\n\n 用法示例: 2线程同时下载两个不同文件,并保存到不同路径,路径目录可未创建,但需要具备对目标目录和文件的写权限\n parallel_download_files([\n ('https://site1/img/p1.jpg', 'C:/temp/img/p1.jpg'),\n ('https://site2/cover/n1.xml', 'C:/tmp/cover/n1.xml')\n ])\n\n :dn_list: 可以是 tuple或者list: ((url1, save_fullpath1),(url2, save_fullpath2),) fullpath可以是str或Path\n :parallel: 并行下载的线程池线程数,为0则由函数自己决定\n \"\"\"\n mp_args = []\n for url, fullpath in dn_list:\n if url and isinstance(url, str) and url.startswith('http') \\\n and fullpath and isinstance(fullpath, (str, Path)) and len(str(fullpath)):\n fullpath = Path(fullpath)\n fullpath.parent.mkdir(parents=True, exist_ok=True)\n mp_args.append((url, fullpath, json_headers))\n if not len(mp_args):\n return []\n if not isinstance(parallel, int) or parallel not in range(1, 200):\n parallel = min(5, len(mp_args))\n with ThreadPoolExecutor(parallel) as pool:\n results = list(pool.map(download_one_file, mp_args))\n return results"
},
{
"identifier": "get_number",
"path": "number_parser.py",
"snippet": "def get_number(debug: bool, file_path: str) -> str:\n \"\"\"\n 从文件路径中提取番号 from number_parser import get_number\n >>> get_number(False, \"/Users/Guest/AV_Data_Capture/snis-829.mp4\")\n 'snis-829'\n >>> get_number(False, \"/Users/Guest/AV_Data_Capture/snis-829-C.mp4\")\n 'snis-829'\n >>> get_number(False, \"/Users/Guest/AV_Data_Capture/[脸肿字幕组][PoRO]牝教師4~穢された教壇~ 「生意気ドジっ娘女教師・美結~高飛車ハメ堕ち2濁金」[720p][x264_aac].mp4\")\n '牝教師4~穢された教壇~ 「生意気ドジっ娘女教師・美結~高飛車ハメ堕ち2濁金」'\n >>> get_number(False, \"C:¥Users¥Guest¥snis-829.mp4\")\n 'snis-829'\n >>> get_number(False, \"C:¥Users¥Guest¥snis-829-C.mp4\")\n 'snis-829'\n >>> get_number(False, \"./snis-829.mp4\")\n 'snis-829'\n >>> get_number(False, \"./snis-829-C.mp4\")\n 'snis-829'\n >>> get_number(False, \".¥snis-829.mp4\")\n 'snis-829'\n >>> get_number(False, \".¥snis-829-C.mp4\")\n 'snis-829'\n >>> get_number(False, \"snis-829.mp4\")\n 'snis-829'\n >>> get_number(False, \"snis-829-C.mp4\")\n 'snis-829'\n \"\"\"\n filepath = os.path.basename(file_path)\n # debug True 和 False 两块代码块合并,原因是此模块及函数只涉及字符串计算,没有IO操作,debug on时输出导致异常信息即可\n try:\n file_number = get_number_by_dict(filepath)\n if file_number:\n return file_number\n elif '字幕组' in filepath or 'SUB' in filepath.upper() or re.match(r'[\\u30a0-\\u30ff]+', filepath):\n filepath = G_spat.sub(\"\", filepath)\n filepath = re.sub(\"\\[.*?\\]\",\"\",filepath)\n filepath = filepath.replace(\".chs\", \"\").replace(\".cht\", \"\")\n file_number = str(re.findall(r'(.+?)\\.', filepath)).strip(\" [']\")\n return file_number\n elif '-' in filepath or '_' in filepath: # 普通提取番号 主要处理包含减号-和_的番号\n filepath = G_spat.sub(\"\", filepath)\n filename = str(re.sub(\"\\[\\d{4}-\\d{1,2}-\\d{1,2}\\] - \", \"\", filepath)) # 去除文件名中时间\n lower_check = filename.lower()\n if 'fc2' in lower_check:\n filename = lower_check.replace('--', '-').replace('_', '-').upper()\n filename = re.sub(\"[-_]cd\\d{1,2}\", \"\", filename, flags=re.IGNORECASE)\n if not re.search(\"-|_\", filename): # 去掉-CD1之后再无-的情况,例如n1012-CD1.wmv\n return str(re.search(r'\\w+', filename[:filename.find('.')], re.A).group())\n file_number = os.path.splitext(filename)\n print(file_number)\n filename = re.search(r'[\\w\\-_]+', filename, re.A)\n if filename:\n file_number = str(filename.group())\n else:\n file_number = file_number[0]\n file_number = re.sub(\"(-|_)c$\", \"\", file_number, flags=re.IGNORECASE)\n file_number = re.sub(\"(-|_)uc$\", \"\", file_number, flags=re.IGNORECASE)\n file_number = re.sub(\"(-|_)u$\", \"\", file_number, flags=re.IGNORECASE)\n if re.search(\"\\d+ch$\", file_number, flags=re.I):\n file_number = file_number[:-2]\n return file_number.upper()\n else: # 提取不含减号-的番号,FANZA CID\n # 欧美番号匹配规则\n oumei = re.search(r'[a-zA-Z]+\\.\\d{2}\\.\\d{2}\\.\\d{2}', filepath)\n if oumei:\n return oumei.group()\n try:\n return str(\n re.findall(r'(.+?)\\.',\n str(re.search('([^<>/\\\\\\\\|:\"\"\\\\*\\\\?]+)\\\\.\\\\w+$', filepath).group()))).strip(\n \"['']\").replace('_', '-')\n except:\n return str(re.search(r'(.+?)\\.', filepath)[0])\n except Exception as e:\n if debug:\n print(f'[-]Number Parser exception: {e} [{file_path}]')\n return None"
},
{
"identifier": "core_main",
"path": "core.py",
"snippet": "def core_main(movie_path, number_th, oCC, specified_source=None, specified_url=None):\n conf = config.getInstance()\n # =======================================================================初始化所需变量\n multi_part = False\n part = ''\n leak_word = ''\n c_word = ''\n cn_sub = False\n liuchu = False\n hack = False\n hack_word = ''\n _4k = False\n\n # 下面被注释的变量不需要\n # rootpath = os.getcwd\n number = number_th\n json_data = get_data_from_json(number, oCC, specified_source, specified_url) # 定义番号\n\n # Return if blank dict returned (data not found)\n if not json_data:\n moveFailedFolder(movie_path)\n return\n\n if json_data[\"number\"] != number:\n # fix issue #119\n # the root cause is we normalize the search id\n # print_files() will use the normalized id from website,\n # but paste_file_to_folder() still use the input raw search id\n # so the solution is: use the normalized search id\n number = json_data[\"number\"]\n imagecut = json_data.get('imagecut')\n tag = json_data.get('tag')\n # =======================================================================判断-C,-CD后缀\n if re.search('[-_]CD\\d+', movie_path, re.IGNORECASE):\n multi_part = True\n part = re.findall('[-_]CD\\d+', movie_path, re.IGNORECASE)[0].upper()\n if re.search(r'[-_]C(\\.\\w+$|-\\w+)|\\d+ch(\\.\\w+$|-\\w+)', movie_path,\n re.I) or '中文' in movie_path or '字幕' in movie_path:\n cn_sub = True\n c_word = '-C' # 中文字幕影片后缀\n\n # 判断是否无码\n unce = json_data.get('无码')\n uncensored = int(unce) if isinstance(unce, bool) else int(is_uncensored(number))\n\n if '流出' in movie_path or 'uncensored' in movie_path.lower():\n liuchu = '流出'\n leak = True\n leak_word = '-无码流出' # 流出影片后缀\n else:\n leak = False\n\n if 'hack'.upper() in str(movie_path).upper() or '破解' in movie_path:\n hack = True\n hack_word = \"-hack\"\n\n if '4k'.upper() in str(movie_path).upper() or '4k' in movie_path:\n _4k = True\n\n # 判断是否4k\n if '4K' in tag:\n tag.remove('4K') # 从tag中移除'4K'\n\n # 判断是否为无码破解\n if '无码破解' in tag:\n tag.remove('无码破解') # 从tag中移除'无码破解'\n\n # try:\n # props = get_video_properties(movie_path) # 判断是否为4K视频\n # if props['width'] >= 4096 or props['height'] >= 2160:\n # _4k = True\n # except:\n # pass\n\n # 调试模式检测\n if conf.debug():\n debug_print(json_data)\n\n # 创建文件夹\n # path = create_folder(rootpath + '/' + conf.success_folder(), json_data.get('location_rule'), json_data)\n\n cover = json_data.get('cover')\n ext = image_ext(cover)\n\n fanart_path = f\"fanart{ext}\"\n poster_path = f\"poster{ext}\"\n thumb_path = f\"thumb{ext}\"\n if config.getInstance().image_naming_with_number():\n fanart_path = f\"{number}{leak_word}{c_word}{hack_word}-fanart{ext}\"\n poster_path = f\"{number}{leak_word}{c_word}{hack_word}-poster{ext}\"\n thumb_path = f\"{number}{leak_word}{c_word}{hack_word}-thumb{ext}\"\n\n # main_mode\n # 1: 刮削模式 / Scraping mode\n # 2: 整理模式 / Organizing mode\n # 3:不改变路径刮削\n if conf.main_mode() == 1:\n # 创建文件夹\n path = create_folder(json_data)\n if multi_part == 1:\n number += part # 这时number会被附加上CD1后缀\n\n # 检查小封面, 如果image cut为3,则下载小封面\n if imagecut == 3:\n if 'headers' in json_data:\n small_cover_check(path, poster_path, json_data.get('cover_small'), movie_path, json_data)\n else:\n small_cover_check(path, poster_path, json_data.get('cover_small'), movie_path)\n\n # creatFolder会返回番号路径\n if 'headers' in json_data:\n image_download(cover, fanart_path, thumb_path, path, movie_path, json_data)\n else:\n image_download(cover, fanart_path, thumb_path, path, movie_path)\n\n if not multi_part or part.lower() == '-cd1':\n try:\n # 下载预告片\n if 
conf.is_trailer() and json_data.get('trailer'):\n trailer_download(json_data.get('trailer'), leak_word, c_word, hack_word, number, path, movie_path)\n\n # 下载剧照 data, path, filepath\n if conf.is_extrafanart() and json_data.get('extrafanart'):\n if 'headers' in json_data:\n extrafanart_download(json_data.get('extrafanart'), path, number, movie_path, json_data)\n else:\n extrafanart_download(json_data.get('extrafanart'), path, number, movie_path)\n\n # 下载演员头像 KODI .actors 目录位置\n if conf.download_actor_photo_for_kodi():\n actor_photo_download(json_data.get('actor_photo'), path, number)\n except:\n pass\n\n # 裁剪图\n cutImage(imagecut, path, thumb_path, poster_path, bool(conf.face_uncensored_only() and not uncensored))\n\n # 兼容Jellyfin封面图文件名规则\n if multi_part and conf.jellyfin_multi_part_fanart():\n linkImage(path, number_th, part, leak_word, c_word, hack_word, ext)\n\n # 移动电影\n paste_file_to_folder(movie_path, path, multi_part, number, part, leak_word, c_word, hack_word)\n\n # Move subtitles\n if(conf.check_subtitles()):\n move_status = move_subtitles(movie_path, path, multi_part, number, part, leak_word, c_word, hack_word)\n if move_status:\n cn_sub = True\n # 添加水印\n if conf.is_watermark():\n add_mark(os.path.join(path, poster_path), os.path.join(path, thumb_path), cn_sub, leak, uncensored,\n hack, _4k)\n\n # 最后输出.nfo元数据文件,以完成.nfo文件创建作为任务成功标志\n print_files(path, leak_word, c_word, json_data.get('naming_rule'), part, cn_sub, json_data, movie_path, tag,\n json_data.get('actor_list'), liuchu, uncensored, hack, hack_word\n , _4k, fanart_path, poster_path, thumb_path)\n\n elif conf.main_mode() == 2:\n # 创建文件夹\n path = create_folder(json_data)\n # 移动文件\n paste_file_to_folder_mode2(movie_path, path, multi_part, number, part, leak_word, c_word, hack_word)\n\n # Move subtitles\n if(conf.check_subtitles()):\n move_subtitles(movie_path, path, multi_part, number, part, leak_word, c_word, hack_word)\n\n elif conf.main_mode() == 3:\n path = str(Path(movie_path).parent)\n if multi_part == 1:\n number += part # 这时number会被附加上CD1后缀\n\n # 检查小封面, 如果image cut为3,则下载小封面\n if imagecut == 3:\n if 'headers' in json_data:\n small_cover_check(path, poster_path, json_data.get('cover_small'), movie_path, json_data)\n else:\n small_cover_check(path, poster_path, json_data.get('cover_small'), movie_path)\n\n # creatFolder会返回番号路径\n if 'headers' in json_data:\n image_download(cover, fanart_path, thumb_path, path, movie_path, json_data)\n else:\n image_download(cover, fanart_path, thumb_path, path, movie_path)\n\n if not multi_part or part.lower() == '-cd1':\n try:\n # 下载预告片\n if conf.is_trailer() and json_data.get('trailer'):\n trailer_download(json_data.get('trailer'), leak_word, c_word, hack_word, number, path, movie_path)\n\n # 下载剧照 data, path, filepath\n if conf.is_extrafanart() and json_data.get('extrafanart'):\n if 'headers' in json_data:\n extrafanart_download(json_data.get('extrafanart'), path, number, movie_path, json_data)\n else:\n extrafanart_download(json_data.get('extrafanart'), path, number, movie_path)\n\n # 下载演员头像 KODI .actors 目录位置\n if conf.download_actor_photo_for_kodi():\n actor_photo_download(json_data.get('actor_photo'), path, number)\n except:\n pass\n\n # 裁剪图\n cutImage(imagecut, path, fanart_path, poster_path, bool(conf.face_uncensored_only() and not uncensored))\n\n # 添加水印\n if conf.is_watermark():\n add_mark(os.path.join(path, poster_path), os.path.join(path, fanart_path), cn_sub, leak, uncensored, hack,\n _4k)\n\n # 兼容Jellyfin封面图文件名规则\n if multi_part and conf.jellyfin_multi_part_fanart():\n 
linkImage(path, number_th, part, leak_word, c_word, hack_word, ext)\n\n # 最后输出.nfo元数据文件,以完成.nfo文件创建作为任务成功标志\n print_files(path, leak_word, c_word, json_data.get('naming_rule'), part, cn_sub, json_data, movie_path,\n tag, json_data.get('actor_list'), liuchu, uncensored, hack, hack_word, _4k, fanart_path, poster_path,\n thumb_path)"
},
{
"identifier": "core_main_no_net_op",
"path": "core.py",
"snippet": "def core_main_no_net_op(movie_path, number):\n conf = config.getInstance()\n part = ''\n leak_word = ''\n leak = False\n c_word = ''\n cn_sub = False\n hack = False\n hack_word = ''\n _4k = False\n imagecut = 1\n multi = False\n part = ''\n path = str(Path(movie_path).parent)\n\n if re.search('[-_]CD\\d+', movie_path, re.IGNORECASE):\n part = re.findall('[-_]CD\\d+', movie_path, re.IGNORECASE)[0].upper()\n multi = True\n if re.search(r'[-_]C(\\.\\w+$|-\\w+)|\\d+ch(\\.\\w+$|-\\w+)', movie_path,\n re.I) or '中文' in movie_path or '字幕' in movie_path or \".chs\" in movie_path or '.cht' in movie_path:\n cn_sub = True\n c_word = '-C' # 中文字幕影片后缀\n uncensored = True if is_uncensored(number) else 0\n if '流出' in movie_path or 'uncensored' in movie_path.lower():\n leak_word = '-无码流出' # 无码流出影片后缀\n leak = True\n\n if 'hack'.upper() in str(movie_path).upper() or '破解' in movie_path:\n hack = True\n hack_word = \"-hack\"\n\n # try:\n\n # props = get_video_properties(movie_path) # 判断是否为4K视频\n # if props['width'] >= 4096 or props['height'] >= 2160:\n # _4k = True\n # except:\n # pass\n prestr = f\"{number}{leak_word}{c_word}{hack_word}\"\n\n full_nfo = Path(path) / f\"{prestr}{part}.nfo\"\n if full_nfo.is_file():\n if full_nfo.read_text(encoding='utf-8').find(r'<tag>无码</tag>') >= 0:\n uncensored = True\n try:\n nfo_xml = etree.parse(full_nfo)\n nfo_fanart_path = nfo_xml.xpath('//fanart/text()')[0]\n ext = Path(nfo_fanart_path).suffix\n except:\n return\n else:\n return\n fanart_path = f\"fanart{ext}\"\n poster_path = f\"poster{ext}\"\n thumb_path = f\"thumb{ext}\"\n if config.getInstance().image_naming_with_number():\n fanart_path = f\"{prestr}-fanart{ext}\"\n poster_path = f\"{prestr}-poster{ext}\"\n thumb_path = f\"{prestr}-thumb{ext}\"\n full_fanart_path = os.path.join(path, fanart_path)\n full_poster_path = os.path.join(path, poster_path)\n full_thumb_path = os.path.join(path, thumb_path)\n\n if not all(os.path.isfile(f) for f in (full_fanart_path, full_thumb_path)):\n return\n\n cutImage(imagecut, path, fanart_path, poster_path, bool(conf.face_uncensored_only() and not uncensored))\n if conf.is_watermark():\n add_mark(full_poster_path, full_thumb_path, cn_sub, leak, uncensored, hack, _4k)\n\n if multi and conf.jellyfin_multi_part_fanart():\n linkImage(path, number, part, leak_word, c_word, hack_word, ext)"
},
{
"identifier": "moveFailedFolder",
"path": "core.py",
"snippet": "def moveFailedFolder(filepath):\n conf = config.getInstance()\n failed_folder = conf.failed_folder()\n link_mode = conf.link_mode()\n # 模式3或软连接,改为维护一个失败列表,启动扫描时加载用于排除该路径,以免反复处理\n # 原先的创建软连接到失败目录,并不直观,不方便找到失败文件位置,不如直接记录该文件路径\n if conf.main_mode() == 3 or link_mode:\n ftxt = os.path.abspath(os.path.join(failed_folder, 'failed_list.txt'))\n print(\"[-]Add to Failed List file, see '%s'\" % ftxt)\n with open(ftxt, 'a', encoding='utf-8') as flt:\n flt.write(f'{filepath}\\n')\n elif conf.failed_move() and not link_mode:\n failed_name = os.path.join(failed_folder, os.path.basename(filepath))\n mtxt = os.path.abspath(os.path.join(failed_folder, 'where_was_i_before_being_moved.txt'))\n print(\"'[-]Move to Failed output folder, see '%s'\" % mtxt)\n with open(mtxt, 'a', encoding='utf-8') as wwibbmt:\n tmstr = datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n wwibbmt.write(f'{tmstr} FROM[{filepath}]TO[{failed_name}]\\n')\n try:\n if os.path.exists(failed_name):\n print('[-]File Exists while moving to FailedFolder')\n return\n shutil.move(filepath, failed_name)\n except:\n print('[-]File Moving to FailedFolder unsuccessful!')"
},
{
"identifier": "debug_print",
"path": "core.py",
"snippet": "def debug_print(data: json):\n try:\n print(\"[+] ------- DEBUG INFO -------\")\n for i, v in data.items():\n if i == 'outline':\n print('[+] -', \"%-19s\" % i, ':', len(v), 'characters')\n continue\n if i == 'actor_photo' or i == 'year':\n continue\n if i == 'extrafanart':\n print('[+] -', \"%-19s\" % i, ':', len(v), 'links')\n continue\n print(f'[+] - {i:<{cn_space(i, 19)}} : {v}')\n\n print(\"[+] ------- DEBUG INFO -------\")\n except:\n pass"
}
] | import argparse
import json
import os
import random
import re
import sys
import time
import shutil
import typing
import urllib3
import signal
import platform
import config
from datetime import datetime, timedelta
from lxml import etree
from pathlib import Path
from opencc import OpenCC
from scraper import get_data_from_json
from ADC_function import file_modification_days, get_html, parallel_download_files
from number_parser import get_number
from core import core_main, core_main_no_net_op, moveFailedFolder, debug_print | 9,816 |
def check_update(local_version):
"""
Check for updates by comparing the local version of the application with the latest version available on GitHub.
It fetches the latest release information from GitHub and compares the version numbers.
If a new version is available, it prints out the update information.
:param local_version: The current local version of the application.
"""
|
def check_update(local_version):
"""
Check for updates by comparing the local version of the application with the latest version available on GitHub.
It fetches the latest release information from GitHub and compares the version numbers.
If a new version is available, it prints out the update information.
:param local_version: The current local version of the application.
""" | htmlcode = get_html("https://api.github.com/repos/CineMingle/CineMingle/releases/latest") | 2 | 2023-11-25 03:16:13+00:00 | 12k |
crystallee-ai/controlGIF | animatediff/models/unet.py | [
{
"identifier": "CrossAttnDownBlock3D",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n \n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n \n self.has_cross_attention = True\n self.attn_num_attention_heads = num_attention_heads\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n \n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n \n attentions.append(\n Transformer3DModel(\n self.attn_num_attention_heads,\n out_channels // self.attn_num_attention_heads,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n # use_linear_projection=use_linear_projection,\n use_linear_projection=False,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n \n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n )\n )\n \n self.resnets = nn.ModuleList(resnets)\n self.attentions = nn.ModuleList(attentions)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n temb=None,\n encoder_hidden_states=None,\n attention_mask=None,\n num_frames=1,\n ):\n # TODO(Patrick, William) - attention mask is not used\n output_states = ()\n\n for resnet, attn, motion_module in zip(\n self.resnets, self.attentions, self.motion_modules\n ):\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n return_dict=False,\n )[0]\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n \n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "CrossAttnUpBlock3D",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n \n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_attention_heads = num_attention_heads\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n \n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n attentions.append(\n Transformer3DModel(\n self.attn_num_attention_heads,\n out_channels // self.attn_num_attention_heads,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n # use_linear_projection=use_linear_projection,\n use_linear_projection=False,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n self.resnets = nn.ModuleList(resnets)\n self.attentions = nn.ModuleList(attentions)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n res_hidden_states_tuple,\n temb=None,\n encoder_hidden_states=None,\n upsample_size=None,\n attention_mask=None,\n ):\n # TODO(Patrick, William) - attention mask is not used\n for resnet, attn, motion_module in zip(\n self.resnets, self.attentions, self.motion_modules\n ):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n return_dict=False,\n )[0]\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n \n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n 
hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "DownBlock3D",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n use_inflated_groupnorm=False,\n use_motion_module=True,\n motion_module_type=None, \n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n \n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None):\n output_states = ()\n\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n hidden_states = resnet(hidden_states, temb)\n # hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "UNetMidBlock3DCrossAttn",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=True,\n upcast_attention=False,\n \n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n\n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n )\n ]\n \n attentions = []\n motion_modules = []\n\n for _ in range(num_layers):\n attentions.append(\n Transformer3DModel(\n in_channels // num_attention_heads,\n num_attention_heads,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n # use_linear_projection=use_linear_projection,\n use_linear_projection=False,\n upcast_attention=upcast_attention,\n \n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=in_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n \n use_inflated_groupnorm=use_inflated_groupnorm,\n \n )\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n def forward(\n self,\n hidden_states,\n temb=None,\n encoder_hidden_states=None,\n attention_mask=None,\n ):\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet, motion_module in zip(self.attentions, self.resnets[1:], self.motion_modules):\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n return_dict=False,\n )[0]\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n hidden_states = resnet(hidden_states, temb)\n\n return hidden_states"
},
{
"identifier": "UpBlock3D",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n \n use_inflated_groupnorm=None,\n\n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n \n use_inflated_groupnorm=use_inflated_groupnorm,\n\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, encoder_hidden_states=None):\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n hidden_states = resnet(hidden_states, temb)\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n \n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "get_down_block",
"path": "animatediff/models/unet_blocks.py",
"snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n num_attention_heads,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=True,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n \n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n\n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n):\n down_block_type = down_block_type[7:] if down_block_type.startswith(\"UNetRes\") else down_block_type\n \n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n \n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n \n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")"
},
{
"identifier": "get_up_block",
"path": "animatediff/models/unet_blocks.py",
"snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n num_attention_heads,\n use_motion_module,\n motion_module_type,\n motion_module_kwargs,\n resnet_groups=None,\n cross_attention_dim=None,\n unet_use_cross_frame_attention=False,\n unet_use_temporal_attention=False,\n dual_cross_attention=False,\n use_linear_projection=True,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n \n use_inflated_groupnorm=False,\n\n):\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n \n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n \n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{up_block_type} does not exist.\")"
},
{
"identifier": "InflatedConv3d",
"path": "animatediff/models/resnet.py",
"snippet": "class InflatedConv3d(nn.Conv2d):\n def forward(self, x):\n video_length = x.shape[2]\n \n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f = video_length)\n \n return x "
},
{
"identifier": "InflatedGroupNorm",
"path": "animatediff/models/resnet.py",
"snippet": "class InflatedGroupNorm(nn.GroupNorm):\n def froward(self, x):\n video_length = x.shape[2]\n \n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n \n return x"
}
] | import sys
import os
import json
import torch
import torch.nn as nn
import torch.utils.checkpoint
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.loaders import UNet2DConditionLoadersMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.attention_processor import (
ADDED_KV_ATTENTION_PROCESSORS,
CROSS_ATTENTION_PROCESSORS,
AttentionProcessor,
AttnAddedKVProcessor,
AttnProcessor,
)
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from diffusers.models.modeling_utils import ModelMixin
from animatediff.models.unet_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
)
from .resnet import InflatedConv3d, InflatedGroupNorm
from diffusers.utils import WEIGHTS_NAME | 7,713 | in_channels: int = 4,
out_channels: int = 4,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
#-----
mid_block_type: str = "UnetMidBlock3DCrossAttn",
#-----
up_block_types: Tuple[str] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D"
),
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
# cross_attention_dim: int = 1024,
cross_attention_dim: int = 1280,
# attention_head_dim: Union[int, Tuple[int]] = 64,
attention_head_dim: Union[int, Tuple[int]] = 8,
num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
use_inflated_groupnorm=False,
# Additional
use_motion_module = False,
motion_module_resolutions = ( 1,2,4,8 ),
motion_module_mid_block = False,
motion_module_decoder_only = False,
motion_module_type = None,
motion_module_kwargs = {},
unet_use_cross_frame_attention = None,
unet_use_temporal_attention = None,
):
super().__init__()
self.sample_size = sample_size
# time_embed_dim = block_out_channels[0] * 4
if num_attention_heads is not None:
raise NotImplementedError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
)
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
num_attention_heads = num_attention_heads or attention_head_dim
# Check inputs
if len(down_block_types) != len(up_block_types):
raise ValueError(
f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
)
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
)
# input
conv_in_kernel = 3
conv_out_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
# self.conv_in = nn.Conv2d(
# in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
# )
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
time_embed_dim = block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], True, 0)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
)
# self.transformer_in = TransformerTemporalModel(
# num_attention_heads=8,
# attention_head_dim=attention_head_dim,
# in_channels=block_out_channels[0],
# num_layers=1,
# )
# class embedding
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(num_attention_heads, int):
num_attention_heads = (num_attention_heads,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
| sys.path.append("/root/autodl-tmp/code/animatediff/modelshigh")
# from diffusers.models.transformer_temporal import TransformerTemporalModel
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
"""
The output of [`UNet3DConditionModel`].
Args:
sample (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model.
"""
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):
_supports_gradient_checkpointing = False
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
#-----
mid_block_type: str = "UnetMidBlock3DCrossAttn",
#-----
up_block_types: Tuple[str] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D"
),
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
# cross_attention_dim: int = 1024,
cross_attention_dim: int = 1280,
# attention_head_dim: Union[int, Tuple[int]] = 64,
attention_head_dim: Union[int, Tuple[int]] = 8,
num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
use_inflated_groupnorm=False,
# Additional
use_motion_module = False,
motion_module_resolutions = ( 1,2,4,8 ),
motion_module_mid_block = False,
motion_module_decoder_only = False,
motion_module_type = None,
motion_module_kwargs = {},
unet_use_cross_frame_attention = None,
unet_use_temporal_attention = None,
):
super().__init__()
self.sample_size = sample_size
# time_embed_dim = block_out_channels[0] * 4
if num_attention_heads is not None:
raise NotImplementedError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
)
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
num_attention_heads = num_attention_heads or attention_head_dim
# Check inputs
if len(down_block_types) != len(up_block_types):
raise ValueError(
f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
)
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
)
# input
conv_in_kernel = 3
conv_out_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
# self.conv_in = nn.Conv2d(
# in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
# )
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
time_embed_dim = block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], True, 0)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
)
# self.transformer_in = TransformerTemporalModel(
# num_attention_heads=8,
# attention_head_dim=attention_head_dim,
# in_channels=block_out_channels[0],
# num_layers=1,
# )
# class embedding
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(num_attention_heads, int):
num_attention_heads = (num_attention_heads,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
| down_block = get_down_block( | 5 | 2023-11-25 07:43:32+00:00 | 12k |
amazon-science/instruct-video-to-video | modules/video_unet_temporal/unet.py | [
{
"identifier": "CrossAttnDownBlock3D",
"path": "modules/video_unet_temporal/unet_blocks.py",
"snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None, video_start_index=0):\n output_states = ()\n\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states, attention_mask, None, video_start_index)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states, 
video_start_index=video_start_index) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "CrossAttnUpBlock3D",
"path": "modules/video_unet_temporal/unet_blocks.py",
"snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n res_hidden_states_tuple,\n temb=None,\n encoder_hidden_states=None,\n upsample_size=None,\n attention_mask=None,\n video_start_index=0,\n ):\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, 
encoder_hidden_states, attention_mask, None, video_start_index)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states, video_start_index=video_start_index) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "DownBlock3D",
"path": "modules/video_unet_temporal/unet_blocks.py",
"snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, video_start_index=0):\n output_states = ()\n\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, None, None, None, video_start_index)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = motion_module(hidden_states, temb, video_start_index=video_start_index) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "UNetMidBlock3DCrossAttn",
"path": "modules/video_unet_temporal/unet_blocks.py",
"snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=False,\n upcast_attention=False,\n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n ]\n attentions = []\n motion_modules = []\n\n for _ in range(num_layers):\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n in_channels // attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=in_channels,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None, video_start_index=0):\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet, motion_module in zip(self.attentions, self.resnets[1:], self.motion_modules):\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states, video_start_index=video_start_index) if motion_module is not None else hidden_states\n hidden_states = resnet(hidden_states, temb)\n\n return hidden_states"
},
{
"identifier": "UpBlock3D",
"path": "modules/video_unet_temporal/unet_blocks.py",
"snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, video_start_index=0):\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, None, None, None, video_start_index)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = motion_module(hidden_states, temb, video_start_index=video_start_index) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "get_down_block",
"path": "modules/video_unet_temporal/unet_blocks.py",
"snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n):\n down_block_type = down_block_type[7:] if down_block_type.startswith(\"UNetRes\") else down_block_type\n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")"
},
{
"identifier": "get_up_block",
"path": "modules/video_unet_temporal/unet_blocks.py",
"snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n):\n up_block_type = up_block_type[7:] if up_block_type.startswith(\"UNetRes\") else up_block_type\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{up_block_type} does not exist.\")"
},
{
"identifier": "InflatedConv3d",
"path": "modules/video_unet_temporal/resnet.py",
"snippet": "class InflatedConv3d(nn.Conv2d):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x"
}
] | from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from .unet_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
)
from .resnet import InflatedConv3d
import os
import json
import torch
import torch.nn as nn
import torch.utils.checkpoint | 8,589 |
# count how many layers upsample the videos
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_attention_head_dim = list(reversed(attention_head_dim))
only_cross_attention = list(reversed(only_cross_attention))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
res = 2 ** (3 - i)
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
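# input_channel is the skip-connection width from the next shallower encoder level; min() clamps the index so the last (shallowest) up block reuses the final entry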
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=reversed_attention_head_dim[i],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
use_motion_module=use_motion_module and (res in motion_module_resolutions),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
self.conv_act = nn.SiLU()
self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def _set_gradient_checkpointing(self, module, value=False):
| # Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
mid_block_type: str = "UNetMidBlock3DCrossAttn",
up_block_types: Tuple[str] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D"
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: int = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
# Additional
use_motion_module = True,
motion_module_resolutions = ( 1,2,4,8 ),
motion_module_mid_block = True,
motion_module_decoder_only = False,
motion_module_type = 'Vanilla',
motion_module_kwargs = {},
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
# input
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
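# InflatedConv3d (see the resnet snippet above) applies a 2D conv to each frame by folding the frame axis into the batch, so the layer stays weight-compatible with a plain Conv2d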
# time
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
else:
self.class_embedding = None
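# class_embed_type: None with num_class_embeds -> learned label embedding table; "timestep" -> a TimestepEmbedding-style MLP; "identity" -> pass-through; otherwise no class conditioning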
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
only_cross_attention = [only_cross_attention] * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
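# the deepest (final) down block skips its downsampler via add_downsample=not is_final_block below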
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.down_blocks.append(down_block)
# mid
if mid_block_type == "UNetMidBlock3DCrossAttn":
self.mid_block = UNetMidBlock3DCrossAttn(
in_channels=block_out_channels[-1],
temb_channels=time_embed_dim,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
output_scale_factor=mid_block_scale_factor,
resnet_time_scale_shift=resnet_time_scale_shift,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[-1],
resnet_groups=norm_num_groups,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
use_motion_module=use_motion_module and motion_module_mid_block,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
else:
raise ValueError(f"unknown mid_block_type : {mid_block_type}")
# count how many layers upsample the videos
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_attention_head_dim = list(reversed(attention_head_dim))
only_cross_attention = list(reversed(only_cross_attention))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
res = 2 ** (3 - i)
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=reversed_attention_head_dim[i],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
use_motion_module=use_motion_module and (res in motion_module_resolutions),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
self.conv_act = nn.SiLU()
self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def _set_gradient_checkpointing(self, module, value=False): | if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)): | 4 | 2023-11-25 06:00:08+00:00 | 12k |
abdulhaim/LMRL-Gym | llm_rl_scripts/maze/ilql/eval_ilql.py | [
{
"identifier": "text_env_eval",
"path": "LLM_RL/environment.py",
"snippet": "def text_env_eval(\n env: Union[TextEnv, BatchedTextEnv], \n policy: Union[TextPolicy, BatchedTextPolicy], \n n_rollouts: int, \n initial_text_history: Optional[TextHistory]=None, # only allow one initial_text_history here\n seed_generator: Optional[Iterator[int]]=None, \n env_options: Optional[Dict]=None, # only allow one env_options here\n interaction_callback: Optional[Callable[[List[Tuple[TextHistory, TextHistory, TextHistory, float, bool]]], None]]=None, \n bsize: int=1, \n verbose: bool=True, \n) -> Tuple[List[List[InteractionTransition]], Dict[str, Any]]:\n interactions, rewards, dones, eps_lengths = [], [], [], []\n for _ in tqdm(range((n_rollouts+(bsize-1))//bsize), disable=not verbose):\n actual_bsize = min(n_rollouts-len(interactions), bsize)\n npad = bsize - actual_bsize\n interaction_batch = interact_environment(\n env, \n policy, \n initial_text_history=initial_text_history, \n env_seed=[None]*actual_bsize if seed_generator is None else [next(seed_generator) for _ in range(actual_bsize)], \n env_options=[env_options]*actual_bsize, \n bsize=actual_bsize,\n npad=npad,\n )\n \n for interaction in interaction_batch:\n interactions.append(interaction)\n rewards.append(sum(map(lambda x: x.reward, interaction)))\n dones.append(interaction[-1].done)\n eps_lengths.append(len(interaction))\n if interaction_callback is not None:\n interaction_callback(interaction)\n \n rewards = np.asarray(rewards, dtype=np.float32)\n dones = np.asarray(dones, dtype=np.float32)\n results_summary = dict(\n reward=dict(\n mean=np.mean(rewards), \n std=np.std(rewards), \n min=np.min(rewards), \n max=np.max(rewards), \n ), \n done=dict(\n mean=np.mean(dones), \n std=np.std(dones), \n min=np.min(dones), \n max=np.max(dones), \n ), \n length=dict(\n mean=np.mean(eps_lengths),\n std=np.std(eps_lengths),\n min=np.min(eps_lengths),\n max=np.max(eps_lengths),\n ),\n )\n \n return interactions, results_summary"
},
{
"identifier": "setup_maze_env",
"path": "llm_rl_scripts/maze/env/maze_utils.py",
"snippet": "def setup_maze_env(maze_name, describe_function, reward_function=None, last_k=1, max_steps=100):\n # setup environment\n if maze_name == 'umaze':\n maze = maze2d_umaze()\n valid_goals = np.array([[3, 3]])\n start_position = (3, 1)\n elif maze_name == \"double_t_maze\":\n maze = double_t_maze()\n valid_goals = np.array([[8, 6]])\n start_position = (1, 1)\n else:\n raise ValueError(f'unknown maze name: {maze_name}')\n \n # valid_goals = np.where(maze == 0)\n # valid_goals = np.array(list(zip(valid_goals[0], valid_goals[1])), dtype=np.int32)\n if describe_function == \"describe_observation\":\n describe_function = describe_observation\n elif describe_function == \"describe_observation_give_position\":\n describe_function = describe_observation_give_position\n elif describe_function == \"describe_observation_only_walls\":\n describe_function = describe_observation_only_walls\n else:\n raise ValueError(f'unknown describe function: {describe_function}')\n \n if reward_function is None or reward_function == \"standard_reward\":\n reward_function = standard_reward\n elif reward_function == \"illegal_penalty_reward\":\n reward_function = illegal_penalty_reward\n elif reward_function == \"illegal_penalty_diff_scale\":\n reward_function = illegal_penalty_diff_scale\n else:\n raise ValueError(f'unknown reward function: {reward_function}')\n \n env = MazeEnv(\n maze=maze, \n valid_goals=valid_goals, \n actions=manhatten_actions, \n max_steps=max_steps, \n display_initial_position=True,\n describe_function=describe_function,\n reward_function=reward_function,\n last_k=last_k,\n )\n return env"
},
{
"identifier": "maze_solver",
"path": "llm_rl_scripts/maze/env/maze_utils.py",
"snippet": "def maze_solver(maze: np.ndarray, goal_positions: List[Tuple[int, int]]) -> Dict[Tuple[int, int], str]:\n maze = maze.tolist()\n assert len(maze) > 0 and len(maze[0]) > 0, 'maze must be non-zero in area'\n assert all([maze[goal_pos[0]][goal_pos[1]] == 1 for goal_pos in goal_positions]), 'goal pos must be 1'\n move_mapping = {\n (0, 1): 'move right\\n',\n (0, -1): 'move left\\n',\n (1, 0): 'move down\\n',\n (-1, 0): 'move up\\n',\n }\n x_size, y_size = len(maze), len(maze[0])\n out_of_bounds = lambda x, y: x < 0 or x >= x_size or y < 0 or y >= y_size\n directions = [(1, 0), (0, 1), (-1, 0), (0, -1)]\n optimal_policy = dict()\n queue = deque(goal_positions)\n seen_pos = set(goal_positions)\n while len(queue) > 0:\n x, y = queue.popleft()\n for dx, dy in directions:\n new_pos = (x+dx, y+dy)\n if new_pos in seen_pos or out_of_bounds(*new_pos) or maze[new_pos[0]][new_pos[1]] == 0:\n continue\n queue.append(new_pos)\n seen_pos.add(new_pos)\n optimal_policy[new_pos] = move_mapping[(-dx, -dy)]\n return optimal_policy"
},
{
"identifier": "ReRankerSamplePolicy",
"path": "LLM_RL/algorithms/ppo/reranker_policy.py",
"snippet": "class ReRankerSamplePolicy(TextPolicy):\n \n def __init__(self, proposal_fn, score_fn: Callable[[List[TextHistory]], List[float]]):\n self.proposal_fn = proposal_fn\n self.score_fn = score_fn\n \n def act(self, text_history: TextHistory) -> TextHistory:\n proposals = self.proposal_fn(text_history)\n scores = np.asarray(self.score_fn(proposals), dtype=np.float32)\n # sample from scores\n scores = np.exp(scores) / np.exp(scores).sum()\n selected = np.random.choice(len(scores), p=scores)\n # # zip proposals and scores together\n # proposals_and_scores = list(zip(proposals, scores))\n # print(proposals_and_scores)\n return proposals[selected]"
},
{
"identifier": "ReRankerPolicy",
"path": "LLM_RL/algorithms/ppo/reranker_policy.py",
"snippet": "class ReRankerPolicy(TextPolicy):\n \n def __init__(self, proposal_fn: Callable[[TextHistory], List[TextHistory]], score_fn: Callable[[List[TextHistory]], List[float]]):\n self.proposal_fn = proposal_fn\n self.score_fn = score_fn\n\n def act(self, text_history: TextHistory) -> TextHistory:\n proposals = self.proposal_fn(text_history)\n scores = self.score_fn(proposals)\n\n return proposals[np.argmax(np.asarray(scores, dtype=np.float32)).item()]"
},
{
"identifier": "maze_proposal_function",
"path": "llm_rl_scripts/maze/env/env.py",
"snippet": "def maze_proposal_function(text_history: TextHistory) -> List[TextHistory]:\n return [text_history+(Text(action, True),) for action in manhatten_actions.keys()]"
},
{
"identifier": "Text",
"path": "LLM_RL/environment.py",
"snippet": "class Text:\n text: str\n is_action: bool"
},
{
"identifier": "describe_observation_give_position",
"path": "llm_rl_scripts/maze/env/env.py",
"snippet": "def describe_observation_give_position(maze:np.ndarray,\n position: Tuple[int, int],\n goal_position: Tuple[int, int],\n initial_position: Tuple[int, int]=None,\n move_history: List[str]=None,\n ) -> str:\n goal_description = f\"The goal is at position {' '.join(str(goal_position[0]))}, {' '.join(str(goal_position[1]))}.\"\n curr_position_description = f\"Your current position is at position {' '.join(str(position[0]))}, {' '.join(str(position[1]))}.\"\n delta_descriptions = {\"to your right\": (0, 1), \"to your left\": (0, -1), \"above you\": (-1, 0), \"below you\": (1, 0)} \n\n walls = []\n for k, (dy, dx) in delta_descriptions.items():\n if maze[position[0]+dy, position[1]+dx] == 1:\n walls.append(k)\n \n wall_description = describe_objects(\"wall\", walls)\n \n return f\"{goal_description} {curr_position_description} {wall_description}\\n\""
},
{
"identifier": "GPT2ValuePolicy",
"path": "LLM_RL/algorithms/value_rl_base/gpt2/interface.py",
"snippet": "class GPT2ValuePolicy(ValueRLPolicy):\n def __init__(\n self, \n inference: ValueRLInference, \n prng_key: Optional[jax.random.KeyArray], \n generation_config: Optional[GenerationConfig]=None, \n blocking_strategy: BlockingStrategy=BlockingStrategy(padding=Padding.LEFT, truncation=Truncation.LEFT, max_length=None), \n in_str_process: Optional[Callable[[str], str]]=None, \n out_str_process: Optional[Callable[[str], str]]=None, \n input_token_process: Optional[Callable[[List[int]], List[int]]]=None, \n target_token_process: Optional[Callable[[List[int]], List[int]]]=None, \n trace: bool=True, \n ):\n self.inference = inference\n self.prng_key = prng_key\n self.generation_config = generation_config\n self.blocking_strategy = blocking_strategy\n self.in_str_process = in_str_process\n self.out_str_process = out_str_process\n self.input_token_process = input_token_process\n self.target_token_process = target_token_process\n if self.in_str_process is None:\n self.in_str_process = lambda x: x\n if self.out_str_process is None:\n self.out_str_process = lambda x: x\n self.trace = trace\n \n def act(self, text_history: List[Optional[TextHistory]], done: Optional[List[bool]]=None) -> List[Optional[TextHistory]]:\n if done is None:\n done = [False]*len(text_history)\n # force eos_token for done sequences\n eos_token = self.inference.tokenizer.eos_token\n if self.generation_config is not None and self.generation_config.eos_token_id is not None:\n eos_token = self.inference.tokenizer.decode(self.generation_config.eos_token_id)\n if eos_token is None:\n eos_token = self.inference.tokenizer.pad_token\n if eos_token is None:\n eos_token = ''\n \n raw_input_strs = [\n eos_token if d else self.in_str_process(text_history_to_str(item)) \\\n for item, d in zip(text_history, done)\n ]\n\n new_key = None\n if self.prng_key is not None:\n self.prng_key, new_key = jax.random.split(self.prng_key)\n model_outputs = self.inference.generate_from_str(\n input_strs=raw_input_strs, \n prng_key=new_key, \n blocking_strategy=self.blocking_strategy, \n generation_config=self.generation_config, \n input_token_process=self.input_token_process, \n target_token_process=self.target_token_process, \n trace=self.trace, \n )\n\n raw_output_strs = model_outputs.output_strs\n output_strs = [\n \"\" if d else self.out_str_process(strip_prompt_from_completion(raw_input_str, raw_output_str)) \\\n for raw_input_str, raw_output_str, d in zip(raw_input_strs, raw_output_strs, done)\n ]\n\n return [\n None if d else text_history_item+(Text(output_str, True),) \\\n for text_history_item, output_str, d in zip(text_history, output_strs, done)\n ]\n \n def set_params(self, policy_params: PyTree) -> None:\n pi_beta_params, base_params, \\\n q1_head_params, q2_head_params = policy_params\n self.inference = self.inference.replace(\n pi_beta_params=pi_beta_params, \n base_params=base_params, \n q1_head_params=q1_head_params, \n q2_head_params=q2_head_params, \n )"
},
{
"identifier": "GPT2ValueRLInference",
"path": "LLM_RL/algorithms/value_rl_base/gpt2/interface.py",
"snippet": "class GPT2ValueRLInference(ValueRLInference):\n @classmethod\n def load_inference(\n cls, \n pi_beta_params: Optional[PyTree], \n base_params: PyTree, \n q1_head_params: PyTree, \n q2_head_params: Optional[PyTree], \n v_head_params: Optional[PyTree], \n pi_beta_model: Optional[FlaxPreTrainedModel], \n base_model: FlaxPreTrainedModel, \n q_head_model: nn.Module, \n v_head_model: Optional[nn.Module], \n tokenizer: PreTrainedTokenizerBase, \n beta: float=0.0, \n dp_shard_logits: bool=True, \n ):\n mesh = base_model.config.mesh\n assert mesh is not None\n assert mesh == q_head_model.config.mesh\n if v_head_model is not None:\n assert mesh == v_head_model.config.mesh\n assert (pi_beta_model is None and pi_beta_params is None) or (pi_beta_model is not None and pi_beta_params is not None)\n \n pi_beta_params_partition_spec = PS() if pi_beta_params is None else match_partition_rules(pi_beta_model.config.get_partition_rules(), pi_beta_params)\n base_params_partition_spec = match_partition_rules(base_model.config.get_partition_rules(), base_params)\n q1_head_params_partition_spec = match_partition_rules(q_head_model.config.get_partition_rules(), q1_head_params)\n q2_head_params_partition_spec = PS() if q2_head_params is None else match_partition_rules(q_head_model.config.get_partition_rules(), q2_head_params)\n v_head_params_partition_spec = PS() if v_head_params is None else match_partition_rules(v_head_model.config.get_partition_rules(), v_head_params)\n\n generator = None\n if pi_beta_model is not None:\n generator = GPT2ValueRLGeneration(\n base_model_config=base_model.config, \n pi_beta=pi_beta_model, \n value_base=base_model, \n q_head=q_head_model, \n beta=beta, \n )\n\n if pi_beta_params is not None:\n @partial(\n pjit, \n static_argnames=('generation_config', 'trace'), \n in_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), pi_beta_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), base_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q1_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q2_head_params_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n out_shardings=NamedSharding(mesh, PS()), \n )\n def _generate(\n pi_beta_params: Optional[PyTree], \n base_params: PyTree, \n q1_head_params: PyTree, \n q2_head_params: Optional[PyTree], \n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n prng_key: Optional[jax.random.PRNGKeyArray]=None, \n generation_config: Optional[FrozenDict]=None, \n trace: bool=True, \n ) -> Union[FlaxSampleOutput, FlaxGreedySearchOutput, FlaxBeamSearchOutput]:\n # data parallel shard inputs\n input_ids = with_named_sharding_constraint(input_ids, mesh, PS((\"dp\", \"fsdp\"), None))\n attention_mask = with_named_sharding_constraint(attention_mask, mesh, PS((\"dp\", \"fsdp\"), None))\n position_ids = with_named_sharding_constraint(position_ids, mesh, PS((\"dp\", \"fsdp\"), None))\n # NOTE: position_ids ignored by transformers\n\n # generate from model\n output = generator.generate(\n input_ids=input_ids, \n attention_mask=attention_mask, \n params=(pi_beta_params, base_params, q1_head_params, q2_head_params), \n prng_key=prng_key, \n generation_config=StreamingGenerationConfig.from_dict(generation_config) if generation_config is not None else None, \n trace=trace, \n )\n \n return output\n else:\n def 
_generate(\n pi_beta_params: Optional[PyTree], \n base_params: PyTree, \n q1_head_params: PyTree, \n q2_head_params: Optional[PyTree], \n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n prng_key: Optional[jax.random.PRNGKeyArray]=None, \n generation_config: Optional[FrozenDict]=None, \n trace: bool=True, \n ) -> Union[FlaxSampleOutput, FlaxGreedySearchOutput, FlaxBeamSearchOutput]:\n raise NotImplementedError\n \n @partial(\n pjit, \n static_argnames=('output_attentions', 'train'), \n in_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), base_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q1_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q2_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), v_head_params_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n out_shardings=ValueRLForwardOutput(\n base_raw_output=FlaxCausalLMOutputWithCrossAttentions(\n logits=NamedSharding(mesh, PS((\"dp\", \"fsdp\"), None, None)) if dp_shard_logits else NamedSharding(mesh, PS()), \n hidden_states=NamedSharding(mesh, PS()), # assume no sharding for hidden states\n attentions=NamedSharding(mesh, PS()), # assume no sharding for attentions\n cross_attentions=NamedSharding(mesh, PS()), # assume no sharding for cross attentions\n past_key_values=NamedSharding(mesh, PS()), # assume no sharding for past key values\n ), \n q1=NamedSharding(mesh, PS((\"dp\", \"fsdp\"), None, None)) if dp_shard_logits else NamedSharding(mesh, PS()), \n q2=NamedSharding(mesh, PS((\"dp\", \"fsdp\"), None, None)) if (dp_shard_logits and q2_head_params is not None) else NamedSharding(mesh, PS()), \n v=NamedSharding(mesh, PS()), \n ), \n )\n def _forward(\n base_params: PyTree, \n q1_head_params: PyTree, \n q2_head_params: Optional[PyTree], \n v_head_params: Optional[PyTree], \n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n prng_key: Optional[jax.random.PRNGKeyArray]=None, \n output_attentions: Optional[bool]=None, \n train: bool=False, \n ) -> ValueRLForwardOutput:\n # data parallel shard inputs\n input_ids = with_named_sharding_constraint(input_ids, mesh, PS((\"dp\", \"fsdp\"), None))\n attention_mask = with_named_sharding_constraint(attention_mask, mesh, PS((\"dp\", \"fsdp\"), None))\n position_ids = with_named_sharding_constraint(position_ids, mesh, PS((\"dp\", \"fsdp\"), None))\n\n # get logits\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n base_output = base_model(\n input_ids=input_ids, \n attention_mask=attention_mask, \n position_ids=position_ids, \n params=base_params, \n train=train, \n output_attentions=output_attentions, \n output_hidden_states=True, \n dropout_rng=new_key, \n )\n # trunc padded logits\n base_output = base_output.replace(logits=base_output.logits.at[:, :, base_model.config.unpadded_vocab_size:].set(-float('inf')))\n\n # get q1\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n q1 = q_head_model.apply(\n {'params': q1_head_params}, \n base_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n # trunc padded qs\n q1 = q1.at[:, :, base_model.config.unpadded_vocab_size:].set(-float('inf'))\n\n # get q2\n if q2_head_params is not None:\n new_key = None\n if 
prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n q2 = q_head_model.apply(\n {'params': q2_head_params}, \n base_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n # trunc padded qs\n q2 = q2.at[:, :, base_model.config.unpadded_vocab_size:].set(-float('inf'))\n else:\n q2 = None\n\n if v_head_params is not None:\n # get v\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n v = v_head_model.apply(\n {'params': v_head_params}, \n base_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n ).squeeze(2)\n else:\n v = None\n\n # assert sharding on outputs\n if dp_shard_logits:\n base_output = base_output.replace(logits=with_named_sharding_constraint(base_output.logits, mesh, PS((\"dp\", \"fsdp\"), None, None)))\n q1 = with_named_sharding_constraint(q1, mesh, PS((\"dp\", \"fsdp\"), None, None))\n if q2 is not None:\n q2 = with_named_sharding_constraint(q2, mesh, PS((\"dp\", \"fsdp\"), None, None))\n return ValueRLForwardOutput(\n base_raw_output=base_output, \n q1=q1, \n q2=q2, \n v=v, \n )\n\n return cls(\n pi_beta_params=pi_beta_params, \n base_params=base_params, \n q1_head_params=q1_head_params, \n q2_head_params=q2_head_params, \n v_head_params=v_head_params, \n pi_beta_model=pi_beta_model, \n base_model=base_model, \n q_head_model=q_head_model, \n v_head_model=v_head_model, \n tokenizer=tokenizer, \n _generate=_generate, \n _forward=_forward,\n )"
},
{
"identifier": "load_params",
"path": "LLM_RL/heads/mlp_head.py",
"snippet": "def load_params(\n model_load_mode: Union[ModelLoadMode, str], \n model_load_path: str, \n model_dtype: Union[str, jnp.dtype], \n mesh: Mesh, \n prng_key: Optional[jax.random.PRNGKeyArray]=None, \n pad_to_output_dim: Optional[int]=None, \n params_dtype: Optional[Union[str, jnp.dtype]]=jnp.float32, \n) -> Tuple[PyTree, MLPHead]:\n \n if ModelLoadMode.match_load_mode(model_load_mode, ModelLoadMode.CONFIG):\n # load config\n assert prng_key is not None, 'Must provide prng_key when loading from config.'\n with open(model_load_path, 'r') as f:\n model_config = MLPHeadConfig(**json.load(f))\n params, model = load_params_from_config(\n model_config=model_config, \n model_dtype=model_dtype, \n mesh=mesh, \n prng_key=prng_key, \n pad_to_output_dim=pad_to_output_dim, \n params_dtype=params_dtype, \n )\n elif ModelLoadMode.match_load_mode(model_load_mode, ModelLoadMode.PARAMS):\n # load model\n with open(os.path.join(model_load_path, 'config.json'), 'r') as f:\n model_config = MLPHeadConfig(**json.load(f))\n model = MLPHead(model_config, dtype=model_dtype)\n model.config.mesh = mesh\n # load params, shard params\n params = shard_params_from_checkpoint(model, os.path.join(model_load_path, 'params.msgpack'), params_dtype=params_dtype)\n # pad outputs\n if pad_to_output_dim is not None:\n params = freeze(pad_outputs(unfreeze(params), model, pad_to_output_dim, dtype=params_dtype))\n else:\n raise ValueError(f\"Invalid model_load_mode: {model_load_mode}\")\n \n return params, model"
},
{
"identifier": "build_ilql_score_fn",
"path": "LLM_RL/algorithms/ilql/gpt2/score_fn.py",
"snippet": "def build_ilql_score_fn(\n inference: ILQLInference, \n pi_beta_inference: Optional[GPT2Inference], \n tokenizer: PreTrainedTokenizer, \n max_length: int, \n value_weight: float, \n logit_weight: Optional[float], \n bsize: int, \n):\n assert (pi_beta_inference is None and logit_weight is None) or (pi_beta_inference is not None and logit_weight is not None)\n \n def score_fn(text_histories: List[TextHistory], done:Optional[List]=None) -> List[float]:\n assert all([text_history[-1].is_action for text_history in text_histories])\n \n prev_token_histories = []\n token_histories = []\n for text_history in text_histories:\n prev_token_histories.append(TokenHistory.from_text_history(text_history[:-1], tokenizer))\n token_histories.append(TokenHistory.from_text_history(text_history, tokenizer))\n \n # truncate to end and pad tokens\n tokens = np.stack([np.concatenate((token_history.tokens[-max_length:], np.full((max_length-min(token_history.tokens.shape[0], max_length),), tokenizer.pad_token_id)), axis=0) for token_history in token_histories], axis=0)\n tokens = jnp.asarray(tokens, dtype=jnp.int32)\n \n advantages = []\n \n for i in range(0, len(text_histories), bsize):\n batch = tokens[i:i+bsize, :]\n values = inference.forward(batch)\n # check prefix len is getting action\n prefix_len = jnp.asarray([prev_token_histories[i+x].tokens.shape[0] for x in range(batch.shape[0])], dtype=jnp.int32)\n attention_mask = (batch != tokenizer.pad_token_id).astype(np.float32)\n # embed()\n try:\n qs = jnp.minimum(values.target_output.q1, values.target_output.q2)\n except AttributeError:\n qs = jnp.minimum(values.q1, values.q2)\n qsa = jnp.take_along_axis(qs[:, :-1], batch[:, 1:][..., None], axis=2).squeeze(2)\n action_advs = jnp.empty(prefix_len.shape, dtype=jnp.float32)\n for x in range(len(prefix_len)):\n # embed()\n # check if this is getting rid of non-action states\n try:\n action_advs = action_advs.at[x].set(value_weight * ((qsa[x] - values.output.v[x, :-1]) * attention_mask[x, 1:])[(prefix_len[x]-1):].sum(axis=0))\n except AttributeError:\n action_advs = action_advs.at[x].set(value_weight * ((qsa[x] - values.v[x, :-1]) * attention_mask[x, 1:])[(prefix_len[x]-1):].sum(axis=0))\n\n if logit_weight is not None:\n logprobs = jax.nn.log_softmax(pi_beta_inference.get_logits_from_tokens(batch), axis=-1)\n action_logits = jnp.take_along_axis(logprobs[:, :-1], batch[:, 1:][..., None], axis=2).squeeze(2)\n for x in range(len(prefix_len)):\n action_advs = action_advs.at[x].add(logit_weight * (action_logits[x] * attention_mask[x, 1:])[(prefix_len[x]-1):].sum(axis=0))\n\n advantages.extend(jax.device_get(action_advs).tolist())\n \n return advantages\n\n return score_fn"
}
] | from typing import Optional
from JaxSeq.bucket_manager import open_with_bucket as open
from transformers import AutoTokenizer
from JaxSeq.utils import convert_path, load_mesh, create_path
from JaxSeq.utils import BlockingStrategy, Padding, Truncation
from JaxSeq.models.gpt2.interface import GPT2InferenceMask
from JaxSeq.models.gpt2.load import ModelLoadMode, load_params
from transformers.generation import GenerationConfig
from LLM_RL.environment import text_env_eval
from llm_rl_scripts.maze.env.maze_utils import setup_maze_env, maze_solver
from collections import defaultdict
from LLM_RL.algorithms.ppo.reranker_policy import ReRankerSamplePolicy, ReRankerPolicy
from llm_rl_scripts.maze.env.env import maze_proposal_function
from flax.traverse_util import flatten_dict, unflatten_dict
from LLM_RL.environment import Text
from llm_rl_scripts.maze.env.env import describe_observation_give_position
from LLM_RL.algorithms.value_rl_base.gpt2.interface import GPT2ValuePolicy, GPT2ValueRLInference
from LLM_RL.heads.mlp_head import load_params as load_head_params
from LLM_RL.algorithms.ilql.gpt2.score_fn import build_ilql_score_fn
import tyro
import jax
import jax.numpy as jnp
import os
import pickle as pkl
import json
import numpy as np | 8,557 | policy_temperature: Optional[float]=None,
policy_top_p: Optional[float]=None,
policy_top_k: Optional[int]=None,
policy_beta: float=16.0,
maze_name:str="double_t_maze",
describe_function:str="describe_observation_give_position",
maze_last_k: int=1,
maze_reward_function: str="standard_reward",
do_accuracy_eval: bool=True,
do_reward_eval: bool=True,
use_reranker_for_reward_eval: bool=False,
force_pad_embeddings: bool=False,
):
assert model_load_mode != ModelLoadMode.HF
input_args = locals()
print(input_args)
tokenizer = AutoTokenizer.from_pretrained('gpt2')
tokenizer.add_special_tokens({'pad_token': '<|pad|>'})
mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp'))
is_main_process = jax.process_index() == 0
print(f"Mesh: {mesh}")
print(f"Is main process: {is_main_process}")
env = setup_maze_env(
maze_name=maze_name,
describe_function=describe_function,
reward_function=maze_reward_function,
last_k=maze_last_k,
)
possible_positions = list(zip(*np.where(env.maze==0)))
for goal in env.valid_goals:
possible_positions.remove(tuple(goal.tolist()))
optimal_policy = maze_solver(1-env.maze, list(map(tuple, env.valid_goals.tolist())))
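    # The checkpoint is split across subdirectories: pi_beta (behaviour policy),
    # 'base' (shared value trunk), and the 'q1_head'/'q2_head'/'v_head' MLP heads.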
pi_beta_prng_key = jax.random.PRNGKey(0)
pi_beta_params, _ = load_params(
model_load_mode=pi_beta_load_mode,
model_load_path=convert_path(pi_beta_load_path) if pi_beta_load_mode != ModelLoadMode.HF else pi_beta_load_path,
model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32,
tokenizer=tokenizer,
mesh=mesh,
prng_key=pi_beta_prng_key,
force_pad_embeddings=force_pad_embeddings,
params_dtype=jnp.float32,
)
base_prng_key = jax.random.PRNGKey(0)
base_params, base_model = load_params(
model_load_mode=model_load_mode,
model_load_path=convert_path(os.path.join(model_load_path, 'base')),
model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32,
tokenizer=tokenizer,
mesh=mesh,
prng_key=base_prng_key,
force_pad_embeddings=force_pad_embeddings,
params_dtype=jnp.float32,
)
q1_head_params, q_head = load_head_params(
model_load_mode=model_load_mode.value,
model_load_path=convert_path(os.path.join(model_load_path, 'q1_head')),
model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32,
mesh=mesh,
prng_key=jax.random.PRNGKey(0),
pad_to_output_dim=None,
params_dtype=jnp.float32,
)
q2_head_params, _ = load_head_params(
model_load_mode=model_load_mode.value,
model_load_path=convert_path(os.path.join(model_load_path, 'q2_head')),
model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32,
mesh=mesh,
prng_key=jax.random.PRNGKey(0),
pad_to_output_dim=None,
params_dtype=jnp.float32,
)
v_head_params, v_head = load_head_params(
model_load_mode=model_load_mode.value,
model_load_path=convert_path(os.path.join(model_load_path, 'v_head')),
model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32,
mesh=mesh,
prng_key=jax.random.PRNGKey(0),
pad_to_output_dim=None,
params_dtype=jnp.float32,
)
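    # Assemble the value-guided inference object; policy_beta is passed through
    # as `beta`, and the base transformer doubles as the pi_beta architecture
    # (only the parameter trees differ).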
inference = GPT2ValueRLInference.load_inference(
pi_beta_params=pi_beta_params,
base_params=base_params,
q1_head_params=q1_head_params,
q2_head_params=q2_head_params,
v_head_params=v_head_params,
pi_beta_model=base_model,
base_model=base_model,
q_head_model=q_head,
v_head_model=v_head,
tokenizer=tokenizer,
beta=policy_beta,
dp_shard_logits=True,
)
policy_prng = jax.random.PRNGKey(0)
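    # evaluator() re-seeds its PRNG on every call and scores the policy by
    # rolling it out in the maze env (reward eval via a reranker or sampling
    # policy, plus an accuracy check when do_accuracy_eval is enabled).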
def evaluator(inference: GPT2InferenceMask):
nonlocal policy_prng
policy_prng, new_key = jax.random.split(policy_prng)
all_results = dict()
interactions = dict()
if do_reward_eval:
if use_reranker_for_reward_eval:
if policy_do_sample:
policy = ReRankerSamplePolicy(
|
def main(
model_load_mode: ModelLoadMode,
model_load_path: str,
pi_beta_load_mode: ModelLoadMode,
pi_beta_load_path: str,
/, # Mark the end of positional arguments.
outputs_path: Optional[str]=None,
data_mesh_shape: int=1,
fsdp_mesh_shape: int=1,
model_mesh_shape: int=-1,
bf16_activations: bool=False,
policy_n_rollouts: int=32,
policy_bsize: int=1,
policy_max_input_length: int=256,
policy_max_output_length: int=256,
policy_do_sample: bool=True,
policy_num_beams: int=1,
policy_temperature: Optional[float]=None,
policy_top_p: Optional[float]=None,
policy_top_k: Optional[int]=None,
policy_beta: float=16.0,
maze_name:str="double_t_maze",
describe_function:str="describe_observation_give_position",
maze_last_k: int=1,
maze_reward_function: str="standard_reward",
do_accuracy_eval: bool=True,
do_reward_eval: bool=True,
use_reranker_for_reward_eval: bool=False,
force_pad_embeddings: bool=False,
):
assert model_load_mode != ModelLoadMode.HF
input_args = locals()
print(input_args)
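    # GPT-2 ships without a padding token, so a dedicated '<|pad|>' token is
    # registered before any batched tokenization or generation.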
tokenizer = AutoTokenizer.from_pretrained('gpt2')
tokenizer.add_special_tokens({'pad_token': '<|pad|>'})
mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp'))
is_main_process = jax.process_index() == 0
print(f"Mesh: {mesh}")
print(f"Is main process: {is_main_process}")
env = setup_maze_env(
maze_name=maze_name,
describe_function=describe_function,
reward_function=maze_reward_function,
last_k=maze_last_k,
)
possible_positions = list(zip(*np.where(env.maze==0)))
for goal in env.valid_goals:
possible_positions.remove(tuple(goal.tolist()))
optimal_policy = maze_solver(1-env.maze, list(map(tuple, env.valid_goals.tolist())))
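    # Load each component of the value-RL checkpoint separately: the behaviour
    # policy pi_beta plus, from subdirectories of model_load_path, the shared
    # value base transformer and the Q1/Q2/V MLP heads.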
pi_beta_prng_key = jax.random.PRNGKey(0)
pi_beta_params, _ = load_params(
model_load_mode=pi_beta_load_mode,
model_load_path=convert_path(pi_beta_load_path) if pi_beta_load_mode != ModelLoadMode.HF else pi_beta_load_path,
model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32,
tokenizer=tokenizer,
mesh=mesh,
prng_key=pi_beta_prng_key,
force_pad_embeddings=force_pad_embeddings,
params_dtype=jnp.float32,
)
base_prng_key = jax.random.PRNGKey(0)
base_params, base_model = load_params(
model_load_mode=model_load_mode,
model_load_path=convert_path(os.path.join(model_load_path, 'base')),
model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32,
tokenizer=tokenizer,
mesh=mesh,
prng_key=base_prng_key,
force_pad_embeddings=force_pad_embeddings,
params_dtype=jnp.float32,
)
q1_head_params, q_head = load_head_params(
model_load_mode=model_load_mode.value,
model_load_path=convert_path(os.path.join(model_load_path, 'q1_head')),
model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32,
mesh=mesh,
prng_key=jax.random.PRNGKey(0),
pad_to_output_dim=None,
params_dtype=jnp.float32,
)
q2_head_params, _ = load_head_params(
model_load_mode=model_load_mode.value,
model_load_path=convert_path(os.path.join(model_load_path, 'q2_head')),
model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32,
mesh=mesh,
prng_key=jax.random.PRNGKey(0),
pad_to_output_dim=None,
params_dtype=jnp.float32,
)
v_head_params, v_head = load_head_params(
model_load_mode=model_load_mode.value,
model_load_path=convert_path(os.path.join(model_load_path, 'v_head')),
model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32,
mesh=mesh,
prng_key=jax.random.PRNGKey(0),
pad_to_output_dim=None,
params_dtype=jnp.float32,
)
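    # Bundle everything into a single value-guided inference object. Note that
    # pi_beta_model reuses base_model's architecture (only the params differ) and
    # policy_beta is forwarded as `beta` to the value-guided generator.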
inference = GPT2ValueRLInference.load_inference(
pi_beta_params=pi_beta_params,
base_params=base_params,
q1_head_params=q1_head_params,
q2_head_params=q2_head_params,
v_head_params=v_head_params,
pi_beta_model=base_model,
base_model=base_model,
q_head_model=q_head,
v_head_model=v_head,
tokenizer=tokenizer,
beta=policy_beta,
dp_shard_logits=True,
)
policy_prng = jax.random.PRNGKey(0)
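    # Evaluation closure: each call splits a fresh PRNG key and, depending on the
    # flags above, measures rollout reward with a reranker or sampling policy
    # (and, when do_accuracy_eval is set, an accuracy check handled later in the
    # script).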
def evaluator(inference: GPT2InferenceMask):
nonlocal policy_prng
policy_prng, new_key = jax.random.split(policy_prng)
all_results = dict()
interactions = dict()
if do_reward_eval:
if use_reranker_for_reward_eval:
if policy_do_sample:
policy = ReRankerSamplePolicy( | proposal_fn=maze_proposal_function, | 5 | 2023-11-21 00:16:42+00:00 | 12k |
jzmzhong/Automatic-Prosody-Annotator-with-SSWP-CLAP | src/clap_module/factory.py | [
{
"identifier": "CLAP",
"path": "src/clap_module/model.py",
"snippet": "class CLAP(nn.Module):\r\n def __init__(\r\n self,\r\n args,\r\n joint_embed_shape: int,\r\n audio_cfg: CLAPAudioCfp,\r\n text_cfg: CLAPTextCfg,\r\n enable_fusion: bool = False,\r\n fusion_type: str = 'None',\r\n mlp_act: str = 'relu',\r\n quick_gelu: bool = False,\r\n ):\r\n\r\n super().__init__()\r\n if isinstance(audio_cfg, dict):\r\n if audio_cfg[\"model_type\"] in (\"Conformer\"):\r\n audio_cfg = CLAPConformerAudioCfp(**audio_cfg)\r\n elif audio_cfg[\"model_type\"] in (\"hubert\"):\r\n audio_cfg = CLAPHuBERTAudioCfp(**audio_cfg)\r\n else:\r\n audio_cfg = CLAPAudioCfp(**audio_cfg)\r\n if isinstance(text_cfg, dict):\r\n text_cfg = CLAPTextCfg(**text_cfg)\r\n\r\n self.data_filling = args.data_filling\r\n self.data_truncating = args.data_truncating\r\n\r\n self.joint_embed_shape = joint_embed_shape\r\n self.audio_cfg = audio_cfg\r\n self.text_cfg = text_cfg\r\n self.enable_fusion = enable_fusion\r\n self.fusion_type = fusion_type\r\n self.mlp_act = mlp_act\r\n self.context_length = text_cfg.context_length\r\n\r\n # set activation of clip text encoder\r\n \"\"\"OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more\r\n memory efficient in recent PyTorch releases (>= 1.10).\r\n NOTE: timm models always use native GELU regardless of quick_gelu flag.\"\"\"\r\n act_layer = QuickGELU if quick_gelu else nn.GELU\r\n\r\n # set activation of MLP\r\n if mlp_act == 'relu':\r\n mlp_act_layer = nn.ReLU()\r\n elif mlp_act == 'gelu':\r\n mlp_act_layer = nn.GELU()\r\n else:\r\n raise NotImplementedError\r\n\r\n # audio encoder\r\n if audio_cfg.model_type == \"PANN\":\r\n self.audio_branch = create_pann_model(audio_cfg, enable_fusion, fusion_type)\r\n elif audio_cfg.model_type == \"HTSAT\":\r\n self.audio_branch = create_htsat_model(audio_cfg, enable_fusion, fusion_type)\r\n elif audio_cfg.model_type == \"Conformer\":\r\n self.audio_branch = create_conformer_model(audio_cfg, enable_fusion, fusion_type)\r\n elif audio_cfg.model_type == \"hubert\":\r\n # hubert model\r\n if audio_cfg.load_pretrained_weights:\r\n config = AutoConfig.from_pretrained(\"facebook/hubert-{}\".format(audio_cfg.model_name))\r\n config.mask_time_prob = 0.\r\n # config.mask_time_length = 1\r\n config.mask_feature_prob = 0.\r\n # config.mask_feature_length = 1\r\n self.audio_branch = HubertModel.from_pretrained(\"facebook/hubert-{}\".format(audio_cfg.model_name),\r\n config=config)\r\n else:\r\n config = AutoConfig.from_pretrained(\"facebook/hubert-{}\".format(audio_cfg.model_name))\r\n self.audio_branch = HubertModel(config)\r\n # attentive pooling\r\n if self.enable_fusion and (self.fusion_type in ['daf_1d', 'aff_1d', 'iaff_1d']):\r\n raise NotImplementedError\r\n elif self.enable_fusion and (self.fusion_type in ['attnpool_1d']):\r\n self.frames2frame = AttentionPool1d(audio_cfg.max_time_bins, audio_cfg.hidden_size, audio_cfg.heads)\r\n else:\r\n self.frames2frame = None\r\n else:\r\n logging.error(f\"Model config for {audio_cfg.model_type} not found\")\r\n raise RuntimeError(f\"Model config for {audio_cfg.model_type} not found.\")\r\n try:\r\n self.audio_projection = nn.Sequential(\r\n nn.Linear(audio_cfg.attn_dim, self.joint_embed_shape),\r\n mlp_act_layer,\r\n nn.Linear(self.joint_embed_shape, self.joint_embed_shape)\r\n )\r\n except:\r\n self.audio_projection = nn.Sequential(\r\n nn.Linear(audio_cfg.hidden_size, self.joint_embed_shape),\r\n mlp_act_layer,\r\n nn.Linear(self.joint_embed_shape, self.joint_embed_shape)\r\n )\r\n self.audio_transform = 
MLPLayers(units=[self.joint_embed_shape,\r\n self.joint_embed_shape,\r\n self.joint_embed_shape], dropout=0.1)\r\n self.audio_branch_type = audio_cfg.model_type\r\n\r\n # text encoder\r\n if text_cfg.model_type == \"transformer\":\r\n self.text_branch = Transformer(\r\n width=text_cfg.width,\r\n layers=text_cfg.layers,\r\n heads=text_cfg.heads,\r\n act_layer=act_layer,\r\n )\r\n self.vocab_size = text_cfg.vocab_size\r\n self.token_embedding = nn.Embedding(text_cfg.vocab_size, text_cfg.width)\r\n self.positional_embedding = nn.Parameter(\r\n torch.empty(self.context_length, text_cfg.width)\r\n )\r\n self.ln_final = LayerNorm(text_cfg.width)\r\n elif text_cfg.model_type in (\"bert\", \"roberta\", \"bart\"):\r\n if text_cfg.model_type == \"bert\":\r\n if text_cfg.load_pretrained_weights:\r\n try:\r\n self.text_branch = BertModel.from_pretrained(\"bert-{}\".format(text_cfg.model_name))\r\n except:\r\n self.text_branch = BertModel.from_pretrained(\"prajjwal1/bert-{}\".format(text_cfg.model_name))\r\n else:\r\n try:\r\n config = AutoConfig.from_pretrained(\"bert-{}\".format(text_cfg.model_name))\r\n except:\r\n config = AutoConfig.from_pretrained(\"prajjwal1/bert-{}\".format(text_cfg.model_name))\r\n self.text_branch = BertModel(config)\r\n elif text_cfg.model_type == \"roberta\":\r\n self.text_branch = RobertaModel.from_pretrained('roberta-base')\r\n elif text_cfg.model_type == \"bart\":\r\n self.text_branch = BartModel.from_pretrained('facebook/bart-base')\r\n else:\r\n raise NotImplementedError\r\n else:\r\n logging.error(f\"Model config for {text_cfg.model_type} not found\")\r\n raise RuntimeError(f\"Model config for {text_cfg.model_type} not found.\")\r\n if self.enable_fusion and (self.fusion_type in ['daf_1d', 'aff_1d', 'iaff_1d']):\r\n raise NotImplementedError\r\n\r\n elif self.enable_fusion and (self.fusion_type in ['attnpool_1d']):\r\n self.subword2word = AttentionPool1d(text_cfg.max_num_subword, text_cfg.width, text_cfg.heads)\r\n else:\r\n self.subword2word = None\r\n self.text_projection = nn.Sequential(\r\n nn.Linear(text_cfg.width, self.joint_embed_shape),\r\n mlp_act_layer,\r\n nn.Linear(self.joint_embed_shape, self.joint_embed_shape)\r\n )\r\n self.text_transform = MLPLayers(units=[self.joint_embed_shape,\r\n self.joint_embed_shape,\r\n self.joint_embed_shape], dropout=0.1)\r\n self.text_branch_type = text_cfg.model_type\r\n\r\n self.logit_scale_a = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\r\n self.logit_scale_t = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\r\n self.register_buffer(\"attn_mask\", self.build_attention_mask(), persistent=False)\r\n\r\n self.init_text_branch_parameters()\r\n\r\n def init_text_branch_parameters(self):\r\n if self.text_branch_type == \"transformer\":\r\n nn.init.normal_(self.token_embedding.weight, std=0.02)\r\n nn.init.normal_(self.positional_embedding, std=0.01)\r\n proj_std = (self.text_branch.width ** -0.5) * (\r\n (2 * self.text_branch.layers) ** -0.5\r\n )\r\n attn_std = self.text_branch.width ** -0.5\r\n fc_std = (2 * self.text_branch.width) ** -0.5\r\n for block in self.text_branch.resblocks:\r\n nn.init.normal_(block.attn.in_proj_weight, std=attn_std)\r\n nn.init.normal_(block.attn.out_proj.weight, std=proj_std)\r\n nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)\r\n nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)\r\n if self.text_branch_type in (\"bert\", \"roberta\"):\r\n width = self.text_branch.embeddings.word_embeddings.weight.shape[-1]\r\n elif self.text_branch_type == \"bart\":\r\n width = 
self.text_branch.shared.weight.shape[-1]\r\n else:\r\n width = self.text_branch.width\r\n nn.init.constant_(self.logit_scale_a, np.log(1 / 0.07))\r\n nn.init.constant_(self.logit_scale_t, np.log(1 / 0.07))\r\n\r\n def build_attention_mask(self):\r\n # lazily create causal attention mask, with full attention between the vision tokens\r\n # pytorch uses additive attention mask; fill with -inf\r\n mask = torch.empty(self.context_length, self.context_length)\r\n mask.fill_(float(\"-inf\"))\r\n mask.triu_(1) # zero out the lower diagonal\r\n return mask\r\n\r\n def encode_audio(self, audio, device):\r\n if self.audio_branch_type == \"hubert\":\r\n if \"sent_wavs\" in audio:\r\n xs = pad_sequence([torch.from_numpy(x) for x in audio[\"sent_wavs\"]], batch_first=True,\r\n padding_value=0.).to(device=device)\r\n xs = self.audio_branch(input_values=xs, output_hidden_states=False,\r\n return_dict=True) # mix lambda needs to add\r\n xs = xs[\"last_hidden_state\"]\r\n # select the word-aligned audio latents from the sequence, pad to certain length and truncate if necessary\r\n new_xs = []\r\n for x, start_end in zip(xs, audio[\"token_indices\"]):\r\n start, end = int(start_end[0] / self.audio_cfg.frame_rate), int(\r\n start_end[1] / self.audio_cfg.frame_rate)\r\n assert start < end, (start, end, x)\r\n assert end <= len(x), (start, end, x)\r\n x = x[start:end, :]\r\n if x.shape[0] > self.audio_cfg.max_time_bins:\r\n if self.data_truncating == \"front_trunc\":\r\n x = x[:self.audio_cfg.max_time_bins, :]\r\n elif self.data_truncating == \"back_trunc\":\r\n x = x[-self.audio_cfg.max_time_bins:, :]\r\n elif self.data_truncating == \"cent_trunc\":\r\n x = x[int(0.5 * (x.shape[0] - self.audio_cfg.max_time_bins)): int(\r\n 0.5 * (x.shape[0] + self.audio_cfg.max_time_bins)), :]\r\n else:\r\n raise NotImplementedError\r\n new_xs.append(x)\r\n if self.data_filling == \"pad\":\r\n new_xs.append(torch.ones((self.audio_cfg.max_time_bins, new_xs[-1].shape[1]), dtype=float))\r\n new_xs = pad_sequence(new_xs, batch_first=True, padding_value=0.)[:-1, :, :]\r\n else:\r\n raise NotImplementedError\r\n else:\r\n xs = pad_sequence([torch.from_numpy(x) for x in audio[\"token_wavs\"]], batch_first=True,\r\n padding_value=0.).to(device=device)\r\n xs = self.audio_branch(input_values=xs, output_hidden_states=False,\r\n return_dict=True) # mix lambda needs to add\r\n xs = xs[\"last_hidden_state\"]\r\n # pad to certain length and truncate if necessary\r\n new_xs = []\r\n for x in xs:\r\n if x.shape[0] > self.audio_cfg.max_time_bins:\r\n if self.data_truncating == \"front_trunc\":\r\n x = x[:self.audio_cfg.max_time_bins, :]\r\n elif self.data_truncating == \"back_trunc\":\r\n x = x[-self.audio_cfg.max_time_bins:, :]\r\n elif self.data_truncating == \"cent_trunc\":\r\n x = x[int(0.5 * (x.shape[0] - self.audio_cfg.max_time_bins)): int(\r\n 0.5 * (x.shape[0] + self.audio_cfg.max_time_bins)), :]\r\n else:\r\n raise NotImplementedError\r\n new_xs.append(x)\r\n if self.data_filling == \"pad\":\r\n new_xs.append(torch.ones((self.audio_cfg.max_time_bins, new_xs[-1].shape[1]), dtype=float))\r\n new_xs = pad_sequence(new_xs, batch_first=True, padding_value=0.)[:-1, :, :]\r\n else:\r\n raise NotImplementedError\r\n if self.frames2frame:\r\n x = self.frames2frame(new_xs)\r\n x = self.audio_projection(x)\r\n else:\r\n x = self.audio_branch(audio, mixup_lambda=None, device=device) # mix lambda needs to add\r\n x = self.audio_projection(x[\"embedding\"])\r\n return x\r\n\r\n def encode_text(self, text, device):\r\n if self.text_branch_type == 
\"transformer\":\r\n text = text.to(device=device, non_blocking=True)\r\n x = self.token_embedding(text) # [batch_size, n_ctx, d_model]\r\n x = x + self.positional_embedding\r\n x = x.permute(1, 0, 2) # NLD -> LND\r\n x = self.text_branch(x, attn_mask=self.attn_mask)\r\n x = x.permute(1, 0, 2) # LND -> NLD\r\n x = self.ln_final(x)\r\n x = self.text_projection(x[torch.arange(x.shape[0]), text.argmax(dim=-1)])\r\n elif self.text_branch_type == \"bert\":\r\n if self.subword2word:\r\n xs = self.text_branch(\r\n input_ids=text[\"input_ids\"].to(\r\n device=device, non_blocking=True\r\n ),\r\n attention_mask=text[\"attention_mask\"].to(\r\n device=device, non_blocking=True\r\n ),\r\n token_type_ids=text[\"token_type_ids\"].to(\r\n device=device, non_blocking=True\r\n ),\r\n )[\"last_hidden_state\"]\r\n # import pdb; pdb.set_trace()\r\n # select the subwords from the sequence, pad to certain length and truncate if necessary\r\n new_xs = []\r\n for x, start_end in zip(xs, text[\"token_indices\"]):\r\n x = x[int(start_end[0]):int(start_end[1]), :]\r\n if x.shape[0] > self.text_cfg.max_num_subword:\r\n if self.data_truncating == \"front_trunc\":\r\n x = x[:self.text_cfg.max_num_subword, :]\r\n elif self.data_truncating == \"back_trunc\":\r\n x = x[-self.text_cfg.max_num_subword:, :]\r\n elif self.data_truncating == \"cent_trunc\":\r\n x = x[int(0.5 * (x.shape[0] - self.text_cfg.max_num_subword)): int(\r\n 0.5 * (x.shape[0] + self.text_cfg.max_num_subword)), :]\r\n else:\r\n raise NotImplementedError\r\n new_xs.append(x)\r\n if self.data_filling == \"pad\":\r\n new_xs.append(torch.ones((self.text_cfg.max_num_subword, new_xs[-1].shape[1]), dtype=float))\r\n new_xs = pad_sequence(new_xs, batch_first=True, padding_value=0.)[:-1, :, :]\r\n else:\r\n raise NotImplementedError\r\n x = self.subword2word(new_xs)\r\n x = self.text_projection(x)\r\n else:\r\n x = self.text_branch(\r\n input_ids=text[\"input_ids\"].to(\r\n device=device, non_blocking=True\r\n ),\r\n attention_mask=text[\"attention_mask\"].to(\r\n device=device, non_blocking=True\r\n ),\r\n token_type_ids=text[\"token_type_ids\"].to(\r\n device=device, non_blocking=True\r\n ),\r\n )[\"pooler_output\"]\r\n x = self.text_projection(x)\r\n elif self.text_branch_type == \"roberta\":\r\n x = self.text_branch(\r\n input_ids=text[\"input_ids\"].to(device=device, non_blocking=True),\r\n attention_mask=text[\"attention_mask\"].to(\r\n device=device, non_blocking=True\r\n ),\r\n )[\"pooler_output\"]\r\n x = self.text_projection(x)\r\n elif self.text_branch_type == \"bart\":\r\n x = torch.mean(self.text_branch(\r\n input_ids=text[\"input_ids\"].to(device=device, non_blocking=True),\r\n attention_mask=text[\"attention_mask\"].to(\r\n device=device, non_blocking=True\r\n ),\r\n )[\"encoder_last_hidden_state\"], axis=1)\r\n x = self.text_projection(x)\r\n else:\r\n logging.error(f\"Model type {self.text_branch_type} not found\")\r\n raise RuntimeError(f\"Model type {self.text_branch_type} not found.\")\r\n return x\r\n\r\n def encode_text_sent(self, texts, device):\r\n if self.text_branch_type == \"bert\":\r\n if self.subword2word:\r\n xs = self.text_branch(\r\n input_ids=text[\"input_ids\"].to(\r\n device=device, non_blocking=True\r\n ),\r\n attention_mask=text[\"attention_mask\"].to(\r\n device=device, non_blocking=True\r\n ),\r\n token_type_ids=text[\"token_type_ids\"].to(\r\n device=device, non_blocking=True\r\n ),\r\n )[\"last_hidden_state\"]\r\n # import pdb; pdb.set_trace()\r\n # select the subwords from the sequence, pad to certain length and truncate 
if necessary\r\n new_xs = []\r\n for x, start_end in zip(xs, text[\"token_indices\"]):\r\n x = x[int(start_end[0]):int(start_end[1]), :]\r\n if x.shape[0] > self.text_cfg.max_num_subword:\r\n if self.data_truncating == \"front_trunc\":\r\n x = x[:self.text_cfg.max_num_subword, :]\r\n elif self.data_truncating == \"back_trunc\":\r\n x = x[-self.text_cfg.max_num_subword:, :]\r\n elif self.data_truncating == \"cent_trunc\":\r\n x = x[int(0.5 * (x.shape[0] - self.text_cfg.max_num_subword)): int(\r\n 0.5 * (x.shape[0] + self.text_cfg.max_num_subword)), :]\r\n else:\r\n raise NotImplementedError\r\n new_xs.append(x)\r\n if self.data_filling == \"pad\":\r\n new_xs.append(torch.ones((self.text_cfg.max_num_subword, new_xs[-1].shape[1]), dtype=float))\r\n new_xs = pad_sequence(new_xs, batch_first=True, padding_value=0.)[:-1, :, :]\r\n else:\r\n raise NotImplementedError\r\n x = self.subword2word(new_xs)\r\n x = self.text_projection(x)\r\n else:\r\n raise NotImplementedError\r\n else:\r\n logging.error(f\"Model type {self.text_branch_type} not found\")\r\n raise RuntimeError(f\"Model type {self.text_branch_type} not found.\")\r\n return x\r\n\r\n def forward(self, audio, text, device=None):\r\n \"\"\"Forward audio and text into the CLAP\r\n\r\n Args:\r\n audio (torch.Tensor): (batch_size, audio_length) the time-domain audio input / the batch of \r\n mel_spec and longer list.\r\n text (torch.Tensor): the text token input\r\n device (str, optional): device. Defaults to None.\r\n\r\n Returns:\r\n audio_features (torch.Tensor): (batch_size, audio_feature_dim)\r\n text_features (torch.Tensor): (batch_size, text_feature_dim)\r\n audio_features_mlp (torch.Tensor): (batch_size, audio_feature_dim)\r\n text_features_mlp (torch.Tensor): (batch_size, text_feature_dim)\r\n \"\"\"\r\n if device is None:\r\n if audio is not None:\r\n device = audio.device\r\n elif text is not None:\r\n device = text.device\r\n if audio is None and text is None:\r\n # a hack to get the logit scale\r\n return self.logit_scale_a.exp(), self.logit_scale_t.exp()\r\n elif audio is None:\r\n return self.encode_text(text, device=device)\r\n elif text is None:\r\n return self.encode_audio(audio, device=device)\r\n\r\n audio_features = self.encode_audio(audio, device=device)\r\n audio_features = F.normalize(audio_features, dim=-1)\r\n text_features = self.encode_text(text, device=device)\r\n text_features = F.normalize(text_features, dim=-1)\r\n\r\n audio_features_mlp = self.audio_transform(audio_features)\r\n text_features_mlp = self.text_transform(text_features)\r\n # Four outputs: audio features (basic & MLP), text features (basic & MLP)\r\n return (\r\n audio_features,\r\n text_features,\r\n audio_features_mlp,\r\n text_features_mlp,\r\n self.logit_scale_a.exp(),\r\n self.logit_scale_t.exp(),\r\n )\r\n\r\n def forward_sent(self, audios, texts, device=None):\r\n \"\"\"Forward audio and text into the CLAP\r\n\r\n Args:\r\n audios (torch.Tensor): (batch_size, audio_length) the time-domain audio input / the batch of mel_spec and longer list.\r\n texts (torch.Tensor): the text token input\r\n device (str, optional): device. 
Defaults to None.\r\n \"\"\"\r\n if device is None:\r\n if audios is not None:\r\n device = audios.device\r\n elif texts is not None:\r\n device = texts.device\r\n if audios is None and texts is None:\r\n # a hack to get the logit scale\r\n return self.logit_scale_a.exp(), self.logit_scale_t.exp()\r\n elif audios is None:\r\n return self.encode_text(texts, device=device)\r\n elif texts is None:\r\n return self.encode_audio(audios, device=device)\r\n\r\n audio_features = self.encode_audio(audios, device=device)\r\n audio_features = F.normalize(audio_features, dim=-1)\r\n text_features = self.encode_text(texts, device=device)\r\n text_features = F.normalize(text_features, dim=-1)\r\n\r\n audio_features_mlp = self.audio_transform(audio_features)\r\n text_features_mlp = self.text_transform(text_features)\r\n # Four outputs: audio features (basic & MLP), text features (basic & MLP)\r\n return (\r\n audio_features,\r\n text_features,\r\n audio_features_mlp,\r\n text_features_mlp,\r\n self.logit_scale_a.exp(),\r\n self.logit_scale_t.exp(),\r\n )\r\n\r\n def get_logit_scale(self):\r\n return self.logit_scale_a.exp(), self.logit_scale_t.exp()\r\n\r\n def get_text_embedding(self, data):\r\n \"\"\"Get the text embedding from the model\r\n\r\n Args:\r\n data (torch.Tensor): a tensor of text embedding\r\n\r\n Returns:\r\n text_embed (torch.Tensor): a tensor of text_embeds (N, D)\r\n \"\"\"\r\n device = next(self.parameters()).device\r\n for k in data:\r\n data[k] = data[k].to(device)\r\n text_embeds = self.encode_text(data, device=device)\r\n text_embeds = F.normalize(text_embeds, dim=-1)\r\n\r\n return text_embeds\r\n\r\n def get_audio_embedding(self, data):\r\n \"\"\"Get the audio embedding from the model\r\n\r\n Args:\r\n data (list): a list of dict the audio input dict list from 'get_audio_feature' method\r\n\r\n Returns:\r\n audio_embed (torch.Tensor): a tensor of audio_embeds (N, D)\r\n \"\"\"\r\n device = next(self.parameters()).device\r\n input_dict = {}\r\n keys = data[0].keys()\r\n for k in keys:\r\n input_dict[k] = torch.cat([d[k].unsqueeze(0) for d in data], dim=0).to(device)\r\n audio_embeds = self.encode_audio(input_dict, device=device)[\"embedding\"]\r\n audio_embeds = self.audio_projection(audio_embeds)\r\n audio_embeds = F.normalize(audio_embeds, dim=-1)\r\n return audio_embeds\r\n\r\n def audio_infer(self, audio, hopsize=None, device=None):\r\n \"\"\"Forward one audio and produce the audio embedding\r\n\r\n Args:\r\n audio (audio_length): the time-domain audio input, notice that it must be only one input\r\n hopsize (int, optional): the overlap hopsize as the sliding window. Defaults to None.\r\n device (str, optional): device. 
Defaults to None.\r\n\r\n Returns:\r\n output_dict ({\r\n key: [n, (embedding_shape)] if \"HTS-AT\"\r\n or\r\n key: [(embedding_shape)] if \"PANN\"\r\n }): the list of key values of the audio branch\r\n \"\"\"\r\n\r\n assert not self.training, \"the inference mode must be run at eval stage\"\r\n output_dict = {}\r\n # PANN\r\n if self.audio_cfg.model_type == \"PANN\":\r\n audio_input = audio.unsqueeze(dim=0)\r\n output_dict[key] = self.encode_audio(audio_input, device=device)[key].squeeze(dim=0)\r\n elif self.audio_cfg.model_type == \"HTSAT\":\r\n # repeat\r\n audio_len = len(audio)\r\n k = self.audio_cfg.clip_samples // audio_len\r\n if k > 1:\r\n audio = audio.repeat(k)\r\n audio_len = len(audio)\r\n\r\n if hopsize is None:\r\n hopsize = min(hopsize, audio_len)\r\n\r\n if audio_len > self.audio_cfg.clip_samples:\r\n audio_input = [\r\n audio[pos: pos + self.audio_cfg.clip_samples].clone()\r\n for pos in range(\r\n 0, audio_len - self.audio_cfg.clip_samples, hopsize\r\n )\r\n ]\r\n audio_input.append(audio[-self.audio_cfg.clip_samples:].clone())\r\n audio_input = torch.stack(audio_input)\r\n output_dict[key] = self.encode_audio(audio_input, device=device)[key]\r\n else:\r\n audio_input = audio.unsqueeze(dim=0)\r\n output_dict[key] = self.encode_audio(audio_input, device=device)[key].squeeze(dim=0)\r\n\r\n return output_dict\r"
},
{
"identifier": "convert_weights_to_fp16",
"path": "src/clap_module/model.py",
"snippet": "def convert_weights_to_fp16(model: nn.Module):\r\n \"\"\"Convert applicable model parameters to fp16\r\n \"\"\"\r\n\r\n def _convert_weights_to_fp16(l):\r\n if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):\r\n l.weight.data = l.weight.data.half()\r\n if l.bias is not None:\r\n l.bias.data = l.bias.data.half()\r\n\r\n if isinstance(l, nn.MultiheadAttention):\r\n for attr in [\r\n *[f\"{s}_proj_weight\" for s in [\"in\", \"q\", \"k\", \"v\"]],\r\n \"in_proj_bias\",\r\n \"bias_k\",\r\n \"bias_v\",\r\n ]:\r\n tensor = getattr(l, attr)\r\n if tensor is not None:\r\n tensor.data = tensor.data.half()\r\n\r\n for name in [\"text_projection\", \"proj\"]:\r\n if hasattr(l, name):\r\n attr = getattr(l, name)\r\n if attr is not None:\r\n attr.data = attr.data.half()\r\n\r\n model.apply(_convert_weights_to_fp16)\r"
}
] | import json
import logging
import os
import re
import torch
from copy import deepcopy
from pathlib import Path
from .model import CLAP, convert_weights_to_fp16
| 7,879 | checkpoint_path (str): checkpoint path
map_location (str, optional): a function, :class:`torch.device`, string or a dict specifying how to
remap storage locations. Defaults to "cpu".
        skip_params (bool, optional): Strip the leading "module." prefix from parameter keys. Defaults to True.
Returns:
state_dict (dict): model state dict
"""
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
else:
state_dict = checkpoint
if skip_params:
if next(iter(state_dict.items()))[0].startswith("module"):
state_dict = {k[7:]: v for k, v in state_dict.items()}
return state_dict
def create_model(
args,
model_name: str,
precision: str = "fp32",
device: torch.device = torch.device("cpu"),
jit: bool = False,
):
"""Create a CLAP model from a model config.
Args:
args (argparse.Namespace): Command-line arguments.
model_name (str): model name
precision (str, optional): Model parameter accuracy. Defaults to "fp32".
device (torch.device, optional): device. Defaults to torch.device("cpu").
jit (bool, optional): torch.jit.script operations. Defaults to False.
Returns:
model (nn.Module): CLAP model
model_cfg (dict): model config
"""
if model_name in _MODEL_CONFIGS:
logging.info(f"Loading {model_name} model config.")
model_cfg = deepcopy(_MODEL_CONFIGS[model_name])
else:
logging.error(
f"Model config for {model_name} not found; available models {list_models()}."
)
raise RuntimeError(f"Model config for {model_name} not found.")
model = CLAP(args, **model_cfg)
# load pretrained CLAP model
if args.pretrained:
pretrained_clap = torch.load(args.pretrained, map_location='cpu')
model.load_state_dict(pretrained_clap["state_dict"], strict=False)
logging.info(f"Loaded pretrained CLAP model weights !!!")
else:
# load pretrained audio encoder
pretrained_audio = model_cfg["audio_cfg"]["pretrained_audio"]
amodel_type = model_cfg["audio_cfg"]["model_type"]
if pretrained_audio:
if amodel_type.startswith('PANN'):
if 'Cnn14_mAP' in pretrained_audio: # official checkpoint
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['model']
keys = list(audio_ckpt.keys())
for key in keys:
if 'spectrogram_extractor' not in key and 'logmel_extractor' not in key:
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key] = v
# checkpoint trained via HTSAT codebase
elif os.path.basename(pretrained_audio).startswith('PANN'):
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['state_dict']
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith('sed_model'):
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key[10:]] = v
elif os.path.basename(pretrained_audio).startswith(
'finetuned'): # checkpoint trained via linear probe codebase
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
else:
raise ValueError('Unknown audio checkpoint')
elif amodel_type.startswith('HTSAT'):
if 'HTSAT_AudioSet_Saved' in pretrained_audio: # official checkpoint
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['state_dict']
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith('sed_model') and ('spectrogram_extractor' not in key
and 'logmel_extractor' not in key):
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key[10:]] = v
# checkpoint trained via HTSAT codebase
elif os.path.basename(pretrained_audio).startswith('HTSAT'):
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['state_dict']
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith('sed_model'):
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key[10:]] = v
elif os.path.basename(pretrained_audio).startswith(
'finetuned'): # checkpoint trained via linear probe codebase
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
else:
raise ValueError('Unknown audio checkpoint')
else:
            raise NotImplementedError('this audio encoder pretrained checkpoint is not supported')
model.load_state_dict(audio_ckpt, strict=False)
logging.info(f"Loading pretrained {amodel_type} weights ({pretrained_audio}).")
param_names = [n for n, p in model.named_parameters()]
for n in param_names:
print(n, "\t", "Loaded" if n in audio_ckpt else "Unloaded")
model.to(device=device)
if precision == "fp16":
assert device.type != "cpu"
|
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {}  # dictionary (model_name: config) of model architecture configs
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())]
def _rescan_model_configs():
"""Rescan model config directory for new configs.
"""
global _MODEL_CONFIGS
config_ext = (".json",)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f"*{ext}"))
for cf in config_files:
with open(cf, "r") as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ("joint_embed_shape", "audio_cfg", "text_cfg")):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {
k: v
for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))
}
_rescan_model_configs() # initial populate of model config registry
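# The registry maps each JSON file's stem to its parsed config, so the file stem
# acts as the model name. A hypothetical file model_configs/HTSAT-base.json would
# then be selected with, e.g.:
#   model, model_cfg = create_model(args, "HTSAT-base")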
def load_state_dict(checkpoint_path: str, map_location="cpu", skip_params=True):
"""Load a checkpoint from a file.
Args:
checkpoint_path (str): checkpoint path
map_location (str, optional): a function, :class:`torch.device`, string or a dict specifying how to
remap storage locations. Defaults to "cpu".
        skip_params (bool, optional): Strip the leading "module." prefix from parameter keys. Defaults to True.
Returns:
state_dict (dict): model state dict
"""
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
else:
state_dict = checkpoint
if skip_params:
if next(iter(state_dict.items()))[0].startswith("module"):
state_dict = {k[7:]: v for k, v in state_dict.items()}
return state_dict
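# Usage sketch (hypothetical path): the startswith("module") branch above strips
# the "module." prefix that nn.DataParallel / DDP adds when saving, e.g.:
#   sd = load_state_dict("/path/to/clap_checkpoint.pt", map_location="cpu")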
def create_model(
args,
model_name: str,
precision: str = "fp32",
device: torch.device = torch.device("cpu"),
jit: bool = False,
):
"""Create a CLAP model from a model config.
Args:
args (argparse.Namespace): Command-line arguments.
model_name (str): model name
        precision (str, optional): Numerical precision of the model parameters ("fp32" or "fp16"). Defaults to "fp32".
device (torch.device, optional): device. Defaults to torch.device("cpu").
jit (bool, optional): torch.jit.script operations. Defaults to False.
Returns:
model (nn.Module): CLAP model
model_cfg (dict): model config
"""
if model_name in _MODEL_CONFIGS:
logging.info(f"Loading {model_name} model config.")
model_cfg = deepcopy(_MODEL_CONFIGS[model_name])
else:
logging.error(
f"Model config for {model_name} not found; available models {list_models()}."
)
raise RuntimeError(f"Model config for {model_name} not found.")
model = CLAP(args, **model_cfg)
# load pretrained CLAP model
if args.pretrained:
pretrained_clap = torch.load(args.pretrained, map_location='cpu')
model.load_state_dict(pretrained_clap["state_dict"], strict=False)
logging.info(f"Loaded pretrained CLAP model weights !!!")
else:
# load pretrained audio encoder
pretrained_audio = model_cfg["audio_cfg"]["pretrained_audio"]
amodel_type = model_cfg["audio_cfg"]["model_type"]
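        # Pretrained audio weights may originate from different codebases
        # (official PANN/HTSAT releases, the HTSAT repo, or linear-probe
        # finetuning); the branches below remap their keys onto the
        # `audio_branch.` prefix before the non-strict load further down.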
if pretrained_audio:
if amodel_type.startswith('PANN'):
if 'Cnn14_mAP' in pretrained_audio: # official checkpoint
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['model']
keys = list(audio_ckpt.keys())
for key in keys:
if 'spectrogram_extractor' not in key and 'logmel_extractor' not in key:
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key] = v
# checkpoint trained via HTSAT codebase
elif os.path.basename(pretrained_audio).startswith('PANN'):
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['state_dict']
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith('sed_model'):
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key[10:]] = v
elif os.path.basename(pretrained_audio).startswith(
'finetuned'): # checkpoint trained via linear probe codebase
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
else:
raise ValueError('Unknown audio checkpoint')
elif amodel_type.startswith('HTSAT'):
if 'HTSAT_AudioSet_Saved' in pretrained_audio: # official checkpoint
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['state_dict']
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith('sed_model') and ('spectrogram_extractor' not in key
and 'logmel_extractor' not in key):
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key[10:]] = v
# checkpoint trained via HTSAT codebase
elif os.path.basename(pretrained_audio).startswith('HTSAT'):
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['state_dict']
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith('sed_model'):
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key[10:]] = v
elif os.path.basename(pretrained_audio).startswith(
'finetuned'): # checkpoint trained via linear probe codebase
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
else:
raise ValueError('Unknown audio checkpoint')
else:
            raise NotImplementedError('this audio encoder pretrained checkpoint is not supported')
model.load_state_dict(audio_ckpt, strict=False)
logging.info(f"Loading pretrained {amodel_type} weights ({pretrained_audio}).")
param_names = [n for n, p in model.named_parameters()]
for n in param_names:
print(n, "\t", "Loaded" if n in audio_ckpt else "Unloaded")
model.to(device=device)
if precision == "fp16":
assert device.type != "cpu"
| convert_weights_to_fp16(model)
| 1 | 2023-11-25 02:38:32+00:00 | 12k |
ubc-vision/StableKeypoints | unsupervised_keypoints/main.py | [
{
"identifier": "load_ldm",
"path": "unsupervised_keypoints/optimize_token.py",
"snippet": "def load_ldm(device, type=\"CompVis/stable-diffusion-v1-4\", feature_upsample_res=256):\n scheduler = DDIMScheduler(\n beta_start=0.00085,\n beta_end=0.012,\n beta_schedule=\"scaled_linear\",\n clip_sample=False,\n set_alpha_to_one=False,\n )\n\n MY_TOKEN = \"\"\n NUM_DDIM_STEPS = 50\n scheduler.set_timesteps(NUM_DDIM_STEPS)\n\n\n ldm = StableDiffusionPipeline.from_pretrained(\n type, use_auth_token=MY_TOKEN, scheduler=scheduler\n ).to(device)\n \n if device != \"cpu\":\n ldm.unet = nn.DataParallel(ldm.unet)\n ldm.vae = nn.DataParallel(ldm.vae)\n \n controllers = {}\n for device_id in ldm.unet.device_ids:\n device = torch.device(\"cuda\", device_id)\n controller = ptp_utils.AttentionStore()\n controllers[device] = controller\n else:\n controllers = {}\n _device = torch.device(\"cpu\")\n controller = ptp_utils.AttentionStore()\n controllers[_device] = controller\n\n # patched_devices = set()\n\n def hook_fn(module, input):\n _device = input[0].device\n # if device not in patched_devices:\n ptp_utils.register_attention_control(module, controllers[_device], feature_upsample_res=feature_upsample_res)\n # patched_devices.add(device)\n\n if device != \"cpu\":\n ldm.unet.module.register_forward_pre_hook(hook_fn)\n else:\n ldm.unet.register_forward_pre_hook(hook_fn)\n \n num_gpus = torch.cuda.device_count()\n\n for param in ldm.vae.parameters():\n param.requires_grad = False\n for param in ldm.text_encoder.parameters():\n param.requires_grad = False\n for param in ldm.unet.parameters():\n param.requires_grad = False\n\n return ldm, controllers, num_gpus"
},
{
"identifier": "optimize_embedding",
"path": "unsupervised_keypoints/optimize.py",
"snippet": "def optimize_embedding(\n ldm,\n top_k_strategy=\"entropy\",\n wandb_log=True,\n context=None,\n device=\"cuda\",\n num_steps=2000,\n from_where=[\"down_cross\", \"mid_cross\", \"up_cross\"],\n upsample_res=256,\n layers=[0, 1, 2, 3, 4, 5],\n lr=5e-3,\n noise_level=-1,\n num_tokens=1000,\n top_k=10,\n augment_degrees=30,\n augment_scale=(0.9, 1.1),\n augment_translate=(0.1, 0.1),\n dataset_loc=\"~\",\n sigma=1.0,\n sharpening_loss_weight=100,\n equivariance_attn_loss_weight=100,\n batch_size=4,\n num_gpus=1,\n dataset_name = \"celeba_aligned\",\n max_len=-1,\n min_dist=0.05,\n furthest_point_num_samples=50,\n controllers=None,\n validation = False,\n num_subjects=1,\n):\n \n if dataset_name == \"celeba_aligned\":\n dataset = CelebA(split=\"train\", dataset_loc=dataset_loc, max_len=max_len)\n elif dataset_name == \"celeba_wild\":\n dataset = CelebA(split=\"train\", dataset_loc=dataset_loc, align = False, max_len=max_len)\n elif dataset_name == \"cub_aligned\":\n dataset = cub.TrainSet(data_root=dataset_loc, image_size=512)\n elif dataset_name == \"cub_001\":\n dataset = cub_parts.CUBDataset(dataset_root=dataset_loc, split=\"train\", single_class=1)\n elif dataset_name == \"cub_002\":\n dataset = cub_parts.CUBDataset(dataset_root=dataset_loc, split=\"train\", single_class=2)\n elif dataset_name == \"cub_003\":\n dataset = cub_parts.CUBDataset(dataset_root=dataset_loc, split=\"train\", single_class=3)\n elif dataset_name == \"cub_all\":\n dataset = cub_parts.CUBDataset(dataset_root=dataset_loc, split=\"train\")\n elif dataset_name == \"taichi\":\n dataset = taichi.TrainSet(data_root=dataset_loc, image_size=512)\n elif dataset_name == \"human3.6m\":\n dataset = human36m.TrainSet(data_root=dataset_loc, validation=validation)\n elif dataset_name == \"unaligned_human3.6m\":\n dataset = unaligned_human36m.TrainSet(data_root=dataset_loc, image_size=512)\n elif dataset_name == \"deepfashion\":\n dataset = deepfashion.TrainSet(data_root=dataset_loc, image_size=512)\n elif dataset_name == \"custom\":\n dataset = custom_images.CustomDataset(data_root=dataset_loc, image_size=512)\n else:\n raise NotImplementedError\n\n\n invertible_transform = RandomAffineWithInverse(\n degrees=augment_degrees,\n scale=augment_scale,\n translate=augment_translate,\n )\n\n # every iteration return image, pixel_loc\n\n if context is None:\n context = ptp_utils.init_random_noise(device, num_words=num_tokens)\n\n context.requires_grad = True\n\n # optimize context to maximize attention at pixel_loc\n optimizer = torch.optim.Adam([context], lr=lr)\n\n # time the optimization\n import time\n\n start = time.time()\n it_start = time.time()\n\n running_equivariance_attn_loss = 0\n running_sharpening_loss = 0\n running_total_loss = 0\n \n # create dataloader for the dataset\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=num_gpus, shuffle=True, drop_last=True)\n\n dataloader_iter = iter(dataloader)\n \n # import ipdb; ipdb.set_trace() \n \n for iteration in tqdm(range(int(num_steps*(batch_size//num_gpus)))):\n \n try:\n mini_batch = next(dataloader_iter)\n except StopIteration: # Explicitly catch StopIteration\n dataloader_iter = iter(dataloader)\n mini_batch = next(dataloader_iter)\n\n image = mini_batch[\"img\"]\n\n attn_maps = ptp_utils.run_and_find_attn(\n ldm,\n image,\n context,\n layers=layers,\n noise_level=noise_level,\n from_where=from_where,\n upsample_res=-1,\n device=device,\n controllers=controllers,\n )\n \n # import ipdb; ipdb.set_trace()\n\n transformed_img = 
invertible_transform(image)\n\n attention_maps_transformed = ptp_utils.run_and_find_attn(\n ldm,\n transformed_img,\n context,\n layers=layers,\n noise_level=noise_level,\n from_where=from_where,\n upsample_res=-1,\n device=device,\n controllers=controllers,\n )\n \n _sharpening_loss = []\n _loss_equivariance_attn = []\n \n for index, attn_map, attention_map_transformed in zip(torch.arange(num_gpus), attn_maps, attention_maps_transformed):\n\n if top_k_strategy == \"entropy\":\n top_embedding_indices = ptp_utils.entropy_sort(\n attn_map, furthest_point_num_samples,\n )\n elif top_k_strategy == \"gaussian\":\n top_embedding_indices = ptp_utils.find_top_k_gaussian(\n attn_map, furthest_point_num_samples, sigma=sigma, num_subjects = num_subjects\n )\n elif top_k_strategy == \"consistent\":\n top_embedding_indices = torch.arange(furthest_point_num_samples)\n else:\n raise NotImplementedError\n \n top_embedding_indices = ptp_utils.furthest_point_sampling(attention_map_transformed, top_k, top_embedding_indices)\n\n _sharpening_loss.append(sharpening_loss(attn_map[top_embedding_indices], device=device, sigma=sigma, num_subjects = num_subjects))\n\n _loss_equivariance_attn.append(equivariance_loss(\n attn_map[top_embedding_indices], attention_map_transformed[top_embedding_indices][None].repeat(num_gpus, 1, 1, 1), invertible_transform, index\n ))\n \n\n\n _sharpening_loss = torch.stack([loss.to('cuda:0') for loss in _sharpening_loss]).mean()\n _loss_equivariance_attn = torch.stack([loss.to('cuda:0') for loss in _loss_equivariance_attn]).mean()\n \n\n # use the old loss for the first 1000 iterations\n # new loss is unstable for early iterations\n loss = (\n + _loss_equivariance_attn * equivariance_attn_loss_weight\n + _sharpening_loss * sharpening_loss_weight\n )\n\n running_equivariance_attn_loss += _loss_equivariance_attn / (batch_size//num_gpus) * equivariance_attn_loss_weight\n running_sharpening_loss += _sharpening_loss / (batch_size//num_gpus) * sharpening_loss_weight\n running_total_loss += loss / (batch_size//num_gpus)\n\n loss = loss / (batch_size//num_gpus)\n\n loss.backward()\n if (iteration + 1) % (batch_size//num_gpus) == 0:\n optimizer.step()\n optimizer.zero_grad()\n\n if wandb_log:\n wandb.log(\n {\n \"loss\": running_total_loss.item(),\n \"running_equivariance_attn_loss\": running_equivariance_attn_loss.item(),\n \"running_sharpening_loss\": running_sharpening_loss.item(),\n \"iteration time\": time.time() - it_start,\n }\n )\n else:\n print(\n f\"loss: {loss.item()}, \\\n _loss_equivariance_attn: {running_equivariance_attn_loss.item()} \\\n sharpening_loss: {running_sharpening_loss.item()}, \\\n running_total_loss: {running_total_loss.item()}, \\\n iteration time: {time.time() - it_start}\"\n )\n running_equivariance_attn_loss = 0\n running_sharpening_loss = 0\n running_total_loss = 0\n \n it_start = time.time()\n\n print(f\"optimization took {time.time() - start} seconds\")\n\n return context.detach()"
},
{
"identifier": "find_best_indices",
"path": "unsupervised_keypoints/keypoint_regressor.py",
"snippet": "@torch.no_grad()\ndef find_best_indices(\n ldm,\n context,\n num_steps=100,\n device=\"cuda\",\n noise_level=-1,\n upsample_res=256,\n layers=[0, 1, 2, 3, 4, 5],\n from_where=[\"down_cross\", \"mid_cross\", \"up_cross\"],\n num_tokens=1000,\n top_k=30,\n dataset_loc=\"~\",\n dataset_name = \"celeba_aligned\",\n min_dist = 0.05,\n furthest_point_num_samples=50,\n controllers=None,\n num_gpus=1,\n top_k_strategy = \"entropy\",\n sigma = 3,\n validation = False,\n num_subjects=1,\n):\n if dataset_name == \"celeba_aligned\":\n dataset = CelebA(split=\"train\", dataset_loc=dataset_loc)\n elif dataset_name == \"celeba_wild\":\n dataset = CelebA(split=\"train\", dataset_loc=dataset_loc, align = False)\n elif dataset_name == \"cub_aligned\":\n dataset = cub.TrainSet(data_root=dataset_loc, image_size=512)\n elif dataset_name == \"cub_001\":\n dataset = cub_parts.CUBDataset(dataset_root=dataset_loc, split=\"train\", single_class=1)\n elif dataset_name == \"cub_002\":\n dataset = cub_parts.CUBDataset(dataset_root=dataset_loc, split=\"train\", single_class=2)\n elif dataset_name == \"cub_003\":\n dataset = cub_parts.CUBDataset(dataset_root=dataset_loc, split=\"train\", single_class=3)\n elif dataset_name == \"cub_all\":\n dataset = cub_parts.CUBDataset(dataset_root=dataset_loc, split=\"train\")\n elif dataset_name == \"taichi\":\n dataset = taichi.TrainSet(data_root=dataset_loc, image_size=512)\n elif dataset_name == \"human3.6m\":\n dataset = human36m.TrainSet(data_root=dataset_loc, validation=validation)\n elif dataset_name == \"unaligned_human3.6m\":\n dataset = unaligned_human36m.TrainSet(data_root=dataset_loc, image_size=512)\n elif dataset_name == \"deepfashion\":\n dataset = deepfashion.TrainSet(data_root=dataset_loc, image_size=512)\n elif dataset_name == \"custom\":\n dataset = custom_images.CustomDataset(data_root=dataset_loc, image_size=512)\n else:\n raise NotImplementedError\n\n maps = []\n indices_list = []\n\n # create dataloader for the dataset\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=num_gpus, shuffle=True, drop_last=True)\n\n dataloader_iter = iter(dataloader)\n\n for _ in tqdm(range(num_steps//num_gpus)):\n\n try:\n mini_batch = next(dataloader_iter)\n except StopIteration: # Explicitly catch StopIteration\n dataloader_iter = iter(dataloader)\n mini_batch = next(dataloader_iter)\n\n image = mini_batch[\"img\"]\n\n attention_maps = ptp_utils.run_and_find_attn(\n ldm,\n image,\n context,\n layers=layers,\n noise_level=noise_level,\n from_where=from_where,\n upsample_res=upsample_res,\n controllers=controllers,\n )\n \n for attention_map in attention_maps:\n \n if top_k_strategy == \"entropy\":\n top_initial_candidates = ptp_utils.entropy_sort(\n attention_map, furthest_point_num_samples, \n )\n elif top_k_strategy == \"gaussian\":\n top_initial_candidates = ptp_utils.find_top_k_gaussian(\n attention_map, furthest_point_num_samples, sigma=sigma, num_subjects = num_subjects\n )\n elif top_k_strategy == \"consistent\":\n top_initial_candidates = torch.arange(furthest_point_num_samples)\n else:\n raise NotImplementedError\n \n top_embedding_indices = ptp_utils.furthest_point_sampling(attention_map, top_k, top_initial_candidates)\n \n indices_list.append(top_embedding_indices.cpu())\n \n # find the top_k most common indices\n indices_list = torch.cat([index for index in indices_list])\n # indices_list = indices_list.reshape(-1)\n indices, counts = torch.unique(indices_list, return_counts=True)\n indices = indices[counts.argsort(descending=True)]\n indices 
= indices[:top_k]\n\n return indices"
},
{
"identifier": "precompute_all_keypoints",
"path": "unsupervised_keypoints/keypoint_regressor.py",
"snippet": "@torch.no_grad()\ndef precompute_all_keypoints(\n ldm,\n context,\n top_indices,\n device=\"cuda\",\n noise_level=-1,\n layers=[0, 1, 2, 3, 4, 5],\n from_where=[\"down_cross\", \"mid_cross\", \"up_cross\"],\n augment_degrees=30,\n augment_scale=(0.9, 1.1),\n augment_translate=(0.1, 0.1),\n augmentation_iterations=20,\n dataset_loc=\"~\",\n visualize=False,\n dataset_name = \"celeba_aligned\",\n controllers=None,\n num_gpus=1,\n max_num_points = 50_000,\n max_loc_strategy=\"argmax\",\n save_folder=\"outputs\",\n validation = False,\n):\n if dataset_name == \"celeba_aligned\":\n dataset = CelebA(split=\"train\", dataset_loc=dataset_loc)\n elif dataset_name == \"celeba_wild\":\n dataset = CelebA(split=\"train\", dataset_loc=dataset_loc, align = False)\n elif dataset_name == \"cub_aligned\":\n dataset = cub.TrainRegSet(data_root=dataset_loc, image_size=512)\n elif dataset_name == \"cub_001\":\n dataset = cub_parts.CUBDataset(dataset_root=dataset_loc, split=\"train\", single_class=1)\n elif dataset_name == \"cub_002\":\n dataset = cub_parts.CUBDataset(dataset_root=dataset_loc, split=\"train\", single_class=2)\n elif dataset_name == \"cub_003\":\n dataset = cub_parts.CUBDataset(dataset_root=dataset_loc, split=\"train\", single_class=3)\n elif dataset_name == \"cub_all\":\n dataset = cub_parts.CUBDataset(dataset_root=dataset_loc, split=\"train\")\n elif dataset_name == \"taichi\":\n dataset = taichi.TrainRegSet(data_root=dataset_loc, image_size=512)\n elif dataset_name == \"human3.6m\":\n dataset = human36m.TrainRegSet(data_root=dataset_loc, validation=validation)\n elif dataset_name == \"unaligned_human3.6m\":\n dataset = unaligned_human36m.TrainRegSet(data_root=dataset_loc, image_size=512)\n elif dataset_name == \"deepfashion\":\n dataset = deepfashion.TrainRegSet(data_root=dataset_loc, image_size=512)\n else:\n raise NotImplementedError\n\n source_keypoints = []\n target_keypoints = []\n visibility = []\n\n # create dataloader for the dataset\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, drop_last=True)\n\n dataloader_iter = iter(dataloader)\n\n for _ in tqdm(range(min(len(dataset), max_num_points))):\n\n mini_batch = next(dataloader_iter)\n\n\n image = mini_batch[\"img\"][0]\n kpts = mini_batch[\"kpts\"][0]\n \n \n target_keypoints.append(kpts)\n \n \n if \"visibility\" in mini_batch:\n visibility.append(mini_batch[\"visibility\"][0])\n\n # if image is a torch.tensor, convert to numpy\n if type(image) == torch.Tensor:\n image = image.permute(1, 2, 0).detach().cpu().numpy()\n\n attention_maps = run_image_with_context_augmented(\n ldm,\n image,\n context,\n top_indices,\n device=device,\n from_where=from_where,\n layers=layers,\n noise_level=noise_level,\n augmentation_iterations=augmentation_iterations,\n augment_degrees=augment_degrees,\n augment_scale=augment_scale,\n augment_translate=augment_translate,\n controllers=controllers,\n save_folder=save_folder,\n num_gpus=num_gpus,\n )\n if max_loc_strategy == \"argmax\":\n highest_indices = find_max_pixel(attention_maps) / 512.0\n else:\n highest_indices = pixel_from_weighted_avg(attention_maps) / 512.0\n\n source_keypoints.append(highest_indices)\n\n return torch.stack(source_keypoints), torch.stack(target_keypoints), torch.stack(visibility) if len(visibility) > 0 else None"
},
{
"identifier": "return_regressor",
"path": "unsupervised_keypoints/keypoint_regressor.py",
"snippet": "def return_regressor(X, Y):\n import numpy as np\n \n # find mean of X\n X = X - 0.5\n Y = Y - 0.5\n\n # # W = np.linalg.inv(X.T @ X) @ X.T @ Y\n W = np.linalg.pinv(X.T @ X) @ X.T @ Y\n\n return W"
},
{
"identifier": "return_regressor_visible",
"path": "unsupervised_keypoints/keypoint_regressor.py",
"snippet": "def return_regressor_visible(X, Y, visible):\n import numpy as np\n \n # find mean of X\n X = X - 0.5\n Y = Y - 0.5\n\n # Initialize W to have the same number of columns as keypoints\n W = np.zeros((X.shape[1], Y.shape[1]))\n\n # Iterate through each keypoint\n for j in range(Y.shape[1]):\n # Indices where this keypoint is visible\n visible_indices = np.where(visible[:, j] == 1)[0]\n \n # Filter X and Y matrices based on visibility of this keypoint\n X_filtered = X[visible_indices, :]\n Y_filtered = Y[visible_indices, j]\n\n # Solve for the weights related to this keypoint\n W_j = np.linalg.pinv(X_filtered.T @ X_filtered) @ X_filtered.T @ Y_filtered\n \n # Store these weights in the W matrix\n W[:, j] = W_j\n\n return W"
},
{
"identifier": "return_regressor_human36m",
"path": "unsupervised_keypoints/keypoint_regressor.py",
"snippet": "def return_regressor_human36m(X, Y):\n \n from unsupervised_keypoints.eval import swap_points\n \n import numpy as np\n \n X = torch.tensor(X)-0.5\n Y = torch.tensor(Y)-0.5\n \n XTXXT = (X.T @ X).inverse() @ X.T\n \n while True:\n W = XTXXT @ Y\n pred_y = X @ W\n \n pred_y = torch.tensor(pred_y)\n\n dist = (pred_y - Y).reshape(X.shape[0], -1, 2).norm(dim=2).mean(dim=1)\n\n swaped_y = swap_points(Y.reshape(Y.shape[0], -1, 2)).reshape(Y.shape[0], -1)\n swaped_dist = (pred_y - swaped_y).reshape(X.shape[0], -1, 2).norm(dim=2).mean(dim=1)\n\n should_swap = dist > swaped_dist\n\n if should_swap.sum() > 10:\n print(\"should swap sum, \", should_swap.sum())\n Y[should_swap] = swaped_y[should_swap]\n else:\n break\n \n\n return W.numpy()"
},
{
"identifier": "evaluate",
"path": "unsupervised_keypoints/eval.py",
"snippet": "@torch.no_grad()\ndef evaluate(\n ldm,\n context,\n indices,\n regressor,\n device=\"cuda\",\n from_where=[\"down_cross\", \"mid_cross\", \"up_cross\"],\n upsample_res=32,\n layers=[0, 1, 2, 3, 4, 5],\n noise_level=-1,\n num_tokens=1000,\n augment_degrees=30,\n augment_scale=(0.9, 1.1),\n augment_translate=(0.1, 0.1),\n augmentation_iterations=20,\n dataset_loc=\"~\",\n save_folder=\"outputs\",\n wandb_log=False,\n visualize=False,\n dataset_name = \"celeba_aligned\",\n evaluation_method=\"inter_eye_distance\",\n controllers=None,\n num_gpus=1,\n max_loc_strategy = \"argmax\",\n validation = False,\n):\n if dataset_name == \"celeba_aligned\":\n dataset = CelebA(split=\"test\", dataset_loc=dataset_loc)\n elif dataset_name == \"celeba_wild\":\n dataset = CelebA(split=\"test\", dataset_loc=dataset_loc, align = False)\n elif dataset_name == \"cub_aligned\":\n dataset = cub.TestSet(data_root=dataset_loc, image_size=512)\n elif dataset_name == \"cub_001\":\n dataset = cub_parts.CUBDataset(dataset_root=dataset_loc, split=\"test\", single_class=1)\n elif dataset_name == \"cub_002\":\n dataset = cub_parts.CUBDataset(dataset_root=dataset_loc, split=\"test\", single_class=2)\n elif dataset_name == \"cub_003\":\n dataset = cub_parts.CUBDataset(dataset_root=dataset_loc, split=\"test\", single_class=3)\n elif dataset_name == \"cub_all\":\n dataset = cub_parts.CUBDataset(dataset_root=dataset_loc, split=\"test\")\n elif dataset_name == \"taichi\":\n dataset = taichi.TestSet(data_root=dataset_loc, image_size=512)\n elif dataset_name == \"human3.6m\":\n dataset = human36m.TestSet(data_root=dataset_loc, validation=validation)\n elif dataset_name == \"unaligned_human3.6m\":\n dataset = unaligned_human36m.TestSet(data_root=dataset_loc, image_size=512)\n elif dataset_name == \"deepfashion\":\n dataset = deepfashion.TestSet(data_root=dataset_loc, image_size=512)\n else:\n raise NotImplementedError\n\n distances = []\n\n max_value = 0\n\n all_values = []\n \n \n # create dataloader for the dataset\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, drop_last=True)\n\n dataloader_iter = iter(dataloader)\n\n for i in range(len(dataset)):\n\n batch = next(dataloader_iter)\n\n img = batch[\"img\"][0]\n\n attention_maps = run_image_with_context_augmented(\n ldm,\n img,\n context,\n indices.cpu(),\n device=device,\n from_where=from_where,\n layers=layers,\n noise_level=noise_level,\n augmentation_iterations=augmentation_iterations,\n augment_degrees=augment_degrees,\n augment_scale=augment_scale,\n augment_translate=augment_translate,\n controllers=controllers,\n num_gpus=num_gpus,\n save_folder=save_folder,\n visualize=(i==0),\n )\n \n if max_loc_strategy == \"argmax\":\n highest_indices = find_max_pixel(attention_maps) / 512.0\n else:\n highest_indices = pixel_from_weighted_avg(attention_maps) / 512.0\n\n # estimated_kpts = regressor(highest_indices.view(-1))\n estimated_kpts = ((highest_indices.view(1, -1)-0.5) @ regressor)+0.5\n\n estimated_kpts = estimated_kpts.view(-1, 2)\n\n gt_kpts = batch[\"kpts\"][0].cuda()\n \n if evaluation_method == \"mean_average_error\" or evaluation_method == \"pck\":\n estimated_kpts *= 256\n gt_kpts *= 256\n\n # get l2 distance between estimated and gt kpts\n l2 = (estimated_kpts - gt_kpts).norm(dim=-1)\n \n if evaluation_method == \"inter_eye_distance\":\n\n eye_dist = torch.sqrt(torch.sum((gt_kpts[0] - gt_kpts[1]) ** 2, dim=-1))\n\n l2 = l2 / eye_dist\n \n l2_mean = torch.mean(l2)\n \n if evaluation_method == \"visible\" or evaluation_method 
== \"mean_average_error\":\n visible = batch['visibility'][0].to(device) if 'visibility' in batch else torch.ones_like(l2)\n \n l2_mean = (l2*visible).sum()\n \n if evaluation_method == \"visible\":\n l2_mean /= visible.sum()\n \n if evaluation_method == \"pck\":\n l2_mean = (l2 < 6).float().mean()\n \n if evaluation_method == \"orientation_invariant\":\n l2_mean = l2.mean()\n swapped_kpts = swap_points(estimated_kpts[None])[0]\n \n swapped_l2_mean = (swapped_kpts - gt_kpts).norm(dim=-1).mean()\n \n if swapped_l2_mean < l2_mean:\n l2_mean = swapped_l2_mean\n \n l2_mean *= 128\n\n\n \n\n all_values.append(l2_mean.item())\n\n if l2_mean > max_value:\n print(f\"new max value: {l2_mean}, {i} \\n\")\n print(i)\n max_value = l2_mean\n\n distances.append(l2_mean.cpu())\n # eye_dists.append(eye_dist.cpu())\n\n print(\n f\"{(i/len(dataset)):06f}: {i} mean distance: {torch.mean(torch.stack(distances))}, per keypoint: {torch.mean(torch.stack(distances), dim=0)}\",\n end=\"\\r\",\n )\n\n if i % 100 == 0:\n print()\n # Extract the 10 worst distances (and their indices) from the priority queue\n\n if wandb_log:\n wandb.log({\"mean_distance\": torch.mean(torch.stack(distances))})\n print()\n\n # save argsorted all_values in torch\n torch.save(torch.tensor(all_values), os.path.join(save_folder, \"all_errors.pt\"))"
},
{
"identifier": "visualize_attn_maps",
"path": "unsupervised_keypoints/visualize.py",
"snippet": "@torch.no_grad()\ndef visualize_attn_maps(\n ldm,\n context,\n indices,\n device=\"cuda\",\n from_where=[\"down_cross\", \"mid_cross\", \"up_cross\"],\n upsample_res=32,\n layers=[0, 1, 2, 3, 4, 5],\n lr=5e-3,\n noise_level=-1,\n num_tokens=1000,\n num_points=30,\n num_images=100,\n regressor=None,\n augment_degrees=30,\n augment_scale=(0.9, 1.1),\n augment_translate=(0.1, 0.1),\n augmentation_iterations=20,\n dataset_loc=\"~\",\n save_folder=\"outputs\",\n visualize=False,\n dataset_name = \"celeba_aligned\",\n controllers=None,\n num_gpus=1,\n max_loc_strategy=\"argmax\",\n height = 11,\n width = 9,\n validation = False,\n):\n if dataset_name == \"celeba_aligned\":\n dataset = CelebA(split=\"test\", dataset_loc=dataset_loc)\n elif dataset_name == \"celeba_wild\":\n dataset = CelebA(split=\"test\", dataset_loc=dataset_loc, align = False)\n elif dataset_name == \"cub_aligned\":\n dataset = cub.TestSet(data_root=dataset_loc, image_size=512)\n elif dataset_name == \"cub_001\":\n dataset = cub_parts.CUBDataset(dataset_root=dataset_loc, split=\"test\", single_class=1)\n elif dataset_name == \"cub_002\":\n dataset = cub_parts.CUBDataset(dataset_root=dataset_loc, split=\"test\", single_class=2)\n elif dataset_name == \"cub_003\":\n dataset = cub_parts.CUBDataset(dataset_root=dataset_loc, split=\"test\", single_class=3)\n elif dataset_name == \"cub_all\":\n dataset = cub_parts.CUBDataset(dataset_root=dataset_loc, split=\"test\")\n elif dataset_name == \"taichi\":\n dataset = taichi.TestSet(data_root=dataset_loc, image_size=512)\n elif dataset_name == \"human3.6m\":\n dataset = human36m.TestSet(data_root=dataset_loc, validation=validation)\n elif dataset_name == \"unaligned_human3.6m\":\n dataset = unaligned_human36m.TestSet(data_root=dataset_loc, image_size=512)\n elif dataset_name == \"deepfashion\":\n dataset = deepfashion.TestSet(data_root=dataset_loc, image_size=512)\n elif dataset_name == \"custom\":\n dataset = custom_images.CustomDataset(data_root=dataset_loc, image_size=512)\n else:\n raise NotImplementedError\n\n imgs = []\n maps = []\n gt_kpts = []\n \n # random permute the dataset\n randperm = torch.randperm(len(dataset))\n \n for i in tqdm(range(height * width)):\n batch = dataset[randperm[i%len(dataset)].item()]\n\n img = batch[\"img\"]\n\n _gt_kpts = batch[\"kpts\"] \n gt_kpts.append(_gt_kpts)\n imgs.append(img.cpu())\n\n map = run_image_with_context_augmented(\n ldm,\n img,\n context,\n indices.cpu(),\n device=device,\n from_where=from_where,\n layers=layers,\n noise_level=noise_level,\n augment_degrees=augment_degrees,\n augment_scale=augment_scale,\n augment_translate=augment_translate,\n augmentation_iterations=augmentation_iterations,\n visualize=(i==0),\n controllers=controllers,\n num_gpus=num_gpus,\n save_folder=save_folder,\n )\n\n maps.append(map.cpu())\n maps = torch.stack(maps)\n gt_kpts = torch.stack(gt_kpts)\n\n if max_loc_strategy == \"argmax\":\n points = find_max_pixel(maps.view(height * width * num_points, 512, 512)) / 512.0\n else:\n points = pixel_from_weighted_avg(maps.view(height * width * num_points, 512, 512)) / 512.0\n points = points.reshape(height * width, num_points, 2)\n\n plot_point_correspondences(\n imgs, points.cpu(), os.path.join(save_folder, \"unsupervised_keypoints.pdf\"), height, width\n )\n\n for i in range(num_points):\n save_grid(\n maps[:, i].cpu(), imgs, os.path.join(save_folder, f\"keypoint_{i:03d}.png\")\n )\n\n if regressor is not None:\n est_points = ((points.view(num_images, -1)-0.5) @ regressor)+0.5\n\n 
plot_point_correspondences(\n imgs,\n est_points.view(num_images, -1, 2).cpu(),\n os.path.join(save_folder, \"estimated_keypoints.pdf\"),\n height,\n width,\n )\n\n plot_point_correspondences(\n imgs, gt_kpts, os.path.join(save_folder, \"gt_keypoints.pdf\"), height, width\n )"
}
] | import os
import wandb
import numpy as np
import argparse
import torch
import numpy as np
from unsupervised_keypoints.optimize_token import load_ldm
from unsupervised_keypoints.optimize import optimize_embedding
from unsupervised_keypoints.keypoint_regressor import (
find_best_indices,
precompute_all_keypoints,
return_regressor,
return_regressor_visible,
return_regressor_human36m,
)
from unsupervised_keypoints.eval import evaluate
from unsupervised_keypoints.visualize import visualize_attn_maps | 9,555 | )
parser.add_argument(
"--max_loc_strategy",
type=str,
default="argmax",
choices=["argmax", "weighted_avg"],
help="strategy for choosing max location in the attention map",
)
parser.add_argument(
"--evaluation_method",
type=str,
default="inter_eye_distance",
choices=["inter_eye_distance", "visible", "mean_average_error", "pck", "orientation_invariant"],
help="strategy for evaluation",
)
parser.add_argument(
"--min_dist",
type=float,
default=0.1,
help="minimum distance between the keypoints, as a fraction of the image size",
)
parser.add_argument(
"--furthest_point_num_samples",
type=int,
default=25,
help="the number of samples to use if using the furthest point strategy",
)
parser.add_argument(
"--num_indices",
type=int,
default=100,
help="the number of samples to use for finding the indices of the best tokens",
)
parser.add_argument(
"--num_subjects",
type=int,
default=1,
help="the number of subjects within each image",
)
parser.add_argument(
"--sharpening_loss_weight",
type=float,
default=100,
help="Weight of the sharpening loss",
)
parser.add_argument(
"--equivariance_attn_loss_weight",
type=float,
default=1000.0,
help="Weight of the old equivariance loss",
)
parser.add_argument("--layers", type=int, nargs="+", default=[0, 1, 2, 3])
parser.add_argument(
"--noise_level",
type=int,
default=-1,
help="noise level for the test set between 0 and 49 where 0 is the highest noise level and 49 is the lowest noise level",
)
parser.add_argument(
"--max_num_points",
type=int,
default=50_000,
help="number of samples to precompute",
)
parser.add_argument(
"--sigma", type=float, default=2.0, help="sigma for the gaussian kernel"
)
parser.add_argument(
"--augment_degrees",
type=float,
default=15.0,
help="rotation degrees for augmentation",
)
parser.add_argument(
"--augment_scale",
type=float,
# 2 arguments
nargs="+",
default=[0.8, 1.0],
help="scale factor for augmentation",
)
parser.add_argument(
"--augment_translate",
type=float,
nargs="+",
default=[0.25, 0.25],
help="amount of translation for augmentation along x and y axis",
)
parser.add_argument(
"--augmentation_iterations",
type=int,
default=10,
help="number of iterations for augmentation",
)
# store true the boolean argument 'visualize'
parser.add_argument(
"--visualize", action="store_true", help="visualize the attention maps"
)
parser.add_argument(
"--validation", action="store_true", help="use the validation sets instead of the training/testing set"
)
parser.add_argument("--top_k", type=int, default=10, help="number of points to choose")
args = parser.parse_args()
ldm, controllers, num_gpus = load_ldm(args.device, args.model_type, feature_upsample_res=args.feature_upsample_res)
# if args.save_folder doesnt exist create it
if not os.path.exists(args.save_folder):
os.makedirs(args.save_folder)
# print number of gpus
print("Number of GPUs: ", torch.cuda.device_count())
if args.wandb:
# start a wandb session
wandb.init(project="attention_maps", name=args.wandb_name, config=vars(args))
if args.start_from_stage == "optimize":
|
# Argument parsing
parser = argparse.ArgumentParser(description="optimize a class embedding")
# Network details
parser.add_argument(
"--model_type",
type=str,
default="runwayml/stable-diffusion-v1-5",
help="ldm model type",
)
# Dataset details
parser.add_argument(
"--dataset_loc",
type=str,
default="~",
help="Path to dataset",
)
parser.add_argument(
"--save_folder",
type=str,
default="outputs",
help="Where to save visualizations and checkpoints",
)
parser.add_argument(
"--wandb_name",
type=str,
default="temp",
help="name of the wandb run",
)
parser.add_argument(
"--dataset_name",
# set the choices to be "mafl" and "celeba_aligned"
choices=["celeba_aligned", "celeba_wild", "cub_aligned", "cub_001", "cub_002", "cub_003", "cub_all", "deepfashion", "taichi", "human3.6m", "unaligned_human3.6m", "custom"],
type=str,
default="celeba_aligned",
help="name of the dataset to use",
)
parser.add_argument(
"--max_len",
type=int,
default=-1,
help="max length of the dataset. -1 means no max length",
)
parser.add_argument(
"--start_from_stage",
choices=["optimize", "find_indices", "precompute", "evaluate"],
type=str,
default="optimize",
help="Specify the stage from which the process should start."
)
parser.add_argument("--device", type=str, default="cuda:0", help="device to use")
parser.add_argument("--wandb", action="store_true", help="wandb logging")
parser.add_argument("--lr", type=float, default=5e-3, help="learning rate")
parser.add_argument(
"--num_steps", type=int, default=500, help="number of steps to optimize for"
)
parser.add_argument(
"--num_tokens", type=int, default=500, help="number of tokens to optimize"
)
parser.add_argument(
"--feature_upsample_res", type=int, default=128, help="upsampled resolution for latent features grabbed from the attn operation"
)
parser.add_argument(
"--batch_size", type=int, default=4, help="size of the batch for optimization"
)
parser.add_argument(
"--top_k_strategy",
type=str,
default="gaussian",
choices=["entropy", "gaussian", "consistent"],
help="strategy for choosing top k tokens",
)
parser.add_argument(
"--max_loc_strategy",
type=str,
default="argmax",
choices=["argmax", "weighted_avg"],
help="strategy for choosing max location in the attention map",
)
parser.add_argument(
"--evaluation_method",
type=str,
default="inter_eye_distance",
choices=["inter_eye_distance", "visible", "mean_average_error", "pck", "orientation_invariant"],
help="strategy for evaluation",
)
parser.add_argument(
"--min_dist",
type=float,
default=0.1,
help="minimum distance between the keypoints, as a fraction of the image size",
)
parser.add_argument(
"--furthest_point_num_samples",
type=int,
default=25,
help="the number of samples to use if using the furthest point strategy",
)
parser.add_argument(
"--num_indices",
type=int,
default=100,
help="the number of samples to use for finding the indices of the best tokens",
)
parser.add_argument(
"--num_subjects",
type=int,
default=1,
help="the number of subjects within each image",
)
parser.add_argument(
"--sharpening_loss_weight",
type=float,
default=100,
help="Weight of the sharpening loss",
)
parser.add_argument(
"--equivariance_attn_loss_weight",
type=float,
default=1000.0,
help="Weight of the old equivariance loss",
)
parser.add_argument("--layers", type=int, nargs="+", default=[0, 1, 2, 3])
parser.add_argument(
"--noise_level",
type=int,
default=-1,
help="noise level for the test set between 0 and 49 where 0 is the highest noise level and 49 is the lowest noise level",
)
parser.add_argument(
"--max_num_points",
type=int,
default=50_000,
help="number of samples to precompute",
)
parser.add_argument(
"--sigma", type=float, default=2.0, help="sigma for the gaussian kernel"
)
parser.add_argument(
"--augment_degrees",
type=float,
default=15.0,
help="rotation degrees for augmentation",
)
parser.add_argument(
"--augment_scale",
type=float,
# 2 arguments
nargs="+",
default=[0.8, 1.0],
help="scale factor for augmentation",
)
parser.add_argument(
"--augment_translate",
type=float,
nargs="+",
default=[0.25, 0.25],
help="amount of translation for augmentation along x and y axis",
)
parser.add_argument(
"--augmentation_iterations",
type=int,
default=10,
help="number of iterations for augmentation",
)
# store true the boolean argument 'visualize'
parser.add_argument(
"--visualize", action="store_true", help="visualize the attention maps"
)
parser.add_argument(
"--validation", action="store_true", help="use the validation sets instead of the training/testing set"
)
parser.add_argument("--top_k", type=int, default=10, help="number of points to choose")
args = parser.parse_args()
ldm, controllers, num_gpus = load_ldm(args.device, args.model_type, feature_upsample_res=args.feature_upsample_res)
# if args.save_folder doesnt exist create it
if not os.path.exists(args.save_folder):
os.makedirs(args.save_folder)
# print number of gpus
print("Number of GPUs: ", torch.cuda.device_count())
if args.wandb:
# start a wandb session
wandb.init(project="attention_maps", name=args.wandb_name, config=vars(args))
if args.start_from_stage == "optimize": | embedding = optimize_embedding( | 1 | 2023-11-23 00:04:17+00:00 | 12k |
BigRoy/usd-qtpy | usd_qtpy/prim_hierarchy.py | [
{
"identifier": "get_prim_types_by_group",
"path": "usd_qtpy/lib/usd.py",
"snippet": "def get_prim_types_by_group() -> dict:\n \"\"\"Return all registered concrete type names by nice plug-in grouping.\n\n Returns:\n dict: Schema type names grouped by plug-in name.\n\n \"\"\"\n\n plug_reg = Plug.Registry()\n schema_reg = Usd.SchemaRegistry\n\n # Get schema types by plug-in group\n types_by_group = defaultdict(list)\n for t in plug_reg.GetAllDerivedTypes(Tf.Type.FindByName(\"UsdSchemaBase\")):\n if not schema_reg.IsConcrete(t):\n continue\n\n plugin = plug_reg.GetPluginForType(t)\n if not plugin:\n continue\n\n plugin_name = plugin.name\n plugin_name = NICE_PLUGIN_TYPE_NAMES.get(plugin_name, plugin_name)\n\n # We don't list empty names. This allows hiding certain plugins too.\n if not plugin_name:\n continue\n\n type_name = schema_reg.GetConcreteSchemaTypeName(t)\n types_by_group[plugin_name].append(type_name)\n\n return {\n key: sorted(value) for key, value in sorted(types_by_group.items())\n }"
},
{
"identifier": "parent_prims",
"path": "usd_qtpy/lib/usd.py",
"snippet": "def parent_prims(prims: list[Usd.Prim],\n new_parent: Sdf.Path,\n layers: list[Sdf.Layer] = None) -> bool:\n \"\"\"Move Prims to a new parent in given layers.\n\n Note:\n This will only reparent prims to the new parent if the new parent\n exists in the layer.\n\n Arguments:\n prims (list[Usd.Prim]): The prims to move the new parent\n new_parent (Sdf.Path): Parent path to be moved to.\n layers (list[Sdf.Layer]): The layers to apply the reparenting\n in. If None are provided the stage's full layer stack will be used.\n\n \"\"\"\n if not prims:\n return False\n\n # Only consider prims not already parented to the new parent\n prims = [\n prim for prim in prims if prim.GetPath().GetParentPath() != new_parent\n ]\n if not prims:\n return False\n\n if layers is None:\n stage = prims[0].GetStage()\n layers = stage.GetLayerStack()\n\n edit_batch = Sdf.BatchNamespaceEdit()\n for prim in prims:\n edit = Sdf.NamespaceEdit.Reparent(\n prim.GetPath(),\n new_parent,\n -1\n )\n edit_batch.Add(edit)\n\n any_edits_made = False\n with Sdf.ChangeBlock():\n for layer in layers:\n applied = layer.Apply(edit_batch)\n if applied:\n any_edits_made = True\n for edit in edit_batch.edits:\n repath_properties(layer,\n edit.currentPath,\n edit.newPath)\n return any_edits_made"
},
{
"identifier": "remove_spec",
"path": "usd_qtpy/lib/usd.py",
"snippet": "def remove_spec(spec):\n \"\"\"Remove Sdf.Spec authored opinion.\"\"\"\n if spec.expired:\n return\n\n if isinstance(spec, Sdf.PrimSpec):\n # PrimSpec\n parent = spec.nameParent\n if parent:\n view = parent.nameChildren\n else:\n # Assume PrimSpec is root prim\n view = spec.layer.rootPrims\n del view[spec.name]\n\n elif isinstance(spec, Sdf.PropertySpec):\n # Relationship and Attribute specs\n del spec.owner.properties[spec.name]\n\n elif isinstance(spec, Sdf.VariantSetSpec):\n # Owner is Sdf.PrimSpec (or can also be Sdf.VariantSpec)\n del spec.owner.variantSets[spec.name]\n\n elif isinstance(spec, Sdf.VariantSpec):\n # Owner is Sdf.VariantSetSpec\n spec.owner.RemoveVariant(spec)\n\n else:\n raise TypeError(f\"Unsupported spec type: {spec}\")"
},
{
"identifier": "unique_name",
"path": "usd_qtpy/lib/usd.py",
"snippet": "def unique_name(stage: Usd.Stage, prim_path: Sdf.Path) -> Sdf.Path:\n \"\"\"Return Sdf.Path that is unique under the current composed stage.\n\n Note that this technically does not ensure that the Sdf.Path does not\n exist in any of the layers, e.g. it could be defined within a currently\n unselected variant or a muted layer.\n\n \"\"\"\n src = prim_path.pathString.rstrip(\"123456789\")\n i = 1\n while stage.GetPrimAtPath(prim_path):\n prim_path = Sdf.Path(f\"{src}{i}\")\n i += 1\n return prim_path"
},
{
"identifier": "copy_spec_merge",
"path": "usd_qtpy/lib/usd_merge_spec.py",
"snippet": "def copy_spec_merge(src_layer: Sdf.Layer,\n src_path: Sdf.Path,\n dest_layer: Sdf.Layer,\n dest_path: Sdf.Path) -> bool:\n \"\"\"Copy spec while merging into the existing opinions instead of replacing.\n\n The children hierarchy will be merged so that existing children will be\n preserved, but new children will be applied on top of the existing ones,\n including overlaying onto existing children prims with the same name.\n\n For copying values onto existing prims:\n - specifier is only copied if copied spec sets `Sdf.SpecifierDef`\n - type name is only copied if original spec had no or empty type name\n\n \"\"\"\n return Sdf.CopySpec(src_layer, src_path, dest_layer, dest_path,\n should_copy_value_fn, should_copy_children_fn)"
},
{
"identifier": "iter_model_rows",
"path": "usd_qtpy/lib/qt.py",
"snippet": "def iter_model_rows(model, column, include_root=False):\n \"\"\"Iterate over all row indices in a model\"\"\"\n indices = [QtCore.QModelIndex()] # start iteration at root\n\n for index in indices:\n # Add children to the iterations\n child_rows = model.rowCount(index)\n for child_row in range(child_rows):\n child_index = model.index(child_row, column, index)\n indices.append(child_index)\n\n if not include_root and not index.isValid():\n continue\n\n yield index"
},
{
"identifier": "DrawRectsDelegate",
"path": "usd_qtpy/prim_delegate.py",
"snippet": "class DrawRectsDelegate(QtWidgets.QStyledItemDelegate):\n \"\"\"Draws rounded rects 'tags' to the right hand side of items.\n\n The tags to be drawn should be returned by index's data via the\n BlockTagsRole on this class. The returned data should be a list\n with dicts defining each tag:\n {\n \"text\": \"text\", # text value in the block\n \"background-color\": \"#FFFFFF\", # background color\n \"color\": \"#FF9999\" # text color\n }\n\n These tags are clickable and will emit the `rect_clicked` event with\n the model's index and the `str` value of the tag.\n\n \"\"\"\n\n RectDataRole = QtCore.Qt.UserRole + 1001\n\n rect_clicked = QtCore.Signal(QtCore.QEvent, QtCore.QModelIndex, dict)\n\n def iter_rects(self, blocks, option):\n \"\"\"Yield each QRect used for drawing\"\"\"\n\n rect = QtCore.QRect(option.rect)\n padding_topbottom = 2\n padding_sides = 4\n width = 30\n for i, _block_data in enumerate(blocks):\n\n # Calculate left by computing offset from\n # right hand side to align right\n i = i + 1\n right = rect.right()\n left = right - (width * i) - (\n 2 * i * padding_sides) + padding_sides\n yield QtCore.QRect(left, rect.top() + padding_topbottom,\n width, rect.height() - padding_topbottom * 2)\n\n def paint(self, painter, option, index):\n\n super(DrawRectsDelegate, self).paint(painter, option, index)\n\n corner_radius = 5\n painter.setRenderHint(QtGui.QPainter.Antialiasing)\n blocks = index.data(self.RectDataRole) or []\n\n for block_data, block_rect in zip(blocks, self.iter_rects(blocks,\n option)):\n\n text = block_data.get(\"text\", \"\")\n background_color = QtGui.QColor(block_data.get(\"background-color\",\n \"#FF9999\"))\n text_color = QtGui.QColor(block_data.get(\"color\", \"#FFFFFF\"))\n painter.setPen(text_color)\n\n # Draw the block rect\n path = QtGui.QPainterPath()\n path.addRoundedRect(block_rect, corner_radius, corner_radius)\n painter.fillPath(path, background_color)\n\n # Draw text in the block - vertically centered\n point = block_rect.topLeft()\n point.setY(point.y() + block_rect.height() * 0.5)\n\n painter.drawText(block_rect, QtCore.Qt.AlignCenter, text)\n\n def editorEvent(self, event, model, option, index) -> bool:\n if (\n isinstance(event, QtGui.QMouseEvent)\n and event.type() == QtCore.QEvent.MouseButtonPress\n and event.button() == QtCore.Qt.LeftButton\n ):\n blocks = index.data(self.RectDataRole) or []\n if blocks:\n point = event.position().toPoint()\n for block, rect in zip(blocks,\n self.iter_rects(blocks, option)):\n if rect.contains(point):\n self.rect_clicked.emit(event, index, block)\n event.accept()\n return True\n\n return super(DrawRectsDelegate, self).editorEvent(event,\n model,\n option,\n index)\n\n def helpEvent(self,\n event: QtGui.QHelpEvent,\n view: QtWidgets.QAbstractItemView,\n option: QtWidgets.QStyleOptionViewItem,\n index: QtCore.QModelIndex) -> bool:\n if event.type() == QtCore.QEvent.ToolTip:\n\n blocks = index.data(self.RectDataRole) or []\n for block_data, block_rect in zip(blocks, self.iter_rects(blocks,\n option)):\n if block_rect.contains(event.pos()):\n QtWidgets.QToolTip.showText(\n event.globalPos(),\n block_data.get(\"tooltip\", \"\"),\n view\n )\n return True\n\n return super(DrawRectsDelegate, self).helpEvent(event,\n view,\n option,\n index)"
},
{
"identifier": "HierarchyModel",
"path": "usd_qtpy/prim_hierarchy_model.py",
"snippet": "class HierarchyModel(QtCore.QAbstractItemModel):\n \"\"\"Base class for adapting a stage's prim hierarchy for Qt ItemViews\n\n Most clients will want to use a configuration of the `HierachyStandardModel`\n which has a standard set of columns and data or subclass this to provide\n their own custom set of columns.\n\n Clients are encouraged to subclass this module because it provides both\n robust handling of change notification and an efficient lazy population.\n This model listens for TfNotices and emits the appropriate Qt signals.\n \"\"\"\n PrimRole = QtCore.Qt.UserRole + 1\n\n def __init__(\n self,\n stage: Usd.Stage=None,\n predicate=Usd.TraverseInstanceProxies(Usd.PrimIsDefined |\n ~Usd.PrimIsDefined),\n parent=None,\n ) -> None:\n \"\"\"Instantiate a QAbstractItemModel adapter for a UsdStage.\n\n It's safe for the 'stage' to be None if the model needs to be\n instantiated without knowing the stage its interacting with.\n\n 'predicate' specifies the prims that may be accessed via the model on\n the stage. A good policy is to be as accepting of prims as possible\n and rely on a QSortFilterProxyModel to interactively reduce the view.\n Changing the predicate is a potentially expensive operation requiring\n rebuilding internal caches, making not ideal for interactive filtering.\n \"\"\"\n super(HierarchyModel, self).__init__(parent=parent)\n\n self._predicate = predicate\n self._stage = None\n self._index: Union[None, HierarchyCache] = None\n self._listeners = []\n self._icon_provider = PrimTypeIconProvider()\n self.log = logging.getLogger(\"HierarchyModel\")\n\n # Set stage\n self.set_stage(stage)\n\n @property\n def stage(self):\n return self._stage\n\n @stage.setter\n def stage(self, stage):\n self.set_stage(stage)\n\n def set_stage(self, stage: Usd.Stage):\n \"\"\"Resets the model for use with a new stage.\n\n If the stage isn't valid, this effectively becomes an empty model.\n \"\"\"\n if stage == self._stage:\n return\n\n self.revoke_listeners()\n\n self._stage = stage\n with self.reset_model():\n if self._is_stage_valid():\n self._index = HierarchyCache(\n root=stage.GetPrimAtPath(\"/\"),\n predicate=self._predicate\n )\n self.register_listeners()\n else:\n self._index = None\n\n def _is_stage_valid(self):\n return self._stage and self._stage.GetPseudoRoot()\n\n def register_listeners(self):\n \"\"\"Register Tf.Notice listeners\"\"\"\n\n if self._listeners:\n # Do not allow to register more than once, clear old listeners\n self.revoke_listeners()\n\n if self._is_stage_valid():\n # Listen to state changes of the stage to stay in sync\n self._listeners.append(Tf.Notice.Register(\n Usd.Notice.ObjectsChanged,\n self.on_objects_changed,\n self._stage\n ))\n\n def revoke_listeners(self):\n \"\"\"Revoke Tf.Notice listeners\"\"\"\n for listener in self._listeners:\n listener.Revoke()\n self._listeners.clear()\n\n @contextlib.contextmanager\n def reset_model(self):\n \"\"\"Reset the model via context manager.\n\n During the context additional changes can be done before the reset\n of the model is 'finished', like e.g. changing Tf.Notice listeners.\n \"\"\"\n self.beginResetModel()\n try:\n yield\n finally:\n self.endResetModel()\n\n @report_error\n def on_objects_changed(self, notice, sender):\n resynced_paths = notice.GetResyncedPaths()\n resynced_paths = {\n path for path in resynced_paths if path.IsPrimPath()\n # Also include the absolute root path (e.g. 
layer muting)\n or path.IsAbsoluteRootPath()\n }\n if not resynced_paths:\n return\n\n # Include parents so we can use it as lookup for the \"sibling\" check\n resynced_paths_and_parents = resynced_paths.copy()\n resynced_paths_and_parents.update(\n path.GetParentPath() for path in list(resynced_paths)\n )\n with layout_change_context(self):\n persistent_indices = self.persistentIndexList()\n index_to_path = {}\n for index in persistent_indices:\n index_prim = index.internalPointer().get_prim()\n index_path = index_prim.GetPath()\n if (\n index_path in resynced_paths_and_parents\n or index_path.GetParentPath() in resynced_paths_and_parents\n ):\n index_to_path[index] = index_path\n\n self._index.resync_subtrees(resynced_paths)\n\n from_indices = []\n to_indices = []\n for index in index_to_path:\n path = index_to_path[index]\n\n if path in self._index:\n new_proxy = self._index.get_proxy(path)\n new_row = self._index.get_row(new_proxy)\n\n if index.row() != new_row:\n for _i in range(\n self.columnCount(QtCore.QModelIndex())\n ):\n from_indices.append(index)\n to_indices.append(self.createIndex(\n new_row, index.column(), new_proxy)\n )\n else:\n from_indices.append(index)\n to_indices.append(QtCore.QModelIndex())\n self.changePersistentIndexList(from_indices, to_indices)\n\n def _prim_to_row_index(self,\n path: Sdf.Path) -> Optional[QtCore.QModelIndex]:\n \"\"\"Given a path, retrieve the appropriate model index.\"\"\"\n if path in self._index:\n proxy = self._index[path]\n row = self._index.get_row(proxy)\n return self.createIndex(row, 0, proxy)\n\n def _index_to_prim(self,\n model_index: QtCore.QModelIndex) -> Optional[Usd.Prim]:\n \"\"\"Retrieve the prim for the input model index\n\n External clients should use `UsdQt.roles.HierarchyPrimRole` to access\n the prim for an index.\n \"\"\"\n if model_index.isValid():\n proxy = model_index.internalPointer() # -> Proxy\n if type(proxy) is Proxy:\n return proxy.get_prim()\n\n # region Qt methods\n def flags(self, index):\n # Make name editable\n if index.column() == 0:\n return (\n QtCore.Qt.ItemIsEnabled\n | QtCore.Qt.ItemIsSelectable\n | QtCore.Qt.ItemIsEditable\n )\n return super(HierarchyModel, self).flags(index)\n\n def setData(self, index, value, role):\n if role == QtCore.Qt.EditRole:\n if index.column() == 0:\n # Rename prim\n prim = self._index_to_prim(index)\n if not value:\n # Keep original name\n return False\n\n rename_prim(prim, value)\n return True\n\n return super(HierarchyModel, self).setData(index, value, role)\n\n def columnCount(self, parent):\n return 1\n\n def rowCount(self, parent):\n if not self._is_stage_valid():\n return 0\n\n if parent.column() > 0:\n return 0\n\n if not parent.isValid():\n return 1\n\n parent_proxy = parent.internalPointer()\n return self._index.get_child_count(parent_proxy)\n\n def index(self, row, column, parent):\n if not self._is_stage_valid():\n return QtCore.QModelIndex()\n\n if not self.hasIndex(row, column, parent):\n self.log.debug(\"Index does not exist: %s %s %s\", row, column, parent)\n return QtCore.QModelIndex()\n\n if not parent.isValid():\n # We assume the root has already been registered.\n root = self._index.root\n return self.createIndex(row, column, root)\n\n parent_proxy = parent.internalPointer()\n child = self._index.get_child(parent_proxy, row)\n return self.createIndex(row, column, child)\n\n def parent(self, index):\n if not self._is_stage_valid():\n return QtCore.QModelIndex()\n\n if not index.isValid():\n return QtCore.QModelIndex()\n\n proxy = index.internalPointer()\n 
if proxy is None:\n return QtCore.QModelIndex()\n\n if self._index.is_root(proxy):\n return QtCore.QModelIndex()\n\n parent_proxy = self._index.get_parent(proxy)\n parent_row = self._index.get_row(parent_proxy)\n return self.createIndex(parent_row, index.column(), parent_proxy)\n\n def data(self, index, role):\n if not self._is_stage_valid():\n return\n\n if not index.isValid():\n return\n\n if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:\n prim = index.internalPointer().get_prim()\n return prim.GetName()\n\n if role == QtCore.Qt.DecorationRole:\n # icon\n prim = index.internalPointer().get_prim()\n return self._icon_provider.get_icon(prim)\n\n if role == QtCore.Qt.ToolTipRole:\n prim = index.internalPointer().get_prim()\n return prim.GetTypeName()\n\n if role == self.PrimRole:\n return index.internalPointer().get_prim()\n\n if role == DrawRectsDelegate.RectDataRole:\n prim = index.internalPointer().get_prim()\n rects = []\n if prim == self.stage.GetDefaultPrim():\n rects.append(\n {\"text\": \"DFT\",\n \"tooltip\": \"This prim is the default prim on \"\n \"the stage's root layer.\",\n \"background-color\": \"#553333\"}\n )\n if prim.HasAuthoredPayloads() or prim.HasAuthoredReferences():\n rects.append(\n {\"text\": \"REF\",\n \"tooltip\": \"This prim has one or more references \"\n \"and/or payloads.\",\n \"background-color\": \"#333355\"},\n )\n if prim.HasVariantSets():\n rects.append(\n {\"text\": \"VAR\",\n \"tooltip\": \"One or more variant sets exist on this prim.\",\n \"background-color\": \"#335533\"},\n )\n\n return rects\n # endregion"
},
{
"identifier": "ReferenceListWidget",
"path": "usd_qtpy/references.py",
"snippet": "class ReferenceListWidget(QtWidgets.QDialog):\n \"\"\"Manage lists of references/payloads for a single prim\"\"\"\n def __init__(self, prim, parent=None):\n super(ReferenceListWidget, self).__init__(parent=parent)\n\n title = \"USD Reference/Payload Editor\"\n if prim and prim.IsValid():\n title = f\"{title}: {prim.GetPath().pathString}\"\n self.setWindowTitle(title)\n\n layout = QtWidgets.QVBoxLayout(self)\n layout.addWidget(QtWidgets.QLabel(\"References\"))\n references = QtWidgets.QVBoxLayout()\n references.setContentsMargins(0, 0, 0, 0)\n layout.addLayout(references)\n\n add_icon = get_icon(\"plus\")\n add_button = DropFilesPushButton(add_icon, \"\")\n add_button.setToolTip(\"Add reference\")\n add_button.clicked.connect(self.on_add_reference)\n add_button.files_dropped.connect(partial(self.on_dropped_files,\n \"references\"))\n layout.addWidget(add_button)\n\n layout.addWidget(QtWidgets.QLabel(\"Payloads\"))\n payloads = QtWidgets.QVBoxLayout()\n payloads.setContentsMargins(0, 0, 0, 0)\n layout.addLayout(payloads)\n\n add_button = DropFilesPushButton(add_icon, \"\")\n add_button.setToolTip(\"Add payload\")\n add_button.clicked.connect(self.on_add_payload)\n add_button.files_dropped.connect(partial(self.on_dropped_files,\n \"payloads\"))\n layout.addWidget(add_button)\n\n layout.addStretch()\n\n # Add some standard buttons (Cancel/Ok) at the bottom of the dialog\n buttons = QtWidgets.QDialogButtonBox(\n QtWidgets.QDialogButtonBox.Ok |\n QtWidgets.QDialogButtonBox.Cancel,\n QtCore.Qt.Horizontal,\n self\n )\n layout.addWidget(buttons)\n\n buttons.accepted.connect(self.accept)\n buttons.rejected.connect(self.reject)\n\n self.prim = prim\n self.references_layout = references\n self.payloads_layout = payloads\n\n self.refresh()\n\n self.accepted.connect(self.on_accept)\n\n def refresh(self):\n\n def clear(layout):\n while layout_item:= layout.takeAt(0):\n widget = layout_item.widget()\n if widget:\n widget.deleteLater()\n\n clear(self.payloads_layout)\n clear(self.references_layout)\n\n # Store items and widgets for the references\n prim = self.prim\n\n stack = prim.GetPrimStack()\n\n # Get all references/payloads across the prim stack\n references = []\n payloads = []\n for prim_spec in stack:\n references.extend(get_applied_items(prim_spec.referenceList))\n payloads.extend(get_applied_items(prim_spec.payloadList))\n\n for reference in references:\n self._add_widget(self.references_layout, item=reference)\n\n for payload in payloads:\n self._add_widget(self.payloads_layout, item=payload)\n\n def on_dropped_files(self, key, urls):\n files = [url.toLocalFile() for url in urls]\n if key == \"references\":\n for filepath in files:\n self._add_widget(self.references_layout,\n item=Sdf.Reference(assetPath=filepath))\n elif key == \"payloads\":\n for filepath in files:\n self._add_widget(self.payloads_layout,\n item=Sdf.Payload(assetPath=filepath))\n\n def on_add_payload(self):\n self._add_widget(self.payloads_layout, item_type=Sdf.Payload)\n\n def on_add_reference(self):\n self._add_widget(self.references_layout, item_type=Sdf.Reference)\n\n def _add_widget(self, layout, item=None, item_type=None):\n def remove_widget(layout, widget):\n index = layout.indexOf(widget)\n if index >= 0:\n layout.takeAt(index)\n widget.deleteLater()\n\n widget = RefPayloadWidget(item=item, item_type=item_type)\n widget.delete_requested.connect(partial(remove_widget, layout, widget))\n layout.addWidget(widget)\n\n def on_accept(self):\n Change = namedtuple(\"change\", [\"old\", \"new\"])\n\n # Get the 
configured references/payloads\n items = defaultdict(list)\n for key, layout in {\n \"references\": self.references_layout,\n \"payloads\": self.payloads_layout\n }.items():\n for i in range(layout.count()):\n layout_item = layout.itemAt(i)\n widget = layout_item.widget() # -> RefPayloadWidget\n\n new_item = widget.item\n if not new_item:\n # Skip empty entries\n continue\n change = Change(old=widget.original_item, new=new_item)\n items[key].append(change)\n\n # Update all prim specs on the prim's current stack to the references\n # TODO: Preserve references/payloads specs across the different layers\n # and only update the changes that have an original item and remove\n # entries not amongst the new changes + ensure ordering is correct\n # For now we completely clear all specs\n prim = self.prim\n for prim_spec in list(prim.GetPrimStack()):\n if prim_spec.expired:\n continue\n\n # Remove any opinions on references/payloads\n prim_spec.referenceList.ClearEdits()\n prim_spec.payloadList.ClearEdits()\n\n references = prim.GetReferences()\n for reference_item in items[\"references\"]:\n references.AddReference(reference_item.new)\n\n payloads = prim.GetPayloads()\n for payload_item in items[\"payloads\"]:\n payloads.AddPayload(payload_item.new)"
},
{
"identifier": "CreateVariantSetDialog",
"path": "usd_qtpy/variants.py",
"snippet": "class CreateVariantSetDialog(QtWidgets.QDialog):\n \"\"\"Prompt for variant set name\"\"\"\n def __init__(self, parent=None):\n super(CreateVariantSetDialog, self).__init__(parent=parent)\n\n self.setWindowTitle(\"Create Variant Set\")\n\n form = QtWidgets.QFormLayout(self)\n\n name = QtWidgets.QLineEdit()\n form.addRow(QtWidgets.QLabel(\"Variant Set Name:\"), name)\n\n # Add some standard buttons (Cancel/Ok) at the bottom of the dialog\n buttons = QtWidgets.QDialogButtonBox(\n QtWidgets.QDialogButtonBox.Ok |\n QtWidgets.QDialogButtonBox.Cancel,\n QtCore.Qt.Horizontal,\n self\n )\n form.addRow(buttons)\n\n buttons.accepted.connect(self.accept)\n buttons.rejected.connect(self.reject)\n\n self.name = name\n\n @classmethod\n def get_variant_set_name(cls, parent=None):\n prompt = cls(parent=parent)\n if prompt.exec_() == QtWidgets.QDialog.Accepted:\n name = prompt.name.text()\n if name:\n return name"
},
{
"identifier": "VariantSetsWidget",
"path": "usd_qtpy/variants.py",
"snippet": "class VariantSetsWidget(QtWidgets.QDialog):\n \"\"\"Manage the variant sets and variants for a Usd.Prim\"\"\"\n def __init__(self, prim, parent=None):\n super(VariantSetsWidget, self).__init__(parent=parent)\n\n title = \"Variant Sets\"\n if prim and prim.IsValid():\n title = f\"{title}: {prim.GetPath().pathString}\"\n self.setWindowTitle(title)\n\n layout = QtWidgets.QVBoxLayout(self)\n\n add_icon = get_icon(\"plus\")\n add_button = QtWidgets.QPushButton(add_icon, \"Add variant set\")\n add_button.setToolTip(\"Add variant set\")\n add_button.clicked.connect(self.on_add_variant_set)\n layout.addWidget(add_button)\n\n variant_sets_layout = QtWidgets.QVBoxLayout()\n variant_sets_layout.setContentsMargins(0, 0, 0, 0)\n layout.addLayout(variant_sets_layout)\n\n layout.addStretch()\n\n self.prim = prim\n self.variant_sets_layout = variant_sets_layout\n\n self.refresh()\n\n def refresh(self):\n\n def clear(layout):\n for i in reversed(range(layout.count())):\n layout_item = layout.takeAt(i)\n widget = layout_item.widget()\n if widget:\n widget.deleteLater()\n layout.invalidate()\n\n clear(self.variant_sets_layout)\n\n # Store items and widgets for the references\n prim = self.prim\n\n # TODO: It is possible to have an authored variant selection\n # without the variant set being authored in the current stage.\n # For those cases we might want to expose being able to set e.g.\n # a custom variant selection and display those that do not have\n # an existing variant or even variant set on the composed stage.\n # E.g. see: Usd.VariantSets.GetAllVariantSelections\n\n variant_sets = prim.GetVariantSets()\n for variant_set_name in variant_sets.GetNames():\n # Add a variant set widget with its variants\n variant_set = variant_sets.GetVariantSet(variant_set_name)\n variant_set_widget = VariantSetWidget(variant_set=variant_set,\n parent=self)\n variant_set_widget.variant_set_deleted.connect(self.refresh)\n self.variant_sets_layout.addWidget(variant_set_widget)\n\n def on_add_variant_set(self):\n log.debug(\"Add variant set\")\n prim = self.prim\n assert prim.IsValid()\n name, ok = QtWidgets.QInputDialog.getText(\n self,\n \"Create Variant Set\",\n \"Variant Set Name:\"\n )\n if ok and name:\n # Create the variant set, even allowing to create it\n # without populating a variant name. If it already exists\n # this does nothing.\n prim.GetVariantSets().AddVariantSet(name)\n\n self.refresh()"
}
] | import logging
from functools import partial
from qtpy import QtWidgets, QtCore
from pxr import Sdf
from .lib.usd import (
get_prim_types_by_group,
parent_prims,
remove_spec,
unique_name,
)
from .lib.usd_merge_spec import copy_spec_merge
from .lib.qt import iter_model_rows
from .prim_delegate import DrawRectsDelegate
from .prim_hierarchy_model import HierarchyModel
from .references import ReferenceListWidget
from .variants import CreateVariantSetDialog, VariantSetsWidget | 8,495 | type_name = action.text()
# Ensure unique name
prim_path = parent_path.AppendChild(type_name)
prim_path = unique_name(stage, prim_path)
if type_name == "Def":
# Typeless
type_name = ""
# Define prim and signal change to the model
# TODO: Remove signaling once model listens to changes
current_rows = model.rowCount(index)
model.beginInsertRows(index, current_rows, current_rows+1)
new_prim = stage.DefinePrim(prim_path, type_name)
self.select_paths([new_prim.GetPath()])
model.endInsertRows()
# Create Prims
create_prim_menu = menu.addMenu("Create Prim")
create_prim_menu.addAction("Def")
create_prim_menu.addAction("Scope")
create_prim_menu.addAction("Xform")
create_prim_menu.addSeparator()
create_prim_menu.addAction("Cone")
create_prim_menu.addAction("Cube")
create_prim_menu.addAction("Cylinder")
create_prim_menu.addAction("Sphere")
create_prim_menu.addSeparator()
create_prim_menu.addAction("DistantLight")
create_prim_menu.addAction("DomeLight")
create_prim_menu.addAction("RectLight")
create_prim_menu.addAction("SphereLight")
create_prim_menu.addSeparator()
create_prim_menu.addAction("Camera")
create_prim_menu.addSeparator()
# TODO: Cache this submenu?
types_by_group = get_prim_types_by_group()
all_registered_menu = create_prim_menu.addMenu("All Registered")
for group, types in types_by_group.items():
group_menu = all_registered_menu.addMenu(group)
for type_name in types:
group_menu.addAction(type_name)
create_prim_menu.triggered.connect(create_prim)
# Set and clear default prim
if parent_path.IsRootPrimPath():
# This prim is a primitive directly under root so can be an
# active prim
if parent == stage.GetDefaultPrim():
label = "Clear default prim"
action = menu.addAction(label)
tip = (
"Clear the default prim from the stage's root layer.\n"
)
action.setToolTip(tip)
action.setStatusTip(tip)
action.triggered.connect(partial(stage.ClearDefaultPrim))
else:
label = "Set as default prim"
action = menu.addAction(label)
tip = "Set prim as default prim on the stage's root layer."
action.setToolTip(tip)
action.setStatusTip(tip)
action.triggered.connect(partial(stage.SetDefaultPrim, parent))
# Allow referencing / payloads / variants management
if not parent_path.IsAbsoluteRootPath():
action = menu.addAction("Add reference/payload..")
action.triggered.connect(partial(
self.on_manage_prim_reference_payload, parent)
)
def _add_variant_set(prim):
# TODO: maybe directly allow managing the individual variants
# from the same UI; and allow setting the default variant
# Prompt for a variant set name
name = CreateVariantSetDialog.get_variant_set_name(parent=self)
if name is not None:
# Create the variant set, even allowing to create it
# without populating a variant name
prim.GetVariantSets().AddVariantSet(name)
action = menu.addAction("Create Variant Set")
action.triggered.connect(partial(_add_variant_set, parent))
# Get mouse position
global_pos = self.viewport().mapToGlobal(point)
menu.exec_(global_pos)
def on_manage_prim_reference_payload(self, prim):
widget = ReferenceListWidget(prim=prim, parent=self)
widget.resize(800, 300)
widget.show()
def on_prim_tag_clicked(self, event, index, block):
text = block.get("text")
if text == "DFT":
# Allow to clear the prim from a menu
model = self.model()
stage = model.stage
menu = QtWidgets.QMenu(parent=self)
action = menu.addAction("Clear default prim")
tip = (
"Clear the default prim from the stage's root layer.\n"
)
action.setToolTip(tip)
action.setStatusTip(tip)
action.triggered.connect(partial(stage.ClearDefaultPrim))
point = event.position().toPoint()
menu.exec_(self.mapToGlobal(point))
elif text == "REF":
prim = index.data(HierarchyModel.PrimRole)
self.on_manage_prim_reference_payload(prim)
elif text == "VAR":
prim = index.data(HierarchyModel.PrimRole)
|
log = logging.getLogger(__name__)
class View(QtWidgets.QTreeView):
# TODO: Add shortcuts
# CTRL + D: Duplicate
# CTRL + G: Group (add Xform above current selection)
# Delete or backspace: Remove the selected prims
def __init__(self, *args, **kwargs):
super(View, self).__init__(*args, **kwargs)
self.setHeaderHidden(True)
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.on_context_menu)
self._delegate = DrawRectsDelegate(parent=self)
self.setItemDelegateForColumn(0, self._delegate)
self._delegate.rect_clicked.connect(self.on_prim_tag_clicked)
def on_context_menu(self, point):
index = self.indexAt(point)
model = self.model()
stage = model.stage
parent = index.data(HierarchyModel.PrimRole)
if not parent:
parent = stage.GetPseudoRoot()
parent_path = parent.GetPath()
menu = QtWidgets.QMenu(self)
def create_prim(action):
type_name = action.text()
# Ensure unique name
prim_path = parent_path.AppendChild(type_name)
prim_path = unique_name(stage, prim_path)
if type_name == "Def":
# Typeless
type_name = ""
# Define prim and signal change to the model
# TODO: Remove signaling once model listens to changes
current_rows = model.rowCount(index)
model.beginInsertRows(index, current_rows, current_rows+1)
new_prim = stage.DefinePrim(prim_path, type_name)
self.select_paths([new_prim.GetPath()])
model.endInsertRows()
# Create Prims
create_prim_menu = menu.addMenu("Create Prim")
create_prim_menu.addAction("Def")
create_prim_menu.addAction("Scope")
create_prim_menu.addAction("Xform")
create_prim_menu.addSeparator()
create_prim_menu.addAction("Cone")
create_prim_menu.addAction("Cube")
create_prim_menu.addAction("Cylinder")
create_prim_menu.addAction("Sphere")
create_prim_menu.addSeparator()
create_prim_menu.addAction("DistantLight")
create_prim_menu.addAction("DomeLight")
create_prim_menu.addAction("RectLight")
create_prim_menu.addAction("SphereLight")
create_prim_menu.addSeparator()
create_prim_menu.addAction("Camera")
create_prim_menu.addSeparator()
# TODO: Cache this submenu?
types_by_group = get_prim_types_by_group()
all_registered_menu = create_prim_menu.addMenu("All Registered")
for group, types in types_by_group.items():
group_menu = all_registered_menu.addMenu(group)
for type_name in types:
group_menu.addAction(type_name)
create_prim_menu.triggered.connect(create_prim)
# Set and clear default prim
if parent_path.IsRootPrimPath():
# This prim is a primitive directly under root so can be an
# active prim
if parent == stage.GetDefaultPrim():
label = "Clear default prim"
action = menu.addAction(label)
tip = (
"Clear the default prim from the stage's root layer.\n"
)
action.setToolTip(tip)
action.setStatusTip(tip)
action.triggered.connect(partial(stage.ClearDefaultPrim))
else:
label = "Set as default prim"
action = menu.addAction(label)
tip = "Set prim as default prim on the stage's root layer."
action.setToolTip(tip)
action.setStatusTip(tip)
action.triggered.connect(partial(stage.SetDefaultPrim, parent))
# Allow referencing / payloads / variants management
if not parent_path.IsAbsoluteRootPath():
action = menu.addAction("Add reference/payload..")
action.triggered.connect(partial(
self.on_manage_prim_reference_payload, parent)
)
def _add_variant_set(prim):
# TODO: maybe directly allow managing the individual variants
# from the same UI; and allow setting the default variant
# Prompt for a variant set name
name = CreateVariantSetDialog.get_variant_set_name(parent=self)
if name is not None:
# Create the variant set, even allowing to create it
# without populating a variant name
prim.GetVariantSets().AddVariantSet(name)
action = menu.addAction("Create Variant Set")
action.triggered.connect(partial(_add_variant_set, parent))
# Get mouse position
global_pos = self.viewport().mapToGlobal(point)
menu.exec_(global_pos)
def on_manage_prim_reference_payload(self, prim):
widget = ReferenceListWidget(prim=prim, parent=self)
widget.resize(800, 300)
widget.show()
def on_prim_tag_clicked(self, event, index, block):
text = block.get("text")
if text == "DFT":
# Allow clearing the default prim from a context menu
model = self.model()
stage = model.stage
menu = QtWidgets.QMenu(parent=self)
action = menu.addAction("Clear default prim")
tip = (
"Clear the default prim from the stage's root layer.\n"
)
action.setToolTip(tip)
action.setStatusTip(tip)
action.triggered.connect(partial(stage.ClearDefaultPrim))
point = event.position().toPoint()
menu.exec_(self.mapToGlobal(point))
elif text == "REF":
prim = index.data(HierarchyModel.PrimRole)
self.on_manage_prim_reference_payload(prim)
elif text == "VAR":
prim = index.data(HierarchyModel.PrimRole) | widget = VariantSetsWidget(prim=prim, parent=self) | 10 | 2023-11-22 15:56:35+00:00 | 12k |
jefferyZhan/Griffon | llava/model/language_model/mpt/modeling_mpt.py | [
{
"identifier": "attn_bias_shape",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n if (prefix_lm or not causal) or use_sequence_id:\n return (1, n_heads, seq_len, seq_len)\n return (1, n_heads, 1, seq_len)\n elif prefix_lm or use_sequence_id:\n return (1, 1, seq_len, seq_len)\n return None\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "build_attn_bias",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n (device, dtype) = (attn_bias.device, attn_bias.dtype)\n attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))\n return attn_bias\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "MPTBlock",
"path": "llava/model/language_model/mpt/blocks.py",
"snippet": "class MPTBlock(nn.Module):\n\n def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):\n del kwargs\n super().__init__()\n norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]\n attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]\n self.norm_1 = norm_class(d_model, device=device)\n self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)\n self.norm_2 = norm_class(d_model, device=device)\n self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)\n self.resid_attn_dropout = nn.Dropout(resid_pdrop)\n self.resid_ffn_dropout = nn.Dropout(resid_pdrop)\n\n def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:\n a = self.norm_1(x)\n (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)\n x = x + self.resid_attn_dropout(b)\n m = self.norm_2(x)\n n = self.ffn(m)\n x = x + self.resid_ffn_dropout(n)\n return (x, attn_weights, past_key_value)"
},
{
"identifier": "SharedEmbedding",
"path": "llava/model/language_model/mpt/custom_embedding.py",
"snippet": "class SharedEmbedding(nn.Embedding):\n\n def forward(self, input: Tensor, unembed: bool=False) -> Tensor:\n if unembed:\n return F.linear(input, self.weight)\n return super().forward(input)"
},
{
"identifier": "NORM_CLASS_REGISTRY",
"path": "llava/model/language_model/mpt/norm.py",
"snippet": "NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}"
},
{
"identifier": "MPTConfig",
"path": "llava/model/language_model/mpt/configuration_mpt.py",
"snippet": "class MPTConfig(PretrainedConfig):\n model_type = 'mpt'\n\n def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):\n \"\"\"The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the MLP.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n multiquery_attention (bool): Whether to use multiquery attention implementation.\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. 
Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n \"\"\"\n self.d_model = d_model\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.expansion_ratio = expansion_ratio\n self.max_seq_len = max_seq_len\n self.vocab_size = vocab_size\n self.resid_pdrop = resid_pdrop\n self.emb_pdrop = emb_pdrop\n self.learned_pos_emb = learned_pos_emb\n self.attn_config = attn_config\n self.init_device = init_device\n self.logit_scale = logit_scale\n self.no_bias = no_bias\n self.verbose = verbose\n self.embedding_fraction = embedding_fraction\n self.norm_type = norm_type\n self.use_cache = use_cache\n self.init_config = init_config\n if 'name' in kwargs:\n del kwargs['name']\n if 'loss_fn' in kwargs:\n del kwargs['loss_fn']\n super().__init__(**kwargs)\n self._validate_config()\n\n def _set_config_defaults(self, config, config_defaults):\n for (k, v) in config_defaults.items():\n if k not in config:\n config[k] = v\n return config\n\n def _validate_config(self):\n self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)\n self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)\n if self.d_model % self.n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads')\n if any((prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):\n raise ValueError(\"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1\")\n if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:\n raise ValueError(f\"Unknown attn_impl={self.attn_config['attn_impl']}\")\n if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')\n if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('alibi only implemented with torch and triton attention.')\n if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')\n if self.embedding_fraction > 1 or self.embedding_fraction <= 0:\n raise ValueError('model.embedding_fraction must be between 0 (exclusive) 
and 1 (inclusive)!')\n if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':\n raise ValueError(f\"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.\")\n if self.init_config.get('name', None) is None:\n raise ValueError(f\"self.init_config={self.init_config!r} 'name' needs to be set.\")\n if not self.learned_pos_emb and (not self.attn_config['alibi']):\n raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')"
},
{
"identifier": "AutoTokenizerForMOD",
"path": "llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "class AutoTokenizerForMOD(AutoTokenizer):\n \"\"\"AutoTokenizer + Adaptation for MOD.\n\n A simple wrapper around AutoTokenizer to make instantiating\n an MOD-adapted tokenizer a bit easier.\n\n MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),\n a padding token, and a property to get the token ids of the\n sentinel tokens.\n \"\"\"\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n \"\"\"See `AutoTokenizer.from_pretrained` docstring.\"\"\"\n tokenizer = super().from_pretrained(*args, **kwargs)\n adapt_tokenizer_for_denoising(tokenizer)\n return tokenizer"
},
{
"identifier": "adapt_tokenizer_for_denoising",
"path": "llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):\n \"\"\"Adds sentinel tokens and padding token (if missing).\n\n Expands the tokenizer vocabulary to include sentinel tokens\n used in mixture-of-denoiser tasks as well as a padding token.\n\n All added tokens are added as special tokens. No tokens are\n added if sentinel tokens and padding token already exist.\n \"\"\"\n sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]\n tokenizer.add_tokens(sentinels_to_add, special_tokens=True)\n if tokenizer.pad_token is None:\n tokenizer.add_tokens('<pad>', special_tokens=True)\n tokenizer.pad_token = '<pad>'\n assert tokenizer.pad_token_id is not None\n sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])\n _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids\n tokenizer.sentinel_token_ids = _sentinel_token_ids"
},
{
"identifier": "add_bidirectional_mask_if_missing",
"path": "llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):\n \"\"\"Attempts to add bidirectional_mask to batch if missing.\n\n Raises:\n KeyError if bidirectional_mask is missing and can't be inferred\n \"\"\"\n if 'bidirectional_mask' not in batch:\n if batch.get('mode', None) == 'icl_task':\n batch['bidirectional_mask'] = batch['attention_mask'].clone()\n for (i, continuation_indices) in enumerate(batch['continuation_indices']):\n batch['bidirectional_mask'][i, continuation_indices] = 0\n elif 'labels' in batch and 'attention_mask' in batch:\n batch['bidirectional_mask'] = torch.logical_and(torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100)).type_as(batch['attention_mask'])\n else:\n raise KeyError('No bidirectional_mask in batch and not sure how to construct one.')"
},
{
"identifier": "convert_hf_causal_lm_to_prefix_lm",
"path": "llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:\n \"\"\"Converts a HuggingFace Causal LM to a Prefix LM.\n\n Supported HuggingFace model classes:\n - `GPT2LMHeadModel`\n - `GPTNeoForCausalLM`\n - `GPTNeoXForCausalLM`\n - `GPTJForCausalLM`\n - `BloomForCausalLM`\n - `OPTForCausalLM`\n\n Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the\n `generate` method and/or select underlying methods depending on the model class.\n\n These changes preserve the model API, but add a new input to `forward`: \"bidirectional_mask\".\n\n Notes on training:\n To actually train the converted model as a Prefix LM, training batches will need to indicate\n the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.\n\n **This is not a standard input and requires custom layers either within or after your dataloader.**\n\n In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`\n such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.\n That is, the prefix portion of the sequence should not generate any loss. Loss should only be\n generated by the target portion of the sequence.\n\n Notes on `GPTNeoForCausalLM`:\n To simplify the implementation, \"global\" and \"local\" attention layers are handled differently.\n For \"global\" layers, we handle conversion as described above. For \"local\" layers, which use a\n causal attention mask within a restricted local window, we do not alter the masking.\n\n Notes on `forward` method conversion:\n After conversion, the `forward` method will handle a new input, `bidirectional_mask`,\n which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions\n belonging to the prefix (prefix tokens can attend to one another bidirectionally), and\n 0 indicates token positions belonging to the target.\n\n The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing\n causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset\n the causal masks before returning the result.\n\n Notes on `generate` method conversion:\n After conversion, the `generate` method will have the same signature but will internally\n convert all causal masks to be purely bidirectional, call the original `generate` method, and\n (where appropriate) reset the causal masks before returning the result.\n\n This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token\n \"prompt\" passed to `generate` (which is treated as the prefix) and then sequentially generates\n each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one\n another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and\n previously-generated tokens (also as expected in a Prefix LM).\n\n To preserve the API, the original methods are renamed to `_original_forward` and\n `_original_generate`, and replaced with new `forward` and `generate` methods that wrap\n them, respectively. Although implementation details vary by model class.\n \"\"\"\n if isinstance(model, _SUPPORTED_GPT_MODELS):\n return _convert_gpt_causal_lm_to_prefix_lm(model)\n elif isinstance(model, BloomForCausalLM):\n return _convert_bloom_causal_lm_to_prefix_lm(model)\n elif isinstance(model, OPTForCausalLM):\n return _convert_opt_causal_lm_to_prefix_lm(model)\n else:\n raise TypeError(f'Cannot convert model to Prefix LM. 
' + f'Model does not belong to set of supported HF models:' + f'\\n{_SUPPORTED_HF_MODELS}')"
},
{
"identifier": "init_empty_weights",
"path": "llava/model/language_model/mpt/meta_init_context.py",
"snippet": "@contextmanager\ndef init_empty_weights(include_buffers: bool=False):\n \"\"\"Meta initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the meta device, therefore creating an empty model. Useful when just\n initializing the model would blow the available RAM.\n\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n # Initialize a model with 100 billions parameters in no time and without using any RAM.\n with init_empty_weights():\n tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n ```\n\n <Tip warning={true}>\n\n Any model created under this context manager has no weights. As such you can't do something like\n `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n\n </Tip>\n \"\"\"\n with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:\n yield f"
},
{
"identifier": "MODEL_INIT_REGISTRY",
"path": "llava/model/language_model/mpt/param_init_fns.py",
"snippet": "MODEL_INIT_REGISTRY = {'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_}"
},
{
"identifier": "generic_param_init_fn_",
"path": "llava/model/language_model/mpt/param_init_fns.py",
"snippet": "def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):\n del kwargs\n if verbose > 1:\n warnings.warn(f'If model has bias parameters they are initialized to 0.')\n init_div_is_residual = init_div_is_residual\n if init_div_is_residual is False:\n div_is_residual = 1.0\n elif init_div_is_residual is True:\n div_is_residual = math.sqrt(2 * n_layers)\n elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):\n div_is_residual = init_div_is_residual\n elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():\n div_is_residual = float(init_div_is_residual)\n else:\n div_is_residual = 1.0\n raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')\n if init_div_is_residual is not False:\n if verbose > 1:\n warnings.warn(f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.')\n if isinstance(module, nn.Linear):\n if hasattr(module, '_fused'):\n fused_init_helper_(module, init_fn_)\n else:\n init_fn_(module.weight)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n if init_div_is_residual is not False and getattr(module, '_is_residual', False):\n with torch.no_grad():\n module.weight.div_(div_is_residual)\n elif isinstance(module, nn.Embedding):\n if emb_init_std is not None:\n std = emb_init_std\n if std == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')\n elif emb_init_uniform_lim is not None:\n lim = emb_init_uniform_lim\n if isinstance(lim, Sequence):\n if len(lim) > 2:\n raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')\n if lim[0] == lim[1]:\n warnings.warn(f'Embedding layer initialized to {lim[0]}.')\n else:\n if lim == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n lim = [-lim, lim]\n (a, b) = lim\n emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')\n else:\n emb_init_fn_ = init_fn_\n emb_init_fn_(module.weight)\n elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):\n if verbose > 1:\n warnings.warn(f'Norm weights are set to 1. 
If norm layer has a bias it is initialized to 0.')\n if hasattr(module, 'weight') and module.weight is not None:\n torch.nn.init.ones_(module.weight)\n if hasattr(module, 'bias') and module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.MultiheadAttention):\n if module._qkv_same_embed_dim:\n assert module.in_proj_weight is not None\n assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)\n assert d_model is not None\n _d = d_model\n splits = (0, _d, 2 * _d, 3 * _d)\n for (s, e) in zip(splits[:-1], splits[1:]):\n init_fn_(module.in_proj_weight[s:e])\n else:\n assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)\n assert module.in_proj_weight is None\n init_fn_(module.q_proj_weight)\n init_fn_(module.k_proj_weight)\n init_fn_(module.v_proj_weight)\n if module.in_proj_bias is not None:\n torch.nn.init.zeros_(module.in_proj_bias)\n if module.bias_k is not None:\n torch.nn.init.zeros_(module.bias_k)\n if module.bias_v is not None:\n torch.nn.init.zeros_(module.bias_v)\n init_fn_(module.out_proj.weight)\n if init_div_is_residual is not False and getattr(module.out_proj, '_is_residual', False):\n with torch.no_grad():\n module.out_proj.weight.div_(div_is_residual)\n if module.out_proj.bias is not None:\n torch.nn.init.zeros_(module.out_proj.bias)\n else:\n for _ in module.parameters(recurse=False):\n raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')"
}
] | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func | 7,343 | """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype) | self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max) | 1 | 2023-11-22 03:18:57+00:00 | 12k |
josejuanmartinez/mindcraft | mindcraft/mind/npc.py | [
{
"identifier": "SummarizerTypes",
"path": "mindcraft/memory/summarizer_types.py",
"snippet": "class SummarizerTypes(Enum):\n T5_SMALL = \"Falconsai/text_summarization\""
},
{
"identifier": "STM",
"path": "mindcraft/memory/stm.py",
"snippet": "class STM:\n def __init__(self,\n ltm: LTM,\n capacity: int = 5,\n summarizer: SummarizerTypes = SummarizerTypes.T5_SMALL,\n max_summary_length: int = 230,\n min_summary_length: int = 30):\n \"\"\" Short-term memory is used to include always a summarized version of what has been discussed lately\n :param ltm: The Long-Term Memory object\n :param capacity: How many interactions from ltm to store\n :param summarizer: One of `SummarizerTypes` to use for including the summary of last interactions\n :param max_summary_length: max length of the summary\n :param min_summary_length: min length of the summary\n \"\"\"\n self._ltm = ltm\n self._summarizer = summarizer\n self._summarizer_model = pipeline(\"summarization\", model=str(summarizer.value))\n self._max_summary_length = max_summary_length\n self._min_summary_length = min_summary_length\n self._capacity = capacity\n self._summary = self.initialize_summary()\n\n def initialize_summary(self) -> str:\n \"\"\"\n Retrieves `self.capacity` last interactions from LTM and stores summarized\n :return: the summary\n \"\"\"\n search_result = self._ltm.get_last_interactions(self._capacity)\n text = \".\".join(search_result.documents)\n if len(text) < self._min_summary_length:\n return text\n text = self._summarizer_model(text,\n max_length=min(len(text), self._max_summary_length),\n min_length=self._min_summary_length,\n do_sample=False)\n return text[0]['summary_text']\n\n def refresh_summary(self, last_interaction: str):\n \"\"\"\n Refresh the summary with the last interaction\n :param last_interaction: last answer of the NPC\n :return: summary\n \"\"\"\n self.summary = \".\".join([self.initialize_summary(), last_interaction])\n return self.summary\n\n @property\n def summary(self):\n \"\"\" retrieves the summary property\"\"\"\n return self._summary\n\n @summary.setter\n def summary(self, value: str):\n \"\"\" sets the summary property\"\"\"\n self._summary = value"
},
{
"identifier": "StoresTypes",
"path": "mindcraft/infra/vectorstore/stores_types.py",
"snippet": "class StoresTypes(Enum):\n CHROMA = 0"
},
{
"identifier": "Feedback",
"path": "mindcraft/infra/sft/feedback.py",
"snippet": "class Feedback:\n def __init__(self,\n character_name: str,\n mood: Mood,\n conversational_style: ConversationalStyle,\n interaction: str, answer: str):\n \"\"\"\n Populates a dataset to be used in Supervised Fine-tuning as Preference Data and create your own\n NPC based on finetuned LLMs\n :param character_name: name of the NPC\n :param mood: mood string (e.g., 'angry')\n :param conversational_style: Conversational Style Object of the NPC, which will be updated using this\n interaction\n :param interaction: question/topic asked to the NPC\n :param answer: answer from the NPC\n \"\"\"\n self._character_name = character_name\n self._mood = mood\n self._conversational_style = conversational_style\n self._interaction = interaction\n self._answer = answer\n\n def accept(self,\n folder: str = STYLES_DATA_PATH,\n separator: str = SEPARATOR,\n mood: Mood = None):\n \"\"\"\n Accepts this interaction as valid for training purposes. It will populate it to a CSV and also store it as a\n conversational style for the character for future interactions.\n :param folder: csv path where to save the feedback\n :param separator: csv separator. Default: SEPARATOR (||)\n :param mood: Mood to overwrite (if not set, self._npc.mood will be taken)\n \"\"\"\n if mood is None:\n mood = self._mood\n with open(os.path.join(folder, self._character_name, \"sft.csv\"), \"a\") as f:\n f.write(separator.join([self._character_name if self._character_name is not None else '',\n mood.feature if mood is not None else Mood.DEFAULT,\n self._interaction.encode(\"unicode_escape\").decode(\"utf-8\"),\n self._answer.encode(\"unicode_escape\").decode(\"utf-8\")]))\n f.write(\"\\n\")\n logger.info(f\"Interaction appended to {folder}\")\n\n self._conversational_style.memorize(self._answer, self._mood)"
},
{
"identifier": "Motivation",
"path": "mindcraft/features/motivation.py",
"snippet": "class Motivation:\n def __init__(self, feature: str = None):\n \"\"\"\n Class that defines the motivations of a NPC.\n :param feature: the description of the motivation, for example, `Seeking the destruction of the all living`.\n \"\"\"\n self._feature = feature\n\n @property\n def feature(self):\n \"\"\"\n Getter of the `feature` property\n :return: string\n \"\"\"\n return self._feature\n\n @feature.setter\n def feature(self, value: str):\n \"\"\"\n Setter of the `feature` property\n :param value: string of the feature.\n \"\"\"\n self._feature = value"
},
{
"identifier": "Personality",
"path": "mindcraft/features/personality.py",
"snippet": "class Personality:\n def __init__(self, feature: str):\n \"\"\"\n Class that defines a permanent personality feature of a NPC. If you are looking for a feature that can change\n over the time, use `Mood` instead\n :param feature: the name of the personality feature, for example, `wise`.\n \"\"\"\n self._feature = feature\n\n @property\n def feature(self):\n \"\"\"\n Getter of the `feature` property\n :return: string\n \"\"\"\n return self._feature\n\n @feature.setter\n def feature(self, value: str):\n \"\"\"\n Setter of the `feature` property\n :param value: string of the feature.\n \"\"\"\n self._feature = value"
},
{
"identifier": "World",
"path": "mindcraft/lore/world.py",
"snippet": "class World:\n _instance = None\n\n def __new__(cls, *args, **kwargs):\n \"\"\"\n World story. It stores everything that happened in a world.\n They are kept in the vector store.\n Not every NPC will know what happened in the world. Metadata will be used.\n :param world_id: the unique `id` of the character\n :param store: element of type StoresTypes\n :param ltm_embeddings: Embeddings to use in LTM in the Vector Store.\n :param llm_type: Embeddings to use in LTM in the Vector Store.\n :param world_path: Custom path where to store the data of the world. If not set, falls back to WORLD_DATA_PATH\n :param fast: use vLLM fast inference (requires vLLM running in docker)\n :param fast: use vLLM fast inference in cases vLLM is not in local but served in an external server.\n In this case, an HTTP connection will be established\n \"\"\"\n if 'world_name' not in kwargs:\n raise Exception(\"To instantiate a world, please add the name of the world in `world_name`\")\n\n if 'store_type' not in kwargs:\n raise Exception(\"`store_type` not found in World() initializer\")\n\n if 'embeddings' not in kwargs:\n logger.warning(\"`embeddings` not found in World() initializer. \"\n f\"Initializing to {str(EmbeddingsTypes.MINILM.value)}\")\n\n if 'fast' in kwargs and not isinstance(kwargs.get('fast'), bool):\n raise Exception(\"The value for `fast` param should be True or False\")\n\n if 'remote' in kwargs and not isinstance(kwargs.get('remote'), bool):\n raise Exception(\"The value for `remote` param should be True or False\")\n\n if 'streaming' in kwargs and not isinstance(kwargs.get('streaming'), bool):\n raise Exception(\"The value for `streaming` param should be True or False\")\n\n if 'llm_type' not in kwargs:\n logger.warning(f\"`llm_type` not found in World() initializer. 
Initializing to {LLMType.ZEPHYR7B_AWQ}\")\n elif not isinstance(kwargs.get('llm_type'), LLMType):\n raise Exception(f\"`llm_type` should be of type `LLMType`\")\n\n create_world = False\n destroying_world = False\n\n if cls._instance is None:\n create_world = True\n elif ('recreate' in kwargs and kwargs.get('recreate')) or(kwargs.get('world_name') != cls._instance.world_name):\n create_world = True\n destroying_world = True\n\n if create_world:\n if destroying_world:\n logger.info(f\"Changing world from {cls._instance.world_name} to {kwargs.get('world_name')}\")\n\n cls._instance = super().__new__(cls)\n cls._instance._world_name = kwargs.get('world_name')\n cls._instance._embeddings = kwargs.get('embeddings') if 'embeddings' in kwargs else EmbeddingsTypes.MINILM\n cls._instance._store_type = kwargs.get('store_type')\n cls._instance._llm_type = kwargs.get('llm_type') if 'llm_type' in kwargs else LLMType.ZEPHYR7B_AWQ\n cls._instance._world_data_path = kwargs.get('path') if 'path' in kwargs else WORLD_DATA_PATH\n cls._instance._fast = kwargs.get('fast') if 'fast' in kwargs else False\n cls._instance._remote = kwargs.get('remote') if cls._instance._fast and 'remote' in kwargs else False\n cls._instance._streaming = kwargs.get('streaming') \\\n if cls._instance._remote and 'streaming' in kwargs else False\n cls._instance._llm = None\n cls._instance._npcs = dict()\n\n match cls._instance._store_type.value:\n case StoresTypes.CHROMA.value:\n try:\n from mindcraft.infra.vectorstore.chroma import Chroma\n except ImportError:\n raise Exception(f\"To use `chromadb` as your vector store, please install it first using pip:\\n\"\n f\"`pip install chromadb`\")\n\n cls._instance._store = Chroma(cls._instance._world_data_path,\n cls._instance._world_name,\n cls._instance._embeddings)\n case _:\n raise NotImplementedError(f\"{kwargs.get('store_type')} not implemented\")\n\n if cls._instance._remote:\n print(\"Client for the Remote server configured. 
Please start your server running:\\n\"\n f\"`python -m vllm.entrypoints.openai.api_server \"\n f\"--model \\\"{cls._instance._llm_type.value['name']}\\\" --trust-remote-code &`\")\n print(f\"Mindcraft will try to reach out this server:\\n{FAST_INFERENCE_URL}\\n\")\n print(f\"If that's not the right HOST/PORT, overwrite them setting env vars `MINDCRAFT_HOST` and \"\n f\"`MINDCRAFT_PORT`.\")\n\n return cls._instance\n\n @property\n def embeddings(self):\n \"\"\" Getter for the embeddings property\"\"\"\n if self._instance is None:\n return None\n return self._instance._embeddings\n\n @embeddings.setter\n def embeddings(self, value: EmbeddingsTypes):\n \"\"\" Setter for the embeddings property\"\"\"\n if self._instance is None:\n return\n self._instance._embeddings = value\n\n @property\n def llm_type(self):\n \"\"\" Getter for the llm_type property\"\"\"\n if self._instance is None:\n return None\n return self._instance._llm_type\n\n @llm_type.setter\n def llm_type(self, value: LLMType):\n \"\"\" Setter for the llm_type property\"\"\"\n if self._instance is None:\n return\n self._instance._llm_type = value\n\n @property\n def llm(self):\n \"\"\" Getter for the llm_type property\"\"\"\n if self._instance is None:\n return None\n return self._instance._llm\n\n @llm.setter\n def llm(self, value: LLM):\n \"\"\" Setter for the llm_type property\"\"\"\n if self._instance is None:\n return\n self._instance._llm = value\n\n @property\n def npcs(self):\n \"\"\" Getter for the npcs property\"\"\"\n if self._instance is None:\n return None\n return self._instance._npcs\n\n @npcs.setter\n def npcs(self, value: dict):\n \"\"\" Setter for the npcs property\"\"\"\n if self._instance is None:\n return\n self._instance._npcs = value\n\n @property\n def fast(self):\n \"\"\" Getter for the fast property\"\"\"\n if self._instance is None:\n return None\n return self._instance._fast\n\n @fast.setter\n def fast(self, value: bool):\n \"\"\" Setter for the fast property\"\"\"\n if self._instance is None:\n return\n self._instance._fast = value\n\n @property\n def remote(self):\n \"\"\" Getter for the remote property\"\"\"\n if self._instance is None:\n return None\n return self._instance._remote\n\n @remote.setter\n def remote(self, value: bool):\n \"\"\" Setter for the remote property\"\"\"\n if self._instance is None:\n return\n self._instance._remote = value\n\n @property\n def streaming(self):\n \"\"\" Getter for the streaming property\"\"\"\n if self._instance is None:\n return None\n return self._instance._streaming\n\n @streaming.setter\n def streaming(self, value: bool):\n \"\"\" Setter for the streaming property\"\"\"\n if self._instance is None:\n return\n self._instance._streaming = value\n\n @property\n def world_name(self):\n \"\"\" Getter for the world_name property\"\"\"\n if self._instance is None:\n return None\n return self._instance._world_name\n\n @world_name.setter\n def world_name(self, value: str):\n \"\"\" Setter for the world_name property\"\"\"\n if self._instance is None:\n return\n self._instance._world_name = value\n\n @property\n def store(self):\n \"\"\" Getter for the store property\"\"\"\n if self._instance is None:\n return None\n return self._instance._store\n\n @store.setter\n def store(self, value: Store):\n \"\"\" Setter for the store property\"\"\"\n if self._instance is None:\n return\n self._instance._store = value\n\n @property\n def store_type(self):\n \"\"\" Getter for the store_type property\"\"\"\n if self._instance is None:\n return None\n return 
self._instance._store_type\n\n @store_type.setter\n def store_type(self, value: Store):\n \"\"\" Setter for the store_type property\"\"\"\n if self._instance is None:\n return\n self._instance._store_type = value\n\n @classmethod\n def is_created(cls) -> bool:\n \"\"\":return Returns true if the Singleton instance of the World is already created. False otherwise\"\"\"\n return cls._instance is not None\n\n @classmethod\n def get_lore(cls,\n topic: str,\n num_results: int = 5,\n known_by: str = None,\n exact_match: str = None,\n min_similarity: float = 0.85) -> SearchResult:\n \"\"\"\n Gets the lore from the world relevant to a topic, and filtered by who knows about it (known_by). You can use\n `num_results` to get the top-n results and `exact_match` if you want the results to include something literal.\n :param topic: the topic you are looking for in the Vector Store\n :param num_results: the max. number of results to retrieve\n :param known_by: filters by who know about this piece of lore. By default, (None) will look for commonly known\n by all NPCs.\n :param exact_match: Only returns documents which include literal expressions\n :param min_similarity: The minimum similarity the document should have compared to the topic\n :return SearchResult\n \"\"\"\n\n all_known_by = [settings.ALL]\n if known_by is not None and known_by != settings.ALL:\n all_known_by.append(known_by)\n\n return cls._instance.store.query(\n topic,\n num_results,\n all_known_by,\n exact_match,\n min_similarity)\n\n @classmethod\n def add_lore(cls,\n lore_text: str,\n lore_id: str,\n known_by: list[str]):\n \"\"\"\n Stores a piece of lore which happened in a world.\n :param lore_text: chronicle to be stored\n :param lore_id: the id of the piece of lore\n :param known_by: list of character_ids who know the chronicle\n \"\"\"\n logger.info(f\"Processing {lore_id} [{lore_text[:10]}...]\")\n cls._instance.store.add_to_collection(\n text=lore_text,\n metadata={\"known_by\": SEPARATOR.join(known_by)},\n text_id=lore_id\n )\n\n @classmethod\n def book_to_world(\n cls,\n book_path: str,\n text_splitter: TextSplitterTypes,\n max_units: int,\n overlap: int,\n known_by: list[str] = None,\n encoding='utf-8'):\n \"\"\"\n Reads a file describing a world (a book, for example). Splits the text into small chunks and stores them\n in the world. You can use any of the text splitters available in TextSplitterTypes.\n :param book_path: the path to the book\n :param text_splitter: one of those available in TextSplitterTypes (TokenTextSplitter, SentenceTextSplitter...)\n :param known_by: known by characters. 
If None, `all` will be included\n :param overlap: number of units (tokens, sentences) to overlap with previous/next chunks\n :param max_units: number of units (tokens, sentences) to accumulate in a chunk\n :param encoding: encoding of the books\n \"\"\"\n with open(book_path, 'r', encoding=encoding) as f:\n book = f.read()\n\n match text_splitter:\n case TextSplitterTypes.MAX_TOKENS_SPLITTER:\n text_splitter = TokenTextSplitter(\n overlap=overlap,\n max_units=max_units\n )\n case TextSplitterTypes.SENTENCE_SPLITTER:\n text_splitter = SentenceTextSplitter(\n overlap=overlap,\n max_units=max_units\n )\n case _:\n raise NotImplementedError(f\"{str(text_splitter)} not implemented\")\n\n loading = ['|', '/', '-', '\\\\']\n for i, chunk in enumerate(text_splitter.split_text(book)):\n print(f\"\\r{loading[i % len(loading)]}\", end=\"\")\n cls.add_lore(chunk,\n str(i),\n known_by if known_by is not None else [ALL])\n print()\n\n @classmethod\n def retrieve_answer_from_llm(cls,\n prompt: str,\n max_tokens: int = 100,\n do_sample: bool = True,\n temperature: float = 0.8) -> Union[Iterator[str], str]:\n \"\"\"\n Sends a prompt to the LLM. You can specify the max. number of tokens to retrieve and if you do sampling when\n generating the text.\n :param prompt: the prompt to use\n :param max_tokens: max tokens to receive\n :param do_sample: apply stochastic selection of tokens to prevent always generating the same wording.\n :param temperature: temperature or how creative the answer should be\n :return: an iterator to the text of the answer (streaming=True) or the answer (streaming=False)\n \"\"\"\n if cls._instance.fast:\n if cls._instance.llm is None:\n if cls._instance.remote:\n cls._instance.llm = RemoteVLLM(cls._instance.llm_type, temperature)\n else:\n cls._instance.llm = LocalVLLM(cls._instance.llm_type, temperature)\n else:\n if cls._instance.llm is None:\n cls._instance.llm = LocalLLM(cls._instance.llm_type, temperature)\n\n for chunk in cls._instance.llm.retrieve_answer(prompt,\n max_tokens,\n do_sample,\n cls._instance.llm_type.value['template'],\n cls._instance.streaming):\n yield chunk\n\n @classmethod\n def get_instance(cls):\n \"\"\" Returns the Singleton instance of the World\"\"\"\n return cls._instance\n\n @classmethod\n def delete_collection(cls):\n \"\"\"\n Deletes a collection from the Vector Store\n \"\"\"\n match cls._instance.store_type.value:\n case StoresTypes.CHROMA.value:\n try:\n from mindcraft.infra.vectorstore.chroma import Chroma\n except ImportError:\n raise Exception(f\"To use `chromadb` as your vector store, please install it first using pip:\\n\"\n f\"`pip install chromadb`\")\n cls._instance.store.delete_collection()\n case _:\n raise NotImplementedError(f\"{cls._instance.store_type} not implemented\")\n\n @classmethod\n def create_prompt(cls,\n memories: list[str],\n world_knowledge: list[str],\n character_name: str,\n topic: str,\n personalities: list[str],\n motivations: list[str],\n conversational_style: list[str],\n mood: str = None) -> str:\n \"\"\"\n Static method that creates the prompt to send to the LLM, gathering all the information from the world,\n past interactions, personalities, motivation, mood, conversational styles, etc.\n :param memories: A list of past interactions with a specific character about this topic\n :param world_knowledge: Pieces of lore/knowledge in the world about this topic\n :param character_name: The name of the character\n :param topic: The topic you are asking about\n :param personalities: A list of personalities of the NPC who 
is answering. For example: `wise`, `intelligent`\n :param motivations: A list of motivations seeked by the NPC who is answering. For example:\n `protecting the nature`\n :param conversational_style: A list of examples of a conversation which happened when the NPC was in a similar\n mood\n :param mood: The current mood of the NPC\n :return: the prompt\n \"\"\"\n\n return Prompt.create(memories,\n world_knowledge,\n character_name,\n cls._instance.world_name,\n topic,\n personalities,\n motivations,\n conversational_style,\n mood,\n prompt_template=cls._instance.llm_type)"
},
{
"identifier": "EmbeddingsTypes",
"path": "mindcraft/infra/embeddings/embeddings_types.py",
"snippet": "class EmbeddingsTypes(Enum):\n MINILM = \"all-MiniLM-L6-v2\""
},
{
"identifier": "LTM",
"path": "mindcraft/memory/ltm.py",
"snippet": "class LTM:\n def __init__(self,\n store_type: StoresTypes,\n character_name: str,\n ltm_embeddings: EmbeddingsTypes = EmbeddingsTypes.MINILM):\n \"\"\"\n Long-term memory. It stores everything that happened to a character.\n They are kept in the vector store, so the retrieval is slower than the STM.\n :param character_name: the unique `id` of the character\n :param ltm_embeddings: Embeddings to use in LTM in the VectorS Store.\n \"\"\"\n match store_type.value:\n case StoresTypes.CHROMA.value:\n try:\n from mindcraft.infra.vectorstore.chroma import Chroma\n except ImportError:\n raise Exception(f\"To use `chromadb` as your vector store, please install it first using pip:\\n\"\n f\"`pip install chromadb`\")\n\n self._store = Chroma(LTM_DATA_PATH, character_name, ltm_embeddings)\n\n case _:\n raise NotImplementedError(f\"{store_type} not implemented\")\n\n self._embeddings = ltm_embeddings\n self._character_id = character_name\n\n def memorize(self, text: str, mood: Mood):\n \"\"\"\n Stores a memory or interaction into the vector store, all along with the actual moods which produced it.\n :param text: last interaction happened to store in LTM.\n :param mood: current Mood of the character\n \"\"\"\n self._store.add_to_collection(\n text=text,\n metadata={'mood': mood.feature if mood is not None else Mood.DEFAULT},\n text_id=str(self._store.count()))\n\n def remember_about(self,\n topic: str,\n num_results: int = 3,\n min_similarity: float = 0.85) -> SearchResult:\n \"\"\"\n Retrieves memories from LTM of a character concerning a specific topic.\n :param topic: Topic to remember about\n :param num_results: Max. num of results\n :param min_similarity: min. similarity to filter out irrelevant memories\n \"\"\"\n return self._store.query(\n text=topic,\n num_results=num_results,\n known_by=[ALL, self._character_id],\n min_similarity=min_similarity\n )\n\n def get_last_interactions(self, n: int = 5) -> SearchResult:\n \"\"\" Retrieves last `n` interactions from the LTM\n :param n: number of interactions\n \"\"\"\n return self._store.get_last(n)"
},
{
"identifier": "LOGGER_FORMAT",
"path": "mindcraft/settings.py",
"snippet": "LOGGER_FORMAT = '%(asctime)s,%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s'"
},
{
"identifier": "DATE_FORMAT",
"path": "mindcraft/settings.py",
"snippet": "DATE_FORMAT = '%d-%m-%Y:%H:%M:%S'"
},
{
"identifier": "Mood",
"path": "mindcraft/features/mood.py",
"snippet": "class Mood:\n DEFAULT = 'default'\n\n def __init__(self, feature: str = None):\n \"\"\"\n Class that defines the current mood of a NPC. Moods can change over the time. If you are looking for something\n permanent, use `Personality` instead.\n :param feature: the name of the mood, for example, `angry`.\n \"\"\"\n self._feature = feature if feature is not None else self.DEFAULT\n\n @property\n def feature(self):\n \"\"\"\n Getter of the `feature` property\n :return: string\n \"\"\"\n return self._feature\n\n @feature.setter\n def feature(self, value: str):\n \"\"\"\n Setter of the `feature` property\n :param value: string of the feature.\n \"\"\"\n self._feature = value"
},
{
"identifier": "ConversationalStyle",
"path": "mindcraft/styles/conversational_style.py",
"snippet": "class ConversationalStyle:\n def __init__(self,\n store_type: StoresTypes,\n character_id: str,\n styles_embeddings: EmbeddingsTypes = EmbeddingsTypes.MINILM):\n \"\"\"\n Class that stores how characters speak depending on their moods.\n They are kept in the vector store\n :param store_type: type of vector store from those available in StoresTypes\n :param character_id: the unique `id` of the character\n :param styles_embeddings: Embeddings to use in the conversations in the Vector Store.\n \"\"\"\n match store_type.value:\n case StoresTypes.CHROMA.value:\n try:\n from mindcraft.infra.vectorstore.chroma import Chroma\n except ImportError:\n raise Exception(f\"To use `chromadb` as your vector store, please install it first using pip:\\n\"\n f\"`pip install chromadb`\")\n\n self.store = Chroma(STYLES_DATA_PATH, character_id, styles_embeddings)\n\n case _:\n raise NotImplementedError(f\"{store_type} not implemented\")\n\n self.embeddings = styles_embeddings\n\n def memorize(self, text: str, mood: Mood):\n \"\"\"\n Stores an example conversation of a character for a specific mood into the vector store.\n :param text: last interaction happened to store in LTM.\n :param mood: the mood the npc had when said this\n \"\"\"\n self.store.add_to_collection(\n text=text,\n metadata={'mood': mood.feature if mood is not None else Mood.DEFAULT},\n text_id=str(self.store.count()))\n\n def retrieve_interaction_by_mood(self,\n mood: str) -> SearchResult:\n \"\"\"\n Retrieves examples of interactions for a specific mood\n :param mood: the current mood of the character\n :return SearchResult\n \"\"\"\n return self.store.get(where={'mood': mood})"
}
] | from mindcraft.memory.summarizer_types import SummarizerTypes
from mindcraft.memory.stm import STM
from mindcraft.infra.vectorstore.stores_types import StoresTypes
from mindcraft.infra.sft.feedback import Feedback
from mindcraft.features.motivation import Motivation
from mindcraft.features.personality import Personality
from mindcraft.lore.world import World
from mindcraft.infra.embeddings.embeddings_types import EmbeddingsTypes
from mindcraft.memory.ltm import LTM
from mindcraft.settings import LOGGER_FORMAT, DATE_FORMAT
from mindcraft.features.mood import Mood
from mindcraft.styles.conversational_style import ConversationalStyle
import logging | 7,476 |
logging.basicConfig(format=LOGGER_FORMAT, datefmt=DATE_FORMAT, level=logging.INFO)
logger = logging.getLogger(__name__)
class NPC:
def __init__(self,
character_name: str,
description: str,
personalities: list[Personality],
motivations: list[Motivation],
mood: Mood,
store_type: StoresTypes,
ltm_embeddings: EmbeddingsTypes = EmbeddingsTypes.MINILM,
stm_capacity: int = 5,
stm_summarizer: SummarizerTypes = SummarizerTypes.T5_SMALL,
stm_max_summary_length: int = 230,
stm_min_summary_length: int = 30):
"""
        A class managing the Non-Player Character, including short-term and long-term memory, backgrounds, and
        motivations used to create the answer.
:param character_name: the unique id of the character
:param description: a short description of who your character in the world is
:param personalities: a list of personalities that permanently define the character
(if it's a current state then use it in `moods`)
:param motivations: a list of motivations the character has
        :param mood: current mood of the character. It can change over time.
:param store_type: VectorStore from StoresTypes you prefer to use.
:param ltm_embeddings: embeddings from EmbeddingsTypes you prefer to use
:param stm_capacity: How many interactions from ltm to store
:param stm_summarizer: One of `SummarizerTypes` to use for including the summary of last interactions
:param stm_max_summary_length: max length of the summary
:param stm_min_summary_length: min length of the summary
"""
self._character_name = character_name
self._description = description
self._ltm = LTM(store_type, character_name, ltm_embeddings)
self._stm = STM(self._ltm, stm_capacity, stm_summarizer, stm_max_summary_length, stm_min_summary_length)
self._personalities = personalities
self._motivations = motivations
self._mood = mood
|
logging.basicConfig(format=LOGGER_FORMAT, datefmt=DATE_FORMAT, level=logging.INFO)
logger = logging.getLogger(__name__)
class NPC:
def __init__(self,
character_name: str,
description: str,
personalities: list[Personality],
motivations: list[Motivation],
mood: Mood,
store_type: StoresTypes,
ltm_embeddings: EmbeddingsTypes = EmbeddingsTypes.MINILM,
stm_capacity: int = 5,
stm_summarizer: SummarizerTypes = SummarizerTypes.T5_SMALL,
stm_max_summary_length: int = 230,
stm_min_summary_length: int = 30):
"""
        A class managing the Non-Player Character, including short-term and long-term memory, backgrounds, and
        motivations used to create the answer.
:param character_name: the unique id of the character
:param description: a short description of who your character in the world is
:param personalities: a list of personalities that permanently define the character
(if it's a current state then use it in `moods`)
:param motivations: a list of motivations the character has
        :param mood: current mood of the character. It can change over time.
:param store_type: VectorStore from StoresTypes you prefer to use.
:param ltm_embeddings: embeddings from EmbeddingsTypes you prefer to use
:param stm_capacity: How many interactions from ltm to store
:param stm_summarizer: One of `SummarizerTypes` to use for including the summary of last interactions
:param stm_max_summary_length: max length of the summary
:param stm_min_summary_length: min length of the summary
"""
self._character_name = character_name
self._description = description
self._ltm = LTM(store_type, character_name, ltm_embeddings)
self._stm = STM(self._ltm, stm_capacity, stm_summarizer, stm_max_summary_length, stm_min_summary_length)
self._personalities = personalities
self._motivations = motivations
self._mood = mood | self._conversational_style = ConversationalStyle(store_type, character_name, ltm_embeddings) | 12 | 2023-11-24 19:23:37+00:00 | 12k |
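A minimal usage sketch of the classes appearing in the record above (not part of the original repository): it instantiates the NPC class shown in the record's code using the Mood, StoresTypes and EmbeddingsTypes imports listed there. The character name, description and mood string are illustrative placeholders, and empty lists are passed for personalities/motivations because the Personality and Motivation constructors are not shown in this record.

from mindcraft.features.mood import Mood
from mindcraft.infra.vectorstore.stores_types import StoresTypes
from mindcraft.infra.embeddings.embeddings_types import EmbeddingsTypes

# Illustrative values only; NPC is the class defined in the record above.
npc = NPC(
    character_name="villager_01",          # placeholder id, not from the record
    description="A farmer living near the river.",  # placeholder description
    personalities=[],                      # Personality objects (constructor not shown in this record)
    motivations=[],                        # Motivation objects (constructor not shown in this record)
    mood=Mood("calm"),                     # Mood takes a feature string per the snippet above
    store_type=StoresTypes.CHROMA,         # the store type handled by LTM/ConversationalStyle above
    ltm_embeddings=EmbeddingsTypes.MINILM, # default embeddings in the signatures above
    stm_capacity=5,
)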
Algomancer/The-Daily-Train | eval/lm_eval_harness.py | [
{
"identifier": "generate",
"path": "generate/base.py",
"snippet": "@torch.inference_mode()\ndef generate(\n model: GPT,\n prompt: torch.Tensor,\n max_returned_tokens: int,\n *,\n temperature: float = 1.0,\n top_k: Optional[int] = None,\n eos_id: Optional[int] = None,\n) -> torch.Tensor:\n \"\"\"Takes a conditioning sequence (prompt) as input and continues to generate as many tokens as requested.\n\n The implementation of this function is modified from A. Karpathy's nanoGPT.\n\n Args:\n model: The model to use.\n prompt: Tensor of shape (T) with indices of the prompt sequence.\n max_returned_tokens: The maximum number of tokens to return (given plus generated).\n temperature: Scales the predicted logits by 1 / temperature.\n top_k: If specified, only sample among the tokens with the k highest probabilities.\n eos_id: If specified, stop generating any more token once the <eos> token is triggered.\n \"\"\"\n T = prompt.size(0)\n assert max_returned_tokens > T\n if model.max_seq_length < max_returned_tokens - 1:\n # rolling the kv cache based on the `input_pos` value would be necessary. However, doing so would introduce a\n # data dependency on the `input_pos` tensor and impact model compilation. Since this setting is uncommon, we do\n # not support it to avoid negatively impacting the overall speed\n raise NotImplementedError(f\"max_seq_length {model.max_seq_length} needs to be >= {max_returned_tokens - 1}\")\n\n device = prompt.device\n tokens = [prompt]\n input_pos = torch.tensor([T], device=device)\n token = next_token(\n model, torch.arange(0, T, device=device), prompt.view(1, -1), temperature=temperature, top_k=top_k\n ).clone()\n tokens.append(token)\n for _ in range(2, max_returned_tokens - T + 1):\n token = next_token(model, input_pos, token.view(1, -1), temperature=temperature, top_k=top_k).clone()\n tokens.append(token)\n if token == eos_id:\n break\n input_pos = input_pos.add_(1)\n return torch.cat(tokens)"
},
{
"identifier": "GPT",
"path": "daily_train/model.py",
"snippet": "class GPT(nn.Module):\n def __init__(self, config: Config) -> None:\n super().__init__()\n assert config.padded_vocab_size is not None\n self.config = config\n\n self.lm_head = nn.Linear(config.n_embd, config.padded_vocab_size, bias=config.lm_head_bias)\n self.transformer = nn.ModuleDict(\n dict(\n wte=nn.Embedding(config.padded_vocab_size, config.n_embd),\n h=nn.ModuleList(Block(config) for _ in range(config.n_layer)),\n ln_f=config.norm_class(config.n_embd, eps=config.norm_eps),\n )\n )\n self.max_seq_length = self.config.block_size\n self.mask_cache: Optional[torch.Tensor] = None\n\n @property\n def max_seq_length(self) -> int:\n return self._max_seq_length\n\n @max_seq_length.setter\n def max_seq_length(self, value: int) -> None:\n \"\"\"\n When doing inference, the sequences used might be shorter than the model's context length.\n This allows setting a smaller number to avoid allocating unused memory\n \"\"\"\n if value > self.config.block_size:\n raise ValueError(f\"Cannot attend to {value}, block size is only {self.config.block_size}\")\n self._max_seq_length = value\n if not hasattr(self, \"cos\"):\n # first call\n cos, sin = self.rope_cache()\n self.register_buffer(\"cos\", cos, persistent=False)\n self.register_buffer(\"sin\", sin, persistent=False)\n elif value != self.cos.size(0):\n # override\n self.cos, self.sin = self.rope_cache(device=self.cos.device)\n # the mask and kv cache size will get updated on `set_kv_cache`. we cannot update it here because we don't know\n # if the kv cache is expected\n\n def reset_parameters(self) -> None:\n # Trigger resetting the rope-cache\n self.max_seq_length = self.config.block_size\n\n def _init_weights(self, module: nn.Module) -> None:\n \"\"\"Meant to be used with `gpt.apply(gpt._init_weights)`.\"\"\"\n if isinstance(module, nn.Linear):\n torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.Embedding):\n torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)\n\n def forward(self, idx: torch.Tensor, input_pos: Optional[torch.Tensor] = None, preamble: Optional[torch.Tensor] = None) -> torch.Tensor:\n T = idx.size(1)\n if preamble is not None:\n T = T + preamble.size(1)\n if self.max_seq_length < T:\n raise ValueError(f\"Cannot forward sequence of length {T}, max seq length is only {self.max_seq_length}.\")\n\n if input_pos is not None: # use the kv cache\n cos = self.cos.index_select(0, input_pos)\n sin = self.sin.index_select(0, input_pos)\n if self.mask_cache is None:\n raise TypeError(\"You need to call `gpt.set_kv_cache()`\")\n mask = self.mask_cache.index_select(2, input_pos)\n else:\n cos = self.cos[:T]\n sin = self.sin[:T]\n mask = None\n\n x = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)\n if preamble is not None:\n x = torch.cat([preamble, x], dim=1)\n for block in self.transformer.h:\n x = block(x, cos, sin, mask, input_pos)\n x = self.transformer.ln_f(x)\n if preamble is not None:\n x = x[:, preamble.size(1):, :]\n return self.lm_head(x) # (b, t, vocab_size)\n\n @classmethod\n def from_name(cls, name: str, **kwargs: Any) -> Self:\n return cls(Config.from_name(name, **kwargs))\n\n def rope_cache(self, device: Optional[torch.device] = None) -> Tuple[torch.Tensor, torch.Tensor]:\n return build_rope_cache(\n seq_len=self.max_seq_length,\n n_elem=self.config.rope_n_elem,\n device=device,\n condense_ratio=self.config.rope_condense_ratio,\n base=self.config.rope_base,\n )\n\n def set_kv_cache(\n 
self,\n batch_size: int,\n rope_cache_length: Optional[int] = None,\n device: Optional[torch.device] = None,\n dtype: Optional[torch.dtype] = None,\n ) -> None:\n if rope_cache_length is None:\n rope_cache_length = self.cos.size(-1)\n max_seq_length = self.max_seq_length\n\n # initialize the kv cache for all blocks\n for block in self.transformer.h:\n block.attn.kv_cache = block.attn.build_kv_cache(\n batch_size, max_seq_length, rope_cache_length, device, dtype\n )\n\n if self.mask_cache is None or self.mask_cache.size(3) != max_seq_length:\n # passing `attn_mask` to SDPA downgrades it to use the inefficient implementation. since we only need the mask\n # for the kv-cache support (only during inference), we only create it in that situation\n # this will be resolved by https://github.com/pytorch/pytorch/issues/96099\n ones = torch.ones((max_seq_length, max_seq_length), device=device, dtype=torch.bool)\n self.mask_cache = torch.tril(ones).unsqueeze(0).unsqueeze(0)\n\n def clear_kv_cache(self) -> None:\n self.mask_cache = None\n for block in self.transformer.h:\n block.attn.kv_cache = None"
},
{
"identifier": "Config",
"path": "daily_train/config.py",
"snippet": "class Config:\n name: str = \"\"\n hf_config: dict = field(default_factory=dict)\n block_size: int = 4096\n vocab_size: int = 50254\n padding_multiple: int = 512\n padded_vocab_size: Optional[int] = None\n n_layer: int = 16\n n_head: int = 32\n n_embd: int = 4096\n rotary_percentage: float = 0.25\n parallel_residual: bool = True\n bias: bool = True\n lm_head_bias: bool = False\n # to use multi-head attention (MHA), set this to `n_head` (default)\n # to use multi-query attention (MQA), set this to 1\n # to use grouped-query attention (GQA), set this to a value in between\n # Example with `n_head=4`\n # ┌───┐┌───┐┌───┐┌───┐ ┌───┐ ┌───┐ ┌───┐\n # │ v ││ v ││ v ││ v │ │ v │ │ v │ │ v │\n # └───┘└───┘└───┘└───┘ └───┘ └───┘ └───┘\n # │ │ │ │ │ │ │\n # ┌───┐┌───┐┌───┐┌───┐ ┌───┐ ┌───┐ ┌───┐\n # │ k ││ k ││ k ││ k │ │ k │ │ k │ │ k │\n # └───┘└───┘└───┘└───┘ └───┘ └───┘ └───┘\n # │ │ │ │ ┌──┴──┐ ┌──┴──┐ ┌────┬──┴─┬────┐\n # ┌───┐┌───┐┌───┐┌───┐ ┌───┐┌───┐┌───┐┌───┐ ┌───┐┌───┐┌───┐┌───┐\n # │ q ││ q ││ q ││ q │ │ q ││ q ││ q ││ q │ │ q ││ q ││ q ││ q │\n # └───┘└───┘└───┘└───┘ └───┘└───┘└───┘└───┘ └───┘└───┘└───┘└───┘\n # ◀──────────────────▶ ◀──────────────────▶ ◀──────────────────▶\n # MHA GQA MQA\n # n_query_groups=4 n_query_groups=2 n_query_groups=1\n #\n # credit https://arxiv.org/pdf/2305.13245.pdf\n n_query_groups: Optional[int] = None\n shared_attention_norm: bool = False\n _norm_class: Literal[\"LayerNorm\", \"RMSNorm\"] = \"LayerNorm\"\n norm_eps: float = 1e-5\n _mlp_class: Literal[\"GptNeoxMLP\", \"LLaMAMLP\"] = \"GptNeoxMLP\"\n gelu_approximate: str = \"none\"\n intermediate_size: Optional[int] = None\n rope_condense_ratio: int = 1\n rope_base: int = 10000\n use_smear: bool = False\n\n def __post_init__(self):\n if not self.name:\n self.name = self.hf_config.get(\"name\", self.name)\n\n assert self.n_embd % self.n_head == 0\n self.head_size = self.n_embd // self.n_head\n\n # vocab size should be a power of 2 to be optimal on hardware. 
compute the closest value\n if self.padded_vocab_size is None:\n self.padded_vocab_size = find_multiple(self.vocab_size, self.padding_multiple)\n else:\n # vocab size shouldn't be larger than padded vocab size\n self.vocab_size = min(self.vocab_size, self.padded_vocab_size)\n\n # compute the number of query groups\n if self.n_query_groups is not None:\n assert self.n_head % self.n_query_groups == 0\n else:\n self.n_query_groups = self.n_head\n\n # compute the intermediate size for MLP if not set\n if self.intermediate_size is None:\n if self._mlp_class == \"LLaMAMLP\":\n raise ValueError(\"The config needs to set the `intermediate_size`\")\n self.intermediate_size = 4 * self.n_embd\n\n self.rope_n_elem = int(self.rotary_percentage * self.head_size)\n\n @classmethod\n def from_name(cls, name: str, **kwargs: Any) -> Self:\n if name not in name_to_config:\n # search through all `config['hf_config']['name']`\n try:\n conf_dict = next(config for config in configs if name == config[\"hf_config\"][\"name\"])\n except StopIteration:\n raise ValueError(f\"{name!r} is not a supported config name\")\n else:\n conf_dict = name_to_config[name]\n\n conf_dict = conf_dict.copy()\n if \"condense_ratio\" in kwargs: # legacy name\n kwargs[\"rope_condense_ratio\"] = kwargs.pop(\"condense_ratio\")\n conf_dict.update(kwargs)\n return cls(**conf_dict)\n\n @classmethod\n def from_json(cls, path: Union[str, Path], **kwargs: Any) -> Self:\n with open(path, encoding=\"utf-8\") as fp:\n json_kwargs = json.load(fp)\n if \"condense_ratio\" in json_kwargs: # legacy name\n json_kwargs[\"rope_condense_ratio\"] = json_kwargs.pop(\"condense_ratio\")\n if \"condense_ratio\" in kwargs: # legacy name\n kwargs[\"rope_condense_ratio\"] = kwargs.pop(\"condense_ratio\")\n if \"org\" in json_kwargs: # legacy name\n json_kwargs[\"hf_config\"] = {\"name\": json_kwargs[\"name\"], \"org\": json_kwargs.pop(\"org\")}\n if \"org\" in kwargs: # legacy name\n kwargs[\"hf_config\"] = {\"name\": kwargs.get(\"name\", json_kwargs[\"name\"]), \"org\": kwargs.pop(\"org\")}\n json_kwargs.update(kwargs)\n return cls(**json_kwargs)\n\n @classmethod\n def from_checkpoint(cls, path: Path, **kwargs: Any) -> Self:\n \"\"\"Automatically load `lit_config.json` and if it doesn't exist - a matching config from `daily_train/config.py`.\"\"\"\n if (config_path := path / \"lit_config.json\").is_file():\n return cls.from_json(config_path, **kwargs)\n if (model_name := path.name) in name_to_config:\n return cls.from_name(model_name, **kwargs)\n raise FileNotFoundError(f\"For {str(path)!r} neither 'lit_config.json' nor matching config exists.\")\n\n @property\n def mlp_class(self) -> Type:\n # `self._mlp_class` cannot be the type to keep the config json serializable\n return getattr(daily_train.model, self._mlp_class)\n\n @property\n def norm_class(self) -> Type:\n # `self._norm_class` cannot be the type to keep the config json serializable\n if self._norm_class == \"RMSNorm\":\n from daily_train.rmsnorm import RMSNorm\n\n return RMSNorm\n return getattr(torch.nn, self._norm_class)"
},
{
"identifier": "Tokenizer",
"path": "daily_train/tokenizer.py",
"snippet": "class Tokenizer:\n def __init__(self, checkpoint_dir: Union[Path, str]) -> None:\n checkpoint_dir = Path(checkpoint_dir)\n if not checkpoint_dir.exists():\n raise NotADirectoryError(f\"The checkpoint directory does not exist: {str(checkpoint_dir)}\")\n\n self.use_bos = self.check_if_bos_token_used(checkpoint_dir)\n self.bos_id = None\n self.eos_id = None\n\n # some checkpoints have both files, `.model` takes precedence\n if (vocabulary_path := checkpoint_dir / \"tokenizer.model\").is_file():\n from sentencepiece import SentencePieceProcessor\n\n self.processor = SentencePieceProcessor(model_file=str(vocabulary_path))\n self.backend = \"sentencepiece\"\n self.bos_id = self.processor.bos_id()\n self.eos_id = self.processor.eos_id()\n\n elif (vocabulary_path := checkpoint_dir / \"tokenizer.json\").is_file():\n from tokenizers import Tokenizer as HFTokenizer\n\n self.processor = HFTokenizer.from_file(str(vocabulary_path))\n self.backend = \"huggingface\"\n\n if (special_tokens_path := checkpoint_dir / \"tokenizer_config.json\").is_file():\n with open(special_tokens_path) as fp:\n config = json.load(fp)\n bos_token = config.get(\"bos_token\")\n self.bos_id = self.token_to_id(bos_token) if bos_token is not None else None\n eos_token = config.get(\"eos_token\")\n self.eos_id = self.token_to_id(eos_token) if eos_token is not None else None\n if (special_tokens_path := checkpoint_dir / \"generation_config.json\").is_file():\n with open(special_tokens_path) as fp:\n config = json.load(fp)\n if self.bos_id is None:\n self.bos_id = config.get(\"bos_token_id\")\n if self.eos_id is None:\n self.eos_id = config.get(\"eos_token_id\")\n else:\n raise NotImplementedError\n\n @property\n def vocab_size(self) -> int:\n if self.backend == \"huggingface\":\n return self.processor.get_vocab_size(with_added_tokens=False)\n if self.backend == \"sentencepiece\":\n return self.processor.vocab_size()\n raise RuntimeError\n\n def token_to_id(self, token: str) -> int:\n if self.backend == \"huggingface\":\n id_ = self.processor.token_to_id(token)\n elif self.backend == \"sentencepiece\":\n id_ = self.processor.piece_to_id(token)\n else:\n raise RuntimeError\n if id_ is None:\n raise ValueError(f\"token {token!r} not found in the collection.\")\n return id_\n\n def check_if_bos_token_used(self, checkpoint_dir: Path) -> bool:\n if not (tokenizer_config_path := checkpoint_dir / \"tokenizer_config.json\").is_file():\n return False\n with open(tokenizer_config_path) as fp:\n config = json.load(fp)\n if any(config.get(check, False) for check in (\"add_bos_token\", \"add_prefix_space\")):\n return True\n # for examples that also use the Llama tokenizer, but do not have or set add_bos_token to True.\n # ex: https://huggingface.co/stabilityai/StableBeluga2/blob/main/tokenizer_config.json#L2\n return config.get(\"add_bos_token\") is None and config.get(\"tokenizer_class\") == \"LlamaTokenizer\"\n\n def encode(\n self,\n string: str,\n device: Optional[torch.device] = None,\n bos: Optional[bool] = None,\n eos: bool = False,\n max_length: int = -1,\n ) -> torch.Tensor:\n if self.backend == \"huggingface\":\n tokens = self.processor.encode(string).ids\n elif self.backend == \"sentencepiece\":\n tokens = self.processor.encode(string)\n else:\n raise RuntimeError\n if bos or (bos is None and self.use_bos):\n bos_id = self.bos_id\n if bos_id is None:\n raise NotImplementedError(\"This tokenizer does not have a defined a bos token\")\n tokens = [bos_id] + tokens\n if eos:\n tokens = tokens + [self.eos_id]\n if max_length > 
0:\n tokens = tokens[:max_length]\n return torch.tensor(tokens, dtype=torch.int, device=device)\n\n def decode(self, tensor: torch.Tensor) -> str:\n tokens = [tensor.item()] if tensor.ndim == 0 else tensor.tolist()\n return self.processor.decode(tokens)"
},
{
"identifier": "check_valid_checkpoint_dir",
"path": "daily_train/utils.py",
"snippet": "def check_valid_checkpoint_dir(checkpoint_dir: Path) -> None:\n files = {\n \"lit_model.pth\": (checkpoint_dir / \"lit_model.pth\").is_file(),\n \"lit_config.json\": (checkpoint_dir / \"lit_config.json\").is_file(),\n \"tokenizer.json OR tokenizer.model\": (checkpoint_dir / \"tokenizer.json\").is_file() or (\n checkpoint_dir / \"tokenizer.model\"\n ).is_file(),\n \"tokenizer_config.json\": (checkpoint_dir / \"tokenizer_config.json\").is_file(),\n }\n if checkpoint_dir.is_dir():\n if all(files.values()):\n # we're good\n return\n problem = f\" is missing the files: {[f for f, exists in files.items() if not exists]!r}\"\n else:\n problem = \" is not a checkpoint directory\"\n\n # list locally available checkpoints\n available = list(Path(\"checkpoints\").glob(\"*/*\"))\n if available:\n options = \"\\n --checkpoint_dir \".join([\"\"] + [repr(str(p.resolve())) for p in available])\n extra = f\"\\nYou have downloaded locally:{options}\\n\"\n else:\n extra = \"\"\n\n error_message = (\n f\"--checkpoint_dir {str(checkpoint_dir.absolute())!r}{problem}.\"\n \"\\nFind download instructions at https://github.com/Lightning-AI/lit-gpt/blob/main/tutorials\\n\"\n f\"{extra}\\nSee all download options by running:\\n python scripts/download.py\"\n )\n print(error_message, file=sys.stderr)\n raise SystemExit(1)"
},
{
"identifier": "get_default_supported_precision",
"path": "daily_train/utils.py",
"snippet": "def get_default_supported_precision(training: bool) -> str:\n \"\"\"Return default precision that is supported by the hardware: either `bf16` or `16`.\n\n Args:\n training: `-mixed` or `-true` version of the precision to use\n\n Returns:\n default precision that is suitable for the task and is supported by the hardware\n \"\"\"\n from lightning.fabric.accelerators import MPSAccelerator\n\n if MPSAccelerator.is_available() or (torch.cuda.is_available() and not torch.cuda.is_bf16_supported()):\n return \"16-mixed\" if training else \"16-true\"\n return \"bf16-mixed\" if training else \"bf16-true\""
},
{
"identifier": "gptq_quantization",
"path": "daily_train/utils.py",
"snippet": "def gptq_quantization(enabled: bool = False) -> ContextManager:\n if not enabled:\n return nullcontext()\n\n from lightning.fabric.plugins.precision.utils import _ClassReplacementContextManager\n\n from quantize.gptq import ColBlockQuantizedLinear\n\n class QuantizedLinear(ColBlockQuantizedLinear):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, bits=4, tile_cols=-1, **kwargs)\n\n return _ClassReplacementContextManager({\"torch.nn.Linear\": QuantizedLinear})"
},
{
"identifier": "load_checkpoint",
"path": "daily_train/utils.py",
"snippet": "def load_checkpoint(fabric: L.Fabric, model: nn.Module, checkpoint_path: Path, strict: bool = True) -> None:\n if isinstance(fabric.strategy, FSDPStrategy):\n fabric.load_raw(checkpoint_path, model, strict=strict)\n else:\n state_dict = lazy_load(checkpoint_path)\n state_dict = state_dict.get(\"model\", state_dict)\n model.load_state_dict(state_dict, strict=strict)"
}
] | import json
import sys
import lightning as L
import torch
import fnmatch
from pathlib import Path
from typing import Dict, List, Literal, Optional
from lightning.fabric.plugins import BitsandbytesPrecision
from lm_eval import base, evaluator, tasks
from lm_eval.base import BaseLM
from generate.base import generate
from daily_train import GPT, Config, Tokenizer
from daily_train.utils import (
check_valid_checkpoint_dir,
get_default_supported_precision,
gptq_quantization,
load_checkpoint,
)
from jsonargparse import CLI | 7,220 | return self.batch_size_per_gpu * self.fabric.world_size
@property
def device(self):
return self.fabric.device
def tok_encode(self, string: str) -> List[int]:
return self.tokenizer.encode(string, bos=False, eos=False).tolist()
def tok_decode(self, tokens: List[int]) -> str:
t = torch.tensor(tokens)
return self.tokenizer.decode(t)
@torch.inference_mode()
def _model_call(self, inps):
return self.model(inps)
def _model_generate(self, context, max_length, eos_token_id) -> torch.Tensor:
# this only supports batch size 1
assert context.shape[0] == 1
out = generate(self.model, context[0], max_length, eos_id=eos_token_id)
for block in self.model.transformer.h:
block.attn.kv_cache.reset_parameters()
return out.unsqueeze(0)
@torch.inference_mode()
def run_eval(
self, eval_tasks: List[str], num_fewshot: int, limit: Optional[int], bootstrap_iters: int, no_cache: bool
) -> Dict:
# Returns a list containing all values of the task registry that
# match at least one of the patterns
def pattern_match(patterns, source_list):
task_names = set()
for pattern in patterns:
for matching in fnmatch.filter(source_list, pattern):
task_names.add(matching)
return list(task_names)
eval_tasks = pattern_match(eval_tasks, tasks.ALL_TASKS)
print(f"Found tasks: {eval_tasks}")
# **HACK INCOMING**:
# first get task dict on local main rank
# the tasks are downloaded *as they are initialized*, and the downloads don't like multithreading.
# so we download them once on the local main rank, wait, and then initialize them on all other ranks, which *should* load from the cache.
if self.fabric.local_rank == 0:
tasks.get_task_dict(eval_tasks)
# torch barrier
self.fabric.barrier()
tasks.get_task_dict(eval_tasks)
lm = self
if not no_cache:
lm = base.CachingLM(lm, "lm_cache/lit-gpt.db")
results = evaluator.evaluate(
lm=lm,
task_dict=tasks.get_task_dict(eval_tasks),
num_fewshot=num_fewshot,
limit=limit,
bootstrap_iters=bootstrap_iters,
)
results["config"] = dict(
model=self.model.config.name,
batch_size=self.batch_size,
device=str(self.device),
num_fewshot=num_fewshot,
limit=limit,
bootstrap_iters=bootstrap_iters,
no_cache=no_cache,
)
return results
@torch.inference_mode()
def run_eval_harness(
checkpoint_dir: Path,
precision: Optional[str] = None,
quantize: Optional[Literal["bnb.nf4", "bnb.nf4-dq", "bnb.fp4", "bnb.fp4-dq", "bnb.int8", "gptq.int4"]] = None,
eval_tasks: List[str] = ["arc_challenge", "piqa", "hellaswag", "hendrycksTest-*"],
save_filepath: Optional[Path] = None,
num_fewshot: int = 0,
limit: Optional[int] = None,
bootstrap_iters: int = 100000,
no_cache: bool = True,
):
if precision is None:
precision = get_default_supported_precision(training=False)
plugins = None
if quantize is not None and quantize.startswith("bnb."):
if "mixed" in precision:
raise ValueError("Quantization and mixed precision is not supported.")
dtype = {"16-true": torch.float16, "bf16-true": torch.bfloat16, "32-true": torch.float32}[precision]
plugins = BitsandbytesPrecision(quantize[4:], dtype)
precision = None
fabric = L.Fabric(devices=1, precision=precision, plugins=plugins)
check_valid_checkpoint_dir(checkpoint_dir)
tokenizer = Tokenizer(checkpoint_dir)
config = Config.from_json(checkpoint_dir / "lit_config.json")
if quantize == "gptq.int4":
model_file = "lit_model_gptq.4bit.pth"
if not (checkpoint_dir / model_file).is_file():
raise ValueError("Please run `python quantize/gptq.py` first")
else:
model_file = "lit_model.pth"
checkpoint_path = checkpoint_dir / model_file
print(f"Loading model {str(checkpoint_path)!r} with {config.__dict__}", file=sys.stderr)
with fabric.init_module(empty_init=True), gptq_quantization(quantize == "gptq.int4"):
model = GPT(config)
model.eval()
model = fabric.setup_module(model)
|
# support running without installing as a package
wd = Path(__file__).parent.parent.resolve()
sys.path.append(str(wd))
class EvalHarnessBase(BaseLM):
# Credits:
# https://github.com/EleutherAI/gpt-neox/blob/main/eval_tasks/eval_adapter.py
def __init__(self, fabric: L.Fabric, model: GPT, tokenizer: Tokenizer, batch_size: int):
super().__init__()
self.fabric = fabric
self.model = model
self.tokenizer = tokenizer
self.batch_size_per_gpu = batch_size
with fabric.init_tensor():
model.set_kv_cache(batch_size=batch_size)
@classmethod
def create_from_arg_string(cls, arg_string, additional_config=None):
kwargs = {el.split("=")[0]: el.split("=")[1] for el in arg_string.split(",")}
return cls(**kwargs, **additional_config)
@property
def eot_token_id(self):
# we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
return self.tokenizer.eos_id
@property
def max_length(self):
return self.model.max_seq_length
@property
def vocab_size(self):
return self.tokenizer.vocab_size
@property
def max_gen_toks(self):
return 256
@property
def batch_size(self):
return self.batch_size_per_gpu * self.fabric.world_size
@property
def device(self):
return self.fabric.device
def tok_encode(self, string: str) -> List[int]:
return self.tokenizer.encode(string, bos=False, eos=False).tolist()
def tok_decode(self, tokens: List[int]) -> str:
t = torch.tensor(tokens)
return self.tokenizer.decode(t)
@torch.inference_mode()
def _model_call(self, inps):
return self.model(inps)
def _model_generate(self, context, max_length, eos_token_id) -> torch.Tensor:
# this only supports batch size 1
assert context.shape[0] == 1
out = generate(self.model, context[0], max_length, eos_id=eos_token_id)
for block in self.model.transformer.h:
block.attn.kv_cache.reset_parameters()
return out.unsqueeze(0)
@torch.inference_mode()
def run_eval(
self, eval_tasks: List[str], num_fewshot: int, limit: Optional[int], bootstrap_iters: int, no_cache: bool
) -> Dict:
# Returns a list containing all values of the task registry that
# match at least one of the patterns
def pattern_match(patterns, source_list):
task_names = set()
for pattern in patterns:
for matching in fnmatch.filter(source_list, pattern):
task_names.add(matching)
return list(task_names)
eval_tasks = pattern_match(eval_tasks, tasks.ALL_TASKS)
print(f"Found tasks: {eval_tasks}")
# **HACK INCOMING**:
# first get task dict on local main rank
# the tasks are downloaded *as they are initialized*, and the downloads don't like multithreading.
# so we download them once on the local main rank, wait, and then initialize them on all other ranks, which *should* load from the cache.
if self.fabric.local_rank == 0:
tasks.get_task_dict(eval_tasks)
# torch barrier
self.fabric.barrier()
tasks.get_task_dict(eval_tasks)
lm = self
if not no_cache:
lm = base.CachingLM(lm, "lm_cache/lit-gpt.db")
results = evaluator.evaluate(
lm=lm,
task_dict=tasks.get_task_dict(eval_tasks),
num_fewshot=num_fewshot,
limit=limit,
bootstrap_iters=bootstrap_iters,
)
results["config"] = dict(
model=self.model.config.name,
batch_size=self.batch_size,
device=str(self.device),
num_fewshot=num_fewshot,
limit=limit,
bootstrap_iters=bootstrap_iters,
no_cache=no_cache,
)
return results
@torch.inference_mode()
def run_eval_harness(
checkpoint_dir: Path,
precision: Optional[str] = None,
quantize: Optional[Literal["bnb.nf4", "bnb.nf4-dq", "bnb.fp4", "bnb.fp4-dq", "bnb.int8", "gptq.int4"]] = None,
eval_tasks: List[str] = ["arc_challenge", "piqa", "hellaswag", "hendrycksTest-*"],
save_filepath: Optional[Path] = None,
num_fewshot: int = 0,
limit: Optional[int] = None,
bootstrap_iters: int = 100000,
no_cache: bool = True,
):
if precision is None:
precision = get_default_supported_precision(training=False)
plugins = None
if quantize is not None and quantize.startswith("bnb."):
if "mixed" in precision:
raise ValueError("Quantization and mixed precision is not supported.")
dtype = {"16-true": torch.float16, "bf16-true": torch.bfloat16, "32-true": torch.float32}[precision]
plugins = BitsandbytesPrecision(quantize[4:], dtype)
precision = None
fabric = L.Fabric(devices=1, precision=precision, plugins=plugins)
check_valid_checkpoint_dir(checkpoint_dir)
tokenizer = Tokenizer(checkpoint_dir)
config = Config.from_json(checkpoint_dir / "lit_config.json")
if quantize == "gptq.int4":
model_file = "lit_model_gptq.4bit.pth"
if not (checkpoint_dir / model_file).is_file():
raise ValueError("Please run `python quantize/gptq.py` first")
else:
model_file = "lit_model.pth"
checkpoint_path = checkpoint_dir / model_file
print(f"Loading model {str(checkpoint_path)!r} with {config.__dict__}", file=sys.stderr)
with fabric.init_module(empty_init=True), gptq_quantization(quantize == "gptq.int4"):
model = GPT(config)
model.eval()
model = fabric.setup_module(model)
| load_checkpoint(fabric, model, checkpoint_path) | 7 | 2023-11-22 06:56:19+00:00 | 12k |
Luo-Z13/pointobb | PointOBB/mmdet/models/detectors/PointOBB.py | [
{
"identifier": "DETECTORS",
"path": "PointOBB/mmdet/models/builder.py",
"snippet": "DETECTORS = MODELS"
},
{
"identifier": "TwoStageDetector",
"path": "PointOBB/mmdet/models/detectors/two_stage.py",
"snippet": "class TwoStageDetector(BaseDetector):\n \"\"\"Base class for two-stage detectors.\n\n Two-stage detectors typically consisting of a region proposal network and a\n task-specific regression head.\n \"\"\"\n\n def __init__(self,\n backbone,\n neck=None,\n rpn_head=None,\n roi_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n init_cfg=None):\n super(TwoStageDetector, self).__init__(init_cfg)\n backbone.pretrained = pretrained\n self.backbone = build_backbone(backbone)\n\n if neck is not None:\n self.neck = build_neck(neck)\n\n if rpn_head is not None:\n rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None\n rpn_head_ = rpn_head.copy()\n rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)\n self.rpn_head = build_head(rpn_head_)\n\n if roi_head is not None:\n # update train and test cfg here for now\n # TODO: refactor assigner & sampler\n rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None\n roi_head.update(train_cfg=rcnn_train_cfg)\n roi_head.update(test_cfg=test_cfg.rcnn)\n roi_head.pretrained = pretrained\n self.roi_head = build_head(roi_head)\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n @property\n def with_rpn(self):\n \"\"\"bool: whether the detector has RPN\"\"\"\n return hasattr(self, 'rpn_head') and self.rpn_head is not None\n\n @property\n def with_roi_head(self):\n \"\"\"bool: whether the detector has a RoI head\"\"\"\n return hasattr(self, 'roi_head') and self.roi_head is not None\n\n def extract_feat(self, img):\n \"\"\"Directly extract features from the backbone+neck.\"\"\"\n x = self.backbone(img)\n if self.with_neck:\n x = self.neck(x)\n return x\n\n def forward_dummy(self, img):\n \"\"\"Used for computing network flops.\n\n See `mmdetection/tools/analysis_tools/get_flops.py`\n \"\"\"\n outs = ()\n # backbone\n x = self.extract_feat(img)\n # rpn\n if self.with_rpn:\n rpn_outs = self.rpn_head(x)\n outs = outs + (rpn_outs,)\n proposals = torch.randn(1000, 4).to(img.device)\n # roi_head\n roi_outs = self.roi_head.forward_dummy(x, proposals)\n outs = outs + (roi_outs,)\n return outs\n\n def forward_train(self,\n img,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n ann_weight=None, ## add by fei\n gt_masks=None,\n proposals=None,\n **kwargs):\n \"\"\"\n Args:\n img (Tensor): of shape (N, C, H, W) encoding input images.\n Typically these should be mean centered and std scaled.\n\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n\n gt_labels (list[Tensor]): class indices corresponding to each box\n\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n proposals : override rpn proposals with custom proposals. 
Use when\n `with_rpn` is False.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n x = self.extract_feat(img)\n\n losses = dict()\n\n # RPN forward and loss\n if self.with_rpn:\n proposal_cfg = self.train_cfg.get('rpn_proposal',\n self.test_cfg.rpn)\n rpn_losses, proposal_list = self.rpn_head.forward_train(\n x,\n img_metas,\n gt_bboxes,\n gt_labels=None,\n ann_weight=ann_weight,\n gt_bboxes_ignore=gt_bboxes_ignore,\n proposal_cfg=proposal_cfg)\n losses.update(rpn_losses)\n else:\n proposal_list = proposals\n\n roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,\n gt_bboxes, gt_labels,ann_weight, ## add by fei\n gt_bboxes_ignore, gt_masks,\n **kwargs)\n losses.update(roi_losses)\n\n return losses\n\n async def async_simple_test(self,\n img,\n img_meta,\n proposals=None,\n rescale=False):\n \"\"\"Async test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n x = self.extract_feat(img)\n\n if proposals is None:\n proposal_list = await self.rpn_head.async_simple_test_rpn(\n x, img_meta)\n else:\n proposal_list = proposals\n\n return await self.roi_head.async_simple_test(\n x, proposal_list, img_meta, rescale=rescale)\n\n def simple_test(self, img, img_metas, proposals=None, rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n\n assert self.with_bbox, 'Bbox head must be implemented.'\n x = self.extract_feat(img)\n if proposals is None:\n proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)\n else:\n proposal_list = proposals\n\n return self.roi_head.simple_test(\n x, proposal_list, img_metas, rescale=rescale)\n\n def aug_test(self, imgs, img_metas, rescale=False):\n \"\"\"Test with augmentations.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n # modified by hui #####################################\n if self.test_cfg.rcnn.get('do_tile_as_aug', False):\n x = self.extract_feats(imgs)\n proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)\n return self.roi_head.aug_test(\n x, proposal_list, img_metas, rescale=rescale)\n else:\n return self.tile_aug_test(imgs, img_metas, rescale)\n ##########################################################################\n\n # add by hui ######################################################################\n def tile_aug_test(self, imgs, img_metas, rescale=False):\n \"\"\"Test with augmentations for each tile seperatelly.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n x = self.extract_feats(imgs)\n\n assert len(x) == len(img_metas)\n assert not self.roi_head.with_mask\n tile2img_metas = {}\n tile2feats = {}\n for feat, img_meta in zip(x, img_metas):\n assert len(img_meta) == 1\n tile_off = img_meta[0].pop('tile_offset') # must pop here, attention.\n if tile_off in tile2img_metas:\n tile2img_metas[tile_off].append(img_meta)\n tile2feats[tile_off].append(feat)\n else:\n tile2img_metas[tile_off] = [img_meta]\n tile2feats[tile_off] = [feat]\n\n # forward and merge all result on each tile\n all_tile_bboxes = []\n all_tile_labels = []\n num_classes = 0\n for tile_off, img_metas in tile2img_metas.items():\n x = tile2feats[tile_off]\n proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)\n bboxes = self.roi_head.aug_test(x, proposal_list, img_metas, rescale=rescale)[0]\n\n device = x[0][0].device\n dx, dy = tile_off\n labels = []\n num_classes = max(num_classes, len(bboxes))\n for cls in range(len(bboxes)):\n bboxes[cls][:, [0, 2]] += dx\n bboxes[cls][:, [1, 3]] += 
dy\n label = torch.zeros((len(bboxes[cls]),), dtype=torch.long, device=device) + cls\n labels.append(label)\n all_tile_bboxes.extend(bboxes)\n all_tile_labels.extend(labels)\n import numpy as np\n all_tile_bboxes = np.concatenate(all_tile_bboxes, axis=0)\n all_tile_bboxes = torch.from_numpy(all_tile_bboxes).to(device)\n all_tile_labels = torch.cat(all_tile_labels, dim=0)\n\n # performance NMS\n if len(all_tile_bboxes) > 0:\n from mmcv.ops.nms import batched_nms\n dets, keep = batched_nms(all_tile_bboxes[:, :4], all_tile_bboxes[:, 4].contiguous(),\n all_tile_labels, self.test_cfg.rcnn.nms)\n max_num = self.test_cfg.rcnn.max_per_img\n if max_num > 0:\n dets = dets[:max_num]\n keep = keep[:max_num]\n det_bboxes, det_labels = dets, all_tile_labels[keep]\n else:\n det_bboxes, det_labels = torch.zeros((0, 5)), torch.zeros((0,))\n\n from mmdet.core import bbox2result\n bbox_results = bbox2result(det_bboxes, det_labels, num_classes)\n return [bbox_results]\n\n ##################################################################\n\n def onnx_export(self, img, img_metas):\n\n img_shape = torch._shape_as_tensor(img)[2:]\n img_metas[0]['img_shape_for_onnx'] = img_shape\n x = self.extract_feat(img)\n proposals = self.rpn_head.onnx_export(x, img_metas)\n return self.roi_head.onnx_export(x, proposals, img_metas)"
},
{
"identifier": "build_head",
"path": "PointOBB/mmdet/models/builder.py",
"snippet": "def build_head(cfg):\n \"\"\"Build head.\"\"\"\n return HEADS.build(cfg)"
},
{
"identifier": "HEADS",
"path": "PointOBB/mmdet/models/builder.py",
"snippet": "HEADS = MODELS"
},
{
"identifier": "build_loss",
"path": "PointOBB/mmdet/models/builder.py",
"snippet": "def build_loss(cfg):\n \"\"\"Build loss.\"\"\"\n return LOSSES.build(cfg)"
},
{
"identifier": "gen_proposals_from_cfg",
"path": "PointOBB/mmdet/models/detectors/P2BNet.py",
"snippet": "def gen_proposals_from_cfg(gt_points, proposal_cfg, img_meta):\n base_scales = proposal_cfg['base_scales']\n base_ratios = proposal_cfg['base_ratios']\n shake_ratio = proposal_cfg['shake_ratio']\n \n if 'cut_mode' in proposal_cfg:\n cut_mode = proposal_cfg['cut_mode']\n else:\n cut_mode = 'symmetry'\n base_proposal_list = []\n proposals_valid_list = []\n for i in range(len(gt_points)):\n img_h, img_w, _ = img_meta[i]['img_shape']\n if 'base_size' in proposal_cfg:\n base = proposal_cfg['base_size']\n else:\n base = max(img_w, img_h) / 100\n \n base_proposals = []\n for scale in base_scales:\n scale = scale * base # ≈[41, 81, 161, 326, 640, 1280]\n for ratio in base_ratios:\n base_proposals.append(gt_points[i].new_tensor([[scale * ratio, scale / ratio]]))\n\n base_proposals = torch.cat(base_proposals)\n base_proposals = base_proposals.repeat((len(gt_points[i]), 1))\n base_center = torch.repeat_interleave(gt_points[i], len(base_scales) * len(base_ratios), dim=0)\n\n if shake_ratio is not None:\n base_x_l = base_center[:, 0] - shake_ratio * base_proposals[:, 0]\n base_x_r = base_center[:, 0] + shake_ratio * base_proposals[:, 0]\n base_y_t = base_center[:, 1] - shake_ratio * base_proposals[:, 1]\n base_y_d = base_center[:, 1] + shake_ratio * base_proposals[:, 1]\n if cut_mode is not None:\n base_x_l = torch.clamp(base_x_l, 1, img_w - 1)\n base_x_r = torch.clamp(base_x_r, 1, img_w - 1)\n base_y_t = torch.clamp(base_y_t, 1, img_h - 1)\n base_y_d = torch.clamp(base_y_d, 1, img_h - 1)\n\n base_center_l = torch.stack([base_x_l, base_center[:, 1]], dim=1)\n base_center_r = torch.stack([base_x_r, base_center[:, 1]], dim=1)\n base_center_t = torch.stack([base_center[:, 0], base_y_t], dim=1)\n base_center_d = torch.stack([base_center[:, 0], base_y_d], dim=1)\n\n shake_mode = 0\n if shake_mode == 0:\n base_proposals = base_proposals.unsqueeze(1).repeat((1, 5, 1))\n elif shake_mode == 1:\n base_proposals_l = torch.stack([((base_center[:, 0] - base_x_l) * 2 + base_proposals[:, 0]),\n base_proposals[:, 1]], dim=1)\n base_proposals_r = torch.stack([((base_x_r - base_center[:, 0]) * 2 + base_proposals[:, 0]),\n base_proposals[:, 1]], dim=1)\n base_proposals_t = torch.stack([base_proposals[:, 0],\n ((base_center[:, 1] - base_y_t) * 2 + base_proposals[:, 1])], dim=1\n )\n base_proposals_d = torch.stack([base_proposals[:, 0],\n ((base_y_d - base_center[:, 1]) * 2 + base_proposals[:, 1])], dim=1\n )\n base_proposals = torch.stack(\n [base_proposals, base_proposals_l, base_proposals_r, base_proposals_t, base_proposals_d], dim=1)\n\n base_center = torch.stack([base_center, base_center_l, base_center_r, base_center_t, base_center_d], dim=1)\n\n if cut_mode == 'symmetry':\n base_proposals[..., 0] = torch.min(base_proposals[..., 0], 2 * base_center[..., 0])\n base_proposals[..., 0] = torch.min(base_proposals[..., 0], 2 * (img_w - base_center[..., 0]))\n base_proposals[..., 1] = torch.min(base_proposals[..., 1], 2 * base_center[..., 1])\n base_proposals[..., 1] = torch.min(base_proposals[..., 1], 2 * (img_h - base_center[..., 1]))\n\n base_proposals = torch.cat([base_center, base_proposals], dim=-1)\n base_proposals = base_proposals.reshape(-1, 4)\n base_proposals = bbox_cxcywh_to_xyxy(base_proposals)\n proposals_valid = base_proposals.new_full(\n (*base_proposals.shape[:-1], 1), 1, dtype=torch.long).reshape(-1, 1)\n if cut_mode == 'clamp':\n base_proposals[..., 0:4:2] = torch.clamp(base_proposals[..., 0:4:2], 0, img_w)\n base_proposals[..., 1:4:2] = torch.clamp(base_proposals[..., 1:4:2], 0, img_h)\n 
proposals_valid_list.append(proposals_valid)\n if cut_mode == 'symmetry':\n proposals_valid_list.append(proposals_valid)\n elif cut_mode == 'ignore':\n img_xyxy = base_proposals.new_tensor([0, 0, img_w, img_h])\n iof_in_img = bbox_overlaps(base_proposals, img_xyxy.unsqueeze(0), mode='iof')\n proposals_valid = iof_in_img > 0.7\n proposals_valid_list.append(proposals_valid)\n elif cut_mode is None:\n proposals_valid_list.append(proposals_valid)\n base_proposal_list.append(base_proposals)\n\n return base_proposal_list, proposals_valid_list"
},
{
"identifier": "resize_proposal",
"path": "PointOBB/mmdet/models/detectors/utils.py",
"snippet": "def resize_proposal(img_metas, generate_proposals, gt_true_bboxes, gt_bboxes_ignore, ratio = 0.5):\n \n img_meta_out = copy.deepcopy(img_metas)\n generate_proposals_out = []\n gt_true_bboxes_out = []\n gt_bboxes_ignore_out = []\n for i in range(len(img_metas)):\n h, w, c = img_metas[i]['img_shape']\n img_meta_out[i]['img_shape'] = (math.ceil(h * ratio), math.ceil(w * ratio), c)\n img_meta_out[i]['pad_shape'] = (math.ceil(h * ratio), math.ceil(w * ratio), c)\n tmp_proposal = generate_proposals[i] * ratio\n generate_proposals_out.append(tmp_proposal)\n tmp_gt_true_bbox = gt_true_bboxes[i] * ratio\n gt_true_bboxes_out.append(tmp_gt_true_bbox)\n gt_bboxes_ignore_out.append(gt_bboxes_ignore[i]*ratio)\n return generate_proposals_out, gt_true_bboxes_out, img_meta_out, gt_bboxes_ignore_out"
},
{
"identifier": "resize_single_proposal",
"path": "PointOBB/mmdet/models/detectors/utils.py",
"snippet": "def resize_single_proposal(generate_proposals, ratio = 0.5):\n generate_proposals_out = []\n for i in range(len(generate_proposals)):\n tmp_proposal = generate_proposals[i] * ratio\n generate_proposals_out.append(tmp_proposal)\n\n return generate_proposals_out"
},
{
"identifier": "flip_tensor",
"path": "PointOBB/mmdet/models/detectors/utils.py",
"snippet": "def flip_tensor(tensor,\n img_shape: Tuple[int, int],\n direction: str = 'horizontal') -> None:\n \"\"\"Flip boxes horizontally or vertically in-place.\n\n Args:\n img_shape (Tuple[int, int]): A tuple of image height and width.\n direction (str): Flip direction, options are \"horizontal\",\n \"vertical\" and \"diagonal\". Defaults to \"horizontal\"\n \"\"\"\n assert direction in ['horizontal', 'vertical', 'diagonal']\n flipped = tensor\n if direction == 'horizontal':\n flipped[..., 0] = img_shape[1] - flipped[..., 0]\n flipped[..., 4] = -flipped[..., 4]\n elif direction == 'vertical':\n flipped[..., 1] = img_shape[0] - flipped[..., 1]\n flipped[..., 4] = -flipped[..., 4]\n else:\n flipped[..., 0] = img_shape[1] - flipped[..., 0]\n flipped[..., 1] = img_shape[0] - flipped[..., 1]\n return flipped"
},
{
"identifier": "hboxlist2cxcywha",
"path": "PointOBB/mmdet/models/detectors/utils.py",
"snippet": "def hboxlist2cxcywha(bbox_list):\n batch_bbox = []\n\n for i in range(len(bbox_list)):\n gt_box = bbox_list[i]\n # xyxy2cxcywha\n cx = (gt_box[:,0] + gt_box[:,2]) /2\n cy = (gt_box[:,1] + gt_box[:,3]) /2\n w = gt_box[:,2] - gt_box[:,0]\n h = gt_box[:,3] - gt_box[:,1]\n theta = torch.zeros_like(w, dtype=w.dtype)\n gt_box_new = torch.stack([cx, cy, w, h, theta], dim=-1)\n batch_bbox.append(gt_box_new)\n\n return batch_bbox"
},
{
"identifier": "merge_batch_list",
"path": "PointOBB/mmdet/models/detectors/utils.py",
"snippet": "def merge_batch_list(batch_gt_bboxes, batch_proposals):\n merged_list = []\n flag = []\n\n for gt_bboxes, proposals in zip(batch_gt_bboxes, batch_proposals):\n merged_list.append(torch.cat([gt_bboxes, proposals], dim=0))\n flag.append([gt_bboxes.size(0), proposals.size(0)])\n\n return merged_list, flag"
},
{
"identifier": "split_batch_list",
"path": "PointOBB/mmdet/models/detectors/utils.py",
"snippet": "def split_batch_list(merged_list, flags):\n out_list1 = []\n out_list2 = []\n for merged_tensor, flag in zip(merged_list, flags):\n out_list1.append(merged_tensor[:flag[0]])\n out_list2.append(merged_tensor[flag[0]:])\n\n return out_list1, out_list2"
},
{
"identifier": "box_iou_rotated",
"path": "PointOBB/mmdet/models/detectors/utils.py",
"snippet": "def box_iou_rotated(bboxes1: torch.Tensor,\n bboxes2: torch.Tensor,\n mode: str = 'iou',\n aligned: bool = False,\n clockwise: bool = True) -> torch.Tensor:\n \"\"\"Return intersection-over-union (Jaccard index) of boxes.\n\n Both sets of boxes are expected to be in\n (x_center, y_center, width, height, angle) format.\n\n If ``aligned`` is ``False``, then calculate the ious between each bbox\n of bboxes1 and bboxes2, otherwise the ious between each aligned pair of\n bboxes1 and bboxes2.\n\n .. note::\n The operator assumes:\n\n 1) The positive direction along x axis is left -> right.\n\n 2) The positive direction along y axis is top -> down.\n\n 3) The w border is in parallel with x axis when angle = 0.\n\n However, there are 2 opposite definitions of the positive angular\n direction, clockwise (CW) and counter-clockwise (CCW). MMCV supports\n both definitions and uses CW by default.\n\n Please set ``clockwise=False`` if you are using the CCW definition.\n\n The coordinate system when ``clockwise`` is ``True`` (default)\n\n .. code-block:: none\n\n 0-------------------> x (0 rad)\n | A-------------B\n | | |\n | | box h\n | | angle=0 |\n | D------w------C\n v\n y (pi/2 rad)\n\n In such coordination system the rotation matrix is\n\n .. math::\n \\\\begin{pmatrix}\n \\\\cos\\\\alpha & -\\\\sin\\\\alpha \\\\\\\\\n \\\\sin\\\\alpha & \\\\cos\\\\alpha\n \\\\end{pmatrix}\n\n The coordinates of the corner point A can be calculated as:\n\n .. math::\n P_A=\n \\\\begin{pmatrix} x_A \\\\\\\\ y_A\\\\end{pmatrix}\n =\n \\\\begin{pmatrix} x_{center} \\\\\\\\ y_{center}\\\\end{pmatrix} +\n \\\\begin{pmatrix}\\\\cos\\\\alpha & -\\\\sin\\\\alpha \\\\\\\\\n \\\\sin\\\\alpha & \\\\cos\\\\alpha\\\\end{pmatrix}\n \\\\begin{pmatrix} -0.5w \\\\\\\\ -0.5h\\\\end{pmatrix} \\\\\\\\\n =\n \\\\begin{pmatrix} x_{center}-0.5w\\\\cos\\\\alpha+0.5h\\\\sin\\\\alpha\n \\\\\\\\\n y_{center}-0.5w\\\\sin\\\\alpha-0.5h\\\\cos\\\\alpha\\\\end{pmatrix}\n\n\n The coordinate system when ``clockwise`` is ``False``\n\n .. code-block:: none\n\n 0-------------------> x (0 rad)\n | A-------------B\n | | |\n | | box h\n | | angle=0 |\n | D------w------C\n v\n y (-pi/2 rad)\n\n In such coordination system the rotation matrix is\n\n .. math::\n \\\\begin{pmatrix}\n \\\\cos\\\\alpha & \\\\sin\\\\alpha \\\\\\\\\n -\\\\sin\\\\alpha & \\\\cos\\\\alpha\n \\\\end{pmatrix}\n\n The coordinates of the corner point A can be calculated as:\n\n .. math::\n P_A=\n \\\\begin{pmatrix} x_A \\\\\\\\ y_A\\\\end{pmatrix}\n =\n \\\\begin{pmatrix} x_{center} \\\\\\\\ y_{center}\\\\end{pmatrix} +\n \\\\begin{pmatrix}\\\\cos\\\\alpha & \\\\sin\\\\alpha \\\\\\\\\n -\\\\sin\\\\alpha & \\\\cos\\\\alpha\\\\end{pmatrix}\n \\\\begin{pmatrix} -0.5w \\\\\\\\ -0.5h\\\\end{pmatrix} \\\\\\\\\n =\n \\\\begin{pmatrix} x_{center}-0.5w\\\\cos\\\\alpha-0.5h\\\\sin\\\\alpha\n \\\\\\\\\n y_{center}+0.5w\\\\sin\\\\alpha-0.5h\\\\cos\\\\alpha\\\\end{pmatrix}\n\n Args:\n boxes1 (torch.Tensor): rotated bboxes 1. It has shape (N, 5),\n indicating (x, y, w, h, theta) for each row. Note that theta is in\n radian.\n boxes2 (torch.Tensor): rotated bboxes 2. It has shape (M, 5),\n indicating (x, y, w, h, theta) for each row. Note that theta is in\n radian.\n mode (str): \"iou\" (intersection over union) or iof (intersection over\n foreground).\n clockwise (bool): flag indicating whether the positive angular\n orientation is clockwise. default True.\n `New in version 1.4.3.`\n\n Returns:\n torch.Tensor: Return the ious betweens boxes. 
If ``aligned`` is\n ``False``, the shape of ious is (N, M) else (N,).\n \"\"\"\n assert mode in ['iou', 'iof']\n mode_dict = {'iou': 0, 'iof': 1}\n mode_flag = mode_dict[mode]\n rows = bboxes1.size(0)\n cols = bboxes2.size(0)\n if aligned:\n ious = bboxes1.new_zeros(rows)\n else:\n ious = bboxes1.new_zeros(rows * cols)\n if not clockwise:\n flip_mat = bboxes1.new_ones(bboxes1.shape[-1])\n flip_mat[-1] = -1\n bboxes1 = bboxes1 * flip_mat\n bboxes2 = bboxes2 * flip_mat\n bboxes1 = bboxes1.contiguous()\n bboxes2 = bboxes2.contiguous()\n ext_module.box_iou_rotated(\n bboxes1, bboxes2, ious, mode_flag=mode_flag, aligned=aligned)\n if not aligned:\n ious = ious.view(rows, cols)\n return ious"
},
{
"identifier": "obb2poly_np",
"path": "PointOBB/mmdet/models/detectors/utils.py",
"snippet": "def obb2poly_np(rbboxes, version='oc'):\n \"\"\"Convert oriented bounding boxes to polygons.\n\n Args:\n obbs (ndarray): [x_ctr,y_ctr,w,h,angle]\n version (Str): angle representations.\n\n Returns:\n polys (ndarray): [x0,y0,x1,y1,x2,y2,x3,y3]\n \"\"\"\n if version == 'oc':\n results = obb2poly_np_oc(rbboxes)\n elif version == 'le135':\n results = obb2poly_np_le135(rbboxes)\n elif version == 'le90':\n results = obb2poly_np_le90(rbboxes)\n else:\n raise NotImplementedError\n return results"
}
] | import copy
import torch
import numpy as np
import copy
import math
import cv2
import os
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
from mmdet.core.bbox import bbox_xyxy_to_cxcywh
from mmdet.core import bbox_cxcywh_to_xyxy
from mmdet.core.bbox.iou_calculators import bbox_overlaps
from ..builder import build_head
from torch.nn import functional as F
from ..builder import HEADS, build_loss
from typing import Tuple, Union
from torch import Tensor
from torch.nn.functional import grid_sample
from torchvision import transforms
from .P2BNet import gen_proposals_from_cfg
from .utils import resize_proposal, resize_single_proposal, flip_tensor, hboxlist2cxcywha \
,merge_batch_list, split_batch_list, box_iou_rotated, obb2poly_np | 9,067 | pps_angle = pps_angle.unsqueeze(2).expand((pps_center.size()[0], pps_center.size()[1], pps_center.size()[2], 1))
pps = torch.cat([pps_center, pps_wh, pps_angle], dim=-1)
pps = pps.reshape(pps.shape[0], -1, 5)
pps_new.append(pps.reshape(*pps_old.shape[0:2], -1, 5))
pps_new = torch.cat(pps_new, dim=2)
else:
pps_new = pps_old
h, w, _ = img_meta[i]['img_shape']
            if cut_mode == 'clamp':
pps_new[..., 0:4:2] = torch.clamp(pps_new[..., 0:4:2], 0, w)
pps_new[..., 1:4:2] = torch.clamp(pps_new[..., 1:4:2], 0, h)
proposals_valid_list.append(pps_new.new_full(
(*pps_new.shape[0:3], 1), 1, dtype=torch.long).reshape(-1, 1))
else:
rot_theta = base_boxes[:,-1].mean()
img_xywh = pps_new.new_tensor([w/2, h/2, w, h, rot_theta]) # (cx,cy,w,h,theta)
iof_in_img = box_iou_rotated(pps_new.reshape(-1, 5), img_xywh.unsqueeze(0), mode='iof')
proposals_valid = iof_in_img > 0.8
proposals_valid_list.append(proposals_valid)
proposal_list.append(pps_new.reshape(-1, 5))
return proposal_list, proposals_valid_list
def gen_rotate_negative_proposals(gt_points, proposal_cfg, aug_generate_proposals, img_meta):
num_neg_gen = proposal_cfg['gen_num_neg']
if num_neg_gen == 0:
return None, None
neg_proposal_list = []
neg_weight_list = []
device = gt_points[0].device
for i in range(len(gt_points)):
pos_box = aug_generate_proposals[i]
h, w, _ = img_meta[i]['img_shape']
x1 = -0.2 * w + torch.rand(num_neg_gen) * (1.2 * w)
y1 = -0.2 * h + torch.rand(num_neg_gen) * (1.2 * h)
x2 = x1 + torch.rand(num_neg_gen) * (1.2 * w - x1)
y2 = y1 + torch.rand(num_neg_gen) * (1.2 * h - y1)
neg_theta = torch.ones_like(x1)*(pos_box[:,-1].mean().cpu())
neg_bboxes = torch.stack([(x1 + x2) / 2, (y1 + y2) / 2,
x2 - x1, y2 - y1, neg_theta], dim=1).to(device)
iou = box_iou_rotated(neg_bboxes, pos_box)
neg_weight = ((iou < 0.3).sum(dim=1) == iou.shape[1])
neg_proposal_list.append(neg_bboxes)
neg_weight_list.append(neg_weight)
return neg_proposal_list, neg_weight_list
def resize_rotate_proposal(img_metas,
batch_gt_bboxes,
batch_proposals,
gt_true_bboxes,
gt_bboxes_ignore,
ratio = 0.5):
'''
batch_gt_bboxes_all: [batch_size, num_proposals, 5] [cx,cy,w,h,a]
batch_proposals_all: [batch_size, num_proposals, 5] [cx,cy,w,h,a]
'''
img_meta_out = copy.deepcopy(img_metas)
batch_gt_bboxes_out = []
batch_proposals_out =[]
gt_true_bboxes_out = []
gt_bboxes_ignore_out = []
for i in range(len(img_metas)):
h, w, c = img_metas[i]['img_shape']
img_meta_out[i]['img_shape'] = (math.ceil(h * ratio), math.ceil(w * ratio), c)
img_meta_out[i]['pad_shape'] = (math.ceil(h * ratio), math.ceil(w * ratio), c)
tmp_gt_bboxes = batch_gt_bboxes[i].clone()
tmp_gt_bboxes[:,:4] = tmp_gt_bboxes[:,:4] * ratio
batch_gt_bboxes_out.append(tmp_gt_bboxes)
tmp_proposal = batch_proposals[i].clone()
tmp_proposal[:,:4] = tmp_proposal[:,:4] * ratio
batch_proposals_out.append(tmp_proposal)
tmp_gt_true_bbox = gt_true_bboxes[i].clone()
tmp_gt_true_bbox[:,:4] = tmp_gt_true_bbox[:,:4] * ratio
gt_true_bboxes_out.append(tmp_gt_true_bbox)
tmp_gt_bboxes_ignore = gt_bboxes_ignore[i].clone()
if gt_bboxes_ignore[i].size(0) != 0:
            tmp_gt_bboxes_ignore[:, :4] = tmp_gt_bboxes_ignore[:, :4] * ratio
gt_bboxes_ignore_out.append(tmp_gt_bboxes_ignore)
return img_meta_out, batch_gt_bboxes_out, batch_proposals_out, gt_true_bboxes_out, gt_bboxes_ignore_out
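# --- Illustrative sketch (not part of the original repository code) ---
# resize_rotate_proposal() above scales the image meta and the first four box
# entries (cx, cy, w, h) by `ratio` while leaving the angle untouched. A minimal
# sanity check, assuming 2D (N, 5) box tensors, could look like:
def _example_check_resize(batch_gt_bboxes, resized_gt_bboxes, ratio=0.5):
    for before, after in zip(batch_gt_bboxes, resized_gt_bboxes):
        assert torch.allclose(after[:, :4], before[:, :4] * ratio)  # geometry scaled
        assert torch.allclose(after[:, 4], before[:, 4])            # angle preserved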
@DETECTORS.register_module()
class PointOBB(TwoStageDetector):
def __init__(self,
backbone,
roi_head,
train_cfg,
test_cfg,
construct_view = True,
construct_resize = False,
loss_diff_view=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0),
crop_size = (1024, 1024),
padding = 'reflection',
view_range: Tuple[float, float] = (0.25, 0.75),
bbox_head=None,
neck=None,
pretrained=None,
init_cfg=None):
super(PointOBB, self).__init__(
backbone=backbone,
neck=neck,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
self.num_stages = roi_head.num_stages
self.stage = 0
print(f'========={self.stage}===========')
if bbox_head is not None:
self.with_bbox_head = True
self.bbox_head = build_head(bbox_head)
self.crop_size = crop_size
self.padding = padding
self.view_range = view_range
|
# from mmdet.datasets.utils import obb2poly_np
def resize_image(inputs, resize_ratio=0.5):
down_inputs = F.interpolate(inputs,
scale_factor=resize_ratio,
mode='nearest')
return down_inputs
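# --- Illustrative example (not part of the original repository code) ---
# resize_image() is a thin wrapper around F.interpolate with nearest-neighbour
# sampling, so with the default ratio a (N, C, H, W) tensor halves spatially:
#   resize_image(torch.zeros(2, 3, 1024, 1024)).shape == torch.Size([2, 3, 512, 512])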
def fine_rotate_proposals_from_cfg(pseudo_boxes, fine_proposal_cfg, img_meta, stage):
gen_mode = fine_proposal_cfg['gen_proposal_mode']
# cut_mode = fine_proposal_cfg['cut_mode']
cut_mode = None
if isinstance(fine_proposal_cfg['base_ratios'], tuple):
base_ratios = fine_proposal_cfg['base_ratios'][stage - 1]
shake_ratio = fine_proposal_cfg['shake_ratio'][stage - 1]
else:
base_ratios = fine_proposal_cfg['base_ratios']
shake_ratio = fine_proposal_cfg['shake_ratio']
if gen_mode == 'fix_gen':
proposal_list = []
proposals_valid_list = []
for i in range(len(img_meta)):
pps = []
base_boxes = pseudo_boxes[i]
for ratio_w in base_ratios:
for ratio_h in base_ratios:
base_boxes_ = base_boxes.clone()
base_boxes_[:, 2] *= ratio_w
base_boxes_[:, 3] *= ratio_h
pps.append(base_boxes_.unsqueeze(1))
pps_old = torch.cat(pps, dim=1)
if shake_ratio is not None:
pps_new = []
pps_new.append(pps_old.reshape(*pps_old.shape[0:2], -1, 5))
for ratio in shake_ratio:
pps = pps_old.clone()
pps_center = pps[:, :, :2]
pps_wh = pps[:, :, 2:4]
pps_angle = pps[:, :, 4].unsqueeze(2)
pps_x_l = pps_center[:, :, 0] - ratio * pps_wh[:, :, 0]
pps_x_r = pps_center[:, :, 0] + ratio * pps_wh[:, :, 0]
pps_y_t = pps_center[:, :, 1] - ratio * pps_wh[:, :, 1]
pps_y_d = pps_center[:, :, 1] + ratio * pps_wh[:, :, 1]
pps_center_l = torch.stack([pps_x_l, pps_center[:, :, 1]], dim=-1)
pps_center_r = torch.stack([pps_x_r, pps_center[:, :, 1]], dim=-1)
pps_center_t = torch.stack([pps_center[:, :, 0], pps_y_t], dim=-1)
pps_center_d = torch.stack([pps_center[:, :, 0], pps_y_d], dim=-1)
pps_center = torch.stack([pps_center_l, pps_center_r, pps_center_t, pps_center_d], dim=2)
pps_wh = pps_wh.unsqueeze(2).expand(pps_center.shape)
pps_angle = pps_angle.unsqueeze(2).expand((pps_center.size()[0], pps_center.size()[1], pps_center.size()[2], 1))
pps = torch.cat([pps_center, pps_wh, pps_angle], dim=-1)
pps = pps.reshape(pps.shape[0], -1, 5)
pps_new.append(pps.reshape(*pps_old.shape[0:2], -1, 5))
pps_new = torch.cat(pps_new, dim=2)
else:
pps_new = pps_old
h, w, _ = img_meta[i]['img_shape']
            if cut_mode == 'clamp':
pps_new[..., 0:4:2] = torch.clamp(pps_new[..., 0:4:2], 0, w)
pps_new[..., 1:4:2] = torch.clamp(pps_new[..., 1:4:2], 0, h)
proposals_valid_list.append(pps_new.new_full(
(*pps_new.shape[0:3], 1), 1, dtype=torch.long).reshape(-1, 1))
else:
rot_theta = base_boxes[:,-1].mean()
img_xywh = pps_new.new_tensor([w/2, h/2, w, h, rot_theta]) # (cx,cy,w,h,theta)
iof_in_img = box_iou_rotated(pps_new.reshape(-1, 5), img_xywh.unsqueeze(0), mode='iof')
proposals_valid = iof_in_img > 0.8
proposals_valid_list.append(proposals_valid)
proposal_list.append(pps_new.reshape(-1, 5))
return proposal_list, proposals_valid_list
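# --- Illustrative note (not part of the original repository code) ---
# For each pseudo box, fine_rotate_proposals_from_cfg() builds a grid of
# len(base_ratios) ** 2 rescaled boxes; when shake_ratio is set, every such box
# is additionally shifted left/right/up/down once per shake ratio, so the
# per-box proposal count becomes len(base_ratios) ** 2 * (1 + 4 * len(shake_ratio)).
# For example, base_ratios=(0.5, 1.0, 1.5) with shake_ratio=(0.1,) yields
# 3 * 3 * (1 + 4) = 45 rotated proposals per pseudo box (example values only).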
def gen_rotate_negative_proposals(gt_points, proposal_cfg, aug_generate_proposals, img_meta):
num_neg_gen = proposal_cfg['gen_num_neg']
if num_neg_gen == 0:
return None, None
neg_proposal_list = []
neg_weight_list = []
device = gt_points[0].device
for i in range(len(gt_points)):
pos_box = aug_generate_proposals[i]
h, w, _ = img_meta[i]['img_shape']
x1 = -0.2 * w + torch.rand(num_neg_gen) * (1.2 * w)
y1 = -0.2 * h + torch.rand(num_neg_gen) * (1.2 * h)
x2 = x1 + torch.rand(num_neg_gen) * (1.2 * w - x1)
y2 = y1 + torch.rand(num_neg_gen) * (1.2 * h - y1)
neg_theta = torch.ones_like(x1)*(pos_box[:,-1].mean().cpu())
neg_bboxes = torch.stack([(x1 + x2) / 2, (y1 + y2) / 2,
x2 - x1, y2 - y1, neg_theta], dim=1).to(device)
iou = box_iou_rotated(neg_bboxes, pos_box)
neg_weight = ((iou < 0.3).sum(dim=1) == iou.shape[1])
neg_proposal_list.append(neg_bboxes)
neg_weight_list.append(neg_weight)
return neg_proposal_list, neg_weight_list
def resize_rotate_proposal(img_metas,
batch_gt_bboxes,
batch_proposals,
gt_true_bboxes,
gt_bboxes_ignore,
ratio = 0.5):
'''
batch_gt_bboxes_all: [batch_size, num_proposals, 5] [cx,cy,w,h,a]
batch_proposals_all: [batch_size, num_proposals, 5] [cx,cy,w,h,a]
'''
img_meta_out = copy.deepcopy(img_metas)
batch_gt_bboxes_out = []
batch_proposals_out =[]
gt_true_bboxes_out = []
gt_bboxes_ignore_out = []
for i in range(len(img_metas)):
h, w, c = img_metas[i]['img_shape']
img_meta_out[i]['img_shape'] = (math.ceil(h * ratio), math.ceil(w * ratio), c)
img_meta_out[i]['pad_shape'] = (math.ceil(h * ratio), math.ceil(w * ratio), c)
tmp_gt_bboxes = batch_gt_bboxes[i].clone()
tmp_gt_bboxes[:,:4] = tmp_gt_bboxes[:,:4] * ratio
batch_gt_bboxes_out.append(tmp_gt_bboxes)
tmp_proposal = batch_proposals[i].clone()
tmp_proposal[:,:4] = tmp_proposal[:,:4] * ratio
batch_proposals_out.append(tmp_proposal)
tmp_gt_true_bbox = gt_true_bboxes[i].clone()
tmp_gt_true_bbox[:,:4] = tmp_gt_true_bbox[:,:4] * ratio
gt_true_bboxes_out.append(tmp_gt_true_bbox)
tmp_gt_bboxes_ignore = gt_bboxes_ignore[i].clone()
if gt_bboxes_ignore[i].size(0) != 0:
            tmp_gt_bboxes_ignore[:, :4] = tmp_gt_bboxes_ignore[:, :4] * ratio
gt_bboxes_ignore_out.append(tmp_gt_bboxes_ignore)
return img_meta_out, batch_gt_bboxes_out, batch_proposals_out, gt_true_bboxes_out, gt_bboxes_ignore_out
@DETECTORS.register_module()
class PointOBB(TwoStageDetector):
def __init__(self,
backbone,
roi_head,
train_cfg,
test_cfg,
construct_view = True,
construct_resize = False,
loss_diff_view=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0),
crop_size = (1024, 1024),
padding = 'reflection',
view_range: Tuple[float, float] = (0.25, 0.75),
bbox_head=None,
neck=None,
pretrained=None,
init_cfg=None):
super(PointOBB, self).__init__(
backbone=backbone,
neck=neck,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
self.num_stages = roi_head.num_stages
self.stage = 0
print(f'========={self.stage}===========')
if bbox_head is not None:
self.with_bbox_head = True
self.bbox_head = build_head(bbox_head)
self.crop_size = crop_size
self.padding = padding
self.view_range = view_range | self.loss_diff_view = build_loss(loss_diff_view) | 4 | 2023-11-20 07:50:12+00:00 | 12k |
ModelTC/EasyLLM | llm/runners/hf_runner.py | [
{
"identifier": "load_yaml",
"path": "llm/utils/general/yaml_loader.py",
"snippet": "def load_yaml(path):\n with open(path, \"r\")as f:\n yaml_data = yaml.load(f, IncludeLoader)\n # TODO check_cfg\n # cfg check\n return yaml_data"
},
{
"identifier": "parse_args",
"path": "llm/utils/general/parser_helper.py",
"snippet": "def parse_args():\n \"\"\"Parse all arguments.\"\"\"\n parser = argparse.ArgumentParser(description='Megatron-LM Arguments',\n allow_abbrev=False)\n\n # Standard arguments.\n parser = _add_training_args(parser)\n parser = _add_inference_args(parser)\n parser = _add_medusa_args(parser)\n parser = _add_distributed_args(parser)\n parser = deepspeed.add_config_arguments(parser)\n\n args = parser.parse_args()\n\n return args"
},
{
"identifier": "build_optimizer",
"path": "llm/utils/model/optimizer_helper.py",
"snippet": "def build_optimizer(cfg_optim, model, deepspeed=True):\n if cfg_optim.get('cpu_optimizer', False):\n raise NotImplementedError('need to add cpu adam')\n\n # Base optimizer.\n param_groups = _get_params_for_weight_decay_optimization(model)\n param_groups = filter_freeze_param_groups(param_groups)\n\n optim_type = cfg_optim['type']\n cfg_optim['kwargs']['params'] = param_groups\n if optim_type == 'Adam8bit':\n try:\n import bitsandbytes as bnb\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\n \"Please install bitsandbytes from https://github.com/facebookresearch/bitsandbytes.\")\n optimizer = build_cls_instance(bnb.optim, cfg_optim)\n elif cfg_optim['type'] in ['FusedAdam', 'FusedSGD', 'FusedNovoGrad']:\n import apex\n optimizer = build_cls_instance(apex.optimizers, cfg_optim)\n elif cfg_optim['type'] in ['SophiaG']:\n optimizer = SophiaG(**cfg_optim['kwargs'])\n else:\n optimizer = build_cls_instance(torch.optim, cfg_optim)\n\n if deepspeed:\n return optimizer\n else:\n raise NotImplementedError"
},
{
"identifier": "build_learning_rate_scheduler",
"path": "llm/utils/model/lr_helper.py",
"snippet": "def build_learning_rate_scheduler(cfg_lr, optimizer):\n cfg_lr['kwargs'].update({'optimizer': optimizer})\n return LR_REGISTRY.build(cfg_lr)"
},
{
"identifier": "build_hooks",
"path": "llm/utils/general/hook_helper.py",
"snippet": "def build_hooks(runner, cfg_list, is_train=True, add_log_if_not_exists=True):\n\n def add_log_hook(cfg_hooks):\n exists = any(['train_val_logger' in cfg['type'] for cfg in cfg_hooks])\n if not exists:\n cfg_hooks.insert(0, {\n 'type': 'train_val_logger',\n 'kwargs': {}\n })\n return cfg_hooks\n\n def build_single_hook(cfg):\n cfg = copy.deepcopy(cfg)\n kwargs = cfg.setdefault('kwargs', {})\n kwargs['runner'] = runner\n return HOOK_REGISTRY.build(cfg)\n\n if add_log_if_not_exists:\n cfg_list = add_log_hook(cfg_list)\n if not is_train:\n # TODO: add remove hooks\n pass\n\n hooks = [build_single_hook(cfg) for cfg in cfg_list]\n return ComposeHook(hooks)"
},
{
"identifier": "default_logger",
"path": "llm/utils/general/log_helper.py",
"snippet": "BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)\nRESET_SEQ = \"\\033[0m\"\nCOLOR_SEQ = \"\\033[1;%dm\"\nCOLORS = {\n 'WARNING': YELLOW,\n 'INFO': WHITE,\n 'DEBUG': BLUE,\n 'CRITICAL': YELLOW,\n 'ERROR': RED\n}\nMASTER_RANK = 0\ndef is_master():\ndef basicConfig(*args, **kwargs):\n def __init__(self, msg, use_color=True):\n def format(self, record):\ndef init_log(name='global', level=logging.INFO):\nclass ColoredFormatter(logging.Formatter):"
},
{
"identifier": "build_tokenizer",
"path": "llm/data/tokenizer.py",
"snippet": "def build_tokenizer(_cfg_tokenizer):\n cfg_tokenizer = copy.deepcopy(_cfg_tokenizer)\n pad_vocab_size_to = cfg_tokenizer.pop('pad_vocab_size_to', None)\n type = cfg_tokenizer['type']\n tokenizer_name_or_path = cfg_tokenizer['kwargs'].pop('tokenizer_name_or_path')\n tokenizer = TOKENIZER_REGISTRY[type].from_pretrained(tokenizer_name_or_path, **cfg_tokenizer['kwargs'])\n if 'special_tokens' in cfg_tokenizer:\n special_tokens = cfg_tokenizer.get('special_tokens')\n tokenizer.add_special_tokens(special_tokens)\n # Add vocab size.\n padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size,\n pad_vocab_size_to)\n setattr(tokenizer, 'padded_vocab_size', padded_vocab_size)\n return tokenizer"
},
{
"identifier": "setup_distributed",
"path": "llm/utils/env/hf_dist_helper.py",
"snippet": "def setup_distributed(launcher='slurm', backend='nccl', port=13333):\n if launcher == 'torch':\n os.environ['LAUNCHER'] = 'torch'\n device = setup_distributed_torch()\n elif launcher == 'slurm':\n device = setup_distributed_slurm(backend, port)\n else:\n device = setup_distributed_mpi(backend=backend, port=port)\n return device"
},
{
"identifier": "get_world_size",
"path": "llm/utils/env/hf_dist_helper.py",
"snippet": "def get_world_size(*args, **kwargs):\n if \"LAUNCHER\" not in os.environ:\n world_size = get_world_size_from_env()\n if world_size is not None:\n return world_size\n return get_dist_world_size(*args, **kwargs)"
},
{
"identifier": "build_batch_collator",
"path": "llm/utils/general/hf_build_utils.py",
"snippet": "def build_batch_collator(cfg_batch_collator, tokenizer):\n if 'kwargs' not in cfg_batch_collator:\n cfg_batch_collator['kwargs'] = {}\n cfg_batch_collator['kwargs']['tokenizer'] = tokenizer\n return BATCH_COLLECTOR_REGISTRY.build(cfg_batch_collator)"
},
{
"identifier": "build_dataloader",
"path": "llm/utils/general/hf_build_utils.py",
"snippet": "def build_dataloader(cfg_data, dataset, batch_collator):\n batch_sampler = build_batch_sampler(cfg_data['batch_sampler'], dataset)\n cfg_data['data_loader']['kwargs'].update({'dataset': dataset,\n 'batch_sampler': batch_sampler,\n 'batch_collator': batch_collator})\n return DATALOADER_REGISTRY.build(cfg_data['data_loader'])"
},
{
"identifier": "build_dataset",
"path": "llm/utils/general/hf_build_utils.py",
"snippet": "def build_dataset(cfg_dataset, tokenizer):\n if 'kwargs' not in cfg_dataset:\n cfg_dataset['kwargs'] = {}\n cfg_dataset['kwargs']['tokenizer'] = tokenizer\n return DATASET_REGISTRY.build(cfg_dataset)"
},
{
"identifier": "build_model",
"path": "llm/utils/general/hf_build_utils.py",
"snippet": "def build_model(model_cfg):\n fast_device = torch.device('cuda')\n with fast_init(fast_device):\n peft_model_cfg = model_cfg.get('peft_model_cfg', None)\n model = MODULE_ZOO_REGISTRY.build(model_cfg)\n if peft_model_cfg is not None:\n model = build_peft_model(peft_model_cfg, model)\n return model"
},
{
"identifier": "hack_model",
"path": "llm/utils/general/hf_build_utils.py",
"snippet": "def hack_model(model):\n def hack_custom_forward(module, *args, **kwargs):\n output = module(*args, **kwargs)\n output.requires_grad = True\n return output\n\n def common_cast_forward(m, *args, **kwargs):\n old_forward = m.forward\n\n def forward(*args, **kwargs):\n return hack_custom_forward(old_forward, *args, **kwargs)\n m.forward = forward\n\n for _, m in model.named_modules():\n if isinstance(m, torch.nn.Embedding):\n common_cast_forward(m)\n logger.info(\"set nn.Embedding output requires_grad=True for gradient checkpointing\")"
},
{
"identifier": "build_augmentation",
"path": "llm/utils/general/hf_build_utils.py",
"snippet": "def build_augmentation(cfg):\n if 'template' in cfg['kwargs']:\n cfg['kwargs'].pop('template')\n return AUGMENTATION_REGISTRY.build(cfg)"
},
{
"identifier": "hf_inference",
"path": "llm/utils/general/hf_utils.py",
"snippet": "def hf_inference(config, model, sense_tokenization, device, args):\n generation_cfg = config[\"generation_cfg\"]\n tokenizer = sense_tokenization.parser.tokenizer\n pad_token_id = len(tokenizer) - 1\n history_metas = []\n with torch.no_grad():\n if args.generate_mode == \"interactive\":\n system_flag = False\n while True:\n logger.info(\"请输入问题,退出请输入 quit\")\n raw_input_text = input()\n input_meta = {}\n if system_flag:\n input_meta['content'] = raw_input_text\n input_meta['role'] = \"system\"\n history_metas.append(input_meta)\n system_flag = False\n continue\n\n if len(raw_input_text.strip()) == 0:\n break\n if raw_input_text == 'quit':\n break\n if raw_input_text == 'system':\n system_flag = True\n continue\n if raw_input_text == \"clean\":\n history_metas = []\n continue\n\n if hasattr(sense_tokenization.parser, 'build_inference_meta'):\n prompt = sense_tokenization.parser.build_inference_meta(raw_input_text, history_metas)\n context_tokens, _ = sense_tokenization(prompt)\n else:\n context_tokens, _ = sense_tokenization({\"text\": raw_input_text, \"dialog_history\": history_metas})\n context_tokens = torch.LongTensor([context_tokens])\n attention_mask = context_tokens.ne(pad_token_id)\n\n generation_output = model.generate(\n input_ids=context_tokens.to(device),\n attention_mask=attention_mask.to(device),\n eos_token_id=tokenizer.eos_token_id,\n pad_token_id=tokenizer.pad_token_id,\n **generation_cfg\n )\n s = generation_output[0]\n output = tokenizer.decode(s, skip_special_tokens=True)\n logger.info(f\"SenseChat: {output}\")\n\n input_meta['content'] = raw_input_text\n input_meta['role'] = 'user'\n history_metas.append(input_meta)\n out_meta = {}\n out_meta['content'] = output\n out_meta['role'] = 'assistant'\n history_metas.append(out_meta)\n elif args.generate_mode == \"eval\":\n samples = []\n eval_task = config.get(\"eval_task\", \"base\")\n question_file = config.get(\"question_file\", \"questions.jsonl\")\n result_file = config.get(\"result_file\", \"results.jsonl\")\n # load dataset\n eval_dataset = EvalDataset(eval_task, question_file)\n dist_dataset = SampleEvalDataset(eval_dataset)\n iter_datasets = dist_dataset.get_items()\n # generate tokens\n for _ in tqdm(range(len(dist_dataset)), desc='Processing'):\n task_id, prompt, answer = next(iter_datasets)\n if hasattr(sense_tokenization.parser, 'build_inference_meta'):\n prompt = sense_tokenization.parser.build_inference_meta(prompt, history_metas)\n context_tokens, _ = sense_tokenization(prompt)\n else:\n context_tokens, _ = sense_tokenization({\"text\": prompt, \"dialog_history\": history_metas})\n context_tokens = torch.LongTensor([context_tokens])\n attention_mask = context_tokens.ne(pad_token_id)\n\n generation_output = model.generate(\n input_ids=context_tokens.to(device),\n max_new_tokens=generation_cfg[\"max_new_tokens\"]\n )\n # generation_output = model.generate(\n # input_ids=context_tokens.to(device),\n # attention_mask=attention_mask.to(device),\n # eos_token_id=tokenizer.eos_token_id,\n # pad_token_id=tokenizer.pad_token_id,\n # **generation_cfg\n # )\n s = generation_output[0]\n accept_length = s.numel() - context_tokens.numel()\n output = tokenizer.decode(s, skip_special_tokens=True)\n actual_input = tokenizer.decode(context_tokens.to(device)[0], skip_special_tokens=True)\n raw_output = output.split(actual_input)[-1]\n infos = {\n \"count\": accept_length,\n \"accept_length\": accept_length,\n \"ave_accept_length\": 1\n }\n # postprocess output\n output = text_postprocess(raw_output, eval_task)\n if 
eval_task == \"human_eval\":\n samples.append(\n dict(task_id=task_id, completion=output)\n )\n elif eval_task in [\"cmmlu\", \"ceval\", \"base\"]:\n samples.append(\n dict(\n task_id=task_id,\n input=prompt,\n output=output,\n raw_output=raw_output,\n answer=answer,\n infos=infos)\n )\n dist_barrier()\n\n samples_list = all_gather(samples)\n all_samples = []\n for temps in samples_list:\n all_samples.extend(temps)\n if get_rank() == 0:\n # save results\n save_results(result_file, all_samples, eval_task)\n # evaluate\n evaluate(result_file, eval_task)"
},
{
"identifier": "hf_inference_multimodal",
"path": "llm/utils/general/hf_utils.py",
"snippet": "def hf_inference_multimodal(config, model, sense_tokenization, device, args):\n\n def get_input_format(string):\n _string = string.split('/img/')\n input_format = []\n for i in _string:\n if '/img_end/' in i:\n input_format.append({'image' : i.split('/img_end/')[0]})\n if i.split('/img_end/')[1]:\n input_format.append({'text' : i.split('/img_end/')[1]})\n else:\n if i:\n input_format.append({'text' : i})\n return input_format\n\n generation_cfg = config[\"generation_cfg\"]\n tokenizer = sense_tokenization.parser.tokenizer\n history_metas = []\n with torch.no_grad():\n if args.generate_mode == \"interactive\":\n system_flag = False\n while True:\n logger.info(\" 如果内容中包含图片路径,请在路径前后分别加入 /img/ 和 /img_end/, 示例如 /img/pathto/yourpic.jpeg/img_end/please describe the image\")\n logger.info(\"请输入问题,退出请输入 quit,\")\n raw_input_text = input()\n input_meta = {}\n if system_flag:\n input_meta['content'] = raw_input_text\n input_meta['role'] = \"system\"\n history_metas.append(input_meta)\n system_flag = False\n continue\n if len(raw_input_text.strip()) == 0:\n break\n if raw_input_text == 'quit':\n break\n if raw_input_text == 'system':\n system_flag = True\n continue\n if raw_input_text == \"clean\":\n history_metas = []\n continue\n input_format = get_input_format(raw_input_text)\n query = tokenizer.from_list_format(input_format)\n response, history_metas = model.chat(tokenizer, query=query, history=history_metas, generation_cfg=generation_cfg)\n logger.info(f\"SenseChat: {response}\")\n elif args.generate_mode == \"eval\":\n raise NotImplementedError(\"Not implementented for multimodal eval\")"
},
{
"identifier": "load_from_ds",
"path": "llm/utils/general/hf_utils.py",
"snippet": "def load_from_ds(runner, load_cfg):\n resume_from_checkpoint = load_cfg['load_path']\n deepspeed_checkpoint_dirs = []\n if resume_from_checkpoint is not None:\n import glob\n deepspeed_checkpoint_dirs = sorted(glob.glob(f\"{resume_from_checkpoint}/global_step*\"))\n if len(deepspeed_checkpoint_dirs) <= 0:\n deepspeed_checkpoint_dirs = sorted(glob.glob(f\"{resume_from_checkpoint}/global-latest\"))\n logger.info(f\"Resuming deepspeed weights from {resume_from_checkpoint}\")\n load_optim = load_cfg.get('load_optim', False)\n if len(deepspeed_checkpoint_dirs) > 0:\n # this magically updates self.optimizer and self.lr_scheduler\n load_path, state_dict = runner.model.load_checkpoint(\n resume_from_checkpoint, load_optimizer_states=load_optim, load_lr_scheduler_states=load_optim\n )\n runner.start_iter = state_dict['iteration']\n if load_path is None:\n raise ValueError(f\"[deepspeed] failed to resume from checkpoint {resume_from_checkpoint}\")\n else:\n logger.info(f\"[deepspeed] Can't find checkpoint from checkpoint {resume_from_checkpoint}\")"
},
{
"identifier": "load_from_hf",
"path": "llm/utils/general/hf_utils.py",
"snippet": "def load_from_hf(runner, load_cfg):\n load_dir = load_cfg['load_path']\n WEIGHTS_NAME = \"pytorch_model.bin\"\n OPTIMIZER_NAME = \"optimizer.pt\"\n SCHEDULER_NAME = \"scheduler.pt\"\n SCALER_NAME = \"scaler.pt\"\n weights_file = os.path.join(load_dir, WEIGHTS_NAME)\n if os.path.isfile(weights_file):\n state_dict = torch.load(weights_file, map_location=\"cpu\")\n runner.model.load_state_dict(state_dict, False)\n del state_dict\n else:\n runner.load_sharded_checkpoint(load_dir)\n logger.info(\"Loading checkpoint done.\")\n if load_cfg.get('load_optim', False):\n # load trainer\n checkpoint_file_exists = os.path.isfile(os.path.join(load_dir, OPTIMIZER_NAME))\n if checkpoint_file_exists and os.path.isfile(os.path.join(load_dir, SCHEDULER_NAME)):\n map_location = \"cuda\" if get_world_size() > 1 else \"cpu\"\n runner.optimizer.load_state_dict(\n torch.load(os.path.join(load_dir, OPTIMIZER_NAME), map_location=map_location)\n )\n logger.info(\"Loading optimizer done.\")\n runner.lr_scheduler.load_state_dict(torch.load(os.path.join(load_dir, SCHEDULER_NAME)))\n logger.info(\"Loading lr_scheduler done.\")\n runner.scaler.load_state_dict(torch.load(os.path.join(load_dir, SCALER_NAME)))\n logger.info(\"Loading scaler done.\")\n if load_cfg.get('load_rng_state', False):\n # load rng\n if get_world_size() > 1:\n rng_file = os.path.join(load_dir, f\"rng_state_{get_rank()}.pth\")\n else:\n rng_file = os.path.join(load_dir, \"rng_state.pth\")\n checkpoint_rng_state = torch.load(rng_file)\n random.setstate(checkpoint_rng_state[\"python\"])\n np.random.set_state(checkpoint_rng_state[\"numpy\"])\n torch.random.set_rng_state(checkpoint_rng_state[\"cpu\"])\n if torch.cuda.is_available():\n if get_world_size() > 1:\n torch.cuda.random.set_rng_state_all(checkpoint_rng_state[\"cuda\"])\n else:\n torch.cuda.random.set_rng_state(checkpoint_rng_state[\"cuda\"])\n logger.info(\"Loading rng_state done.\")"
},
{
"identifier": "save_hf_checkpoint",
"path": "llm/utils/general/hf_utils.py",
"snippet": "def save_hf_checkpoint(runner, save_cfg, global_step, state_dict=None):\n PREFIX_CHECKPOINT_DIR = \"checkpoint\"\n WEIGHTS_NAME = \"pytorch_model.bin\"\n checkpoint_folder = f\"{PREFIX_CHECKPOINT_DIR}-{global_step}\"\n run_dir = save_cfg.get('save_path', \"checkpoints\")\n\n output_dir = os.path.join(run_dir, checkpoint_folder)\n os.makedirs(output_dir, exist_ok=True)\n logger.info(f\"Saving model checkpoint to {output_dir}\")\n if \"CEPHBUCKET\" in os.environ and os.environ.get(\"CEPHBUCKET\") is not None:\n save_function = ceph_save\n else:\n save_function = torch.save\n if isinstance(runner.model, DDP):\n runner.model.module.save_pretrained(\n output_dir, state_dict=state_dict, safe_serialization=False, save_function=save_function\n )\n else:\n runner.model.save_pretrained(\n output_dir, state_dict=state_dict, safe_serialization=False, save_function=save_function\n )\n logger.info(\"Saving model state dict done.\")\n\n if runner.tokenizer is not None:\n try:\n if hasattr(runner.tokenizer, \"tokenizer\"):\n runner.tokenizer.tokenizer.save_pretrained(output_dir)\n else:\n runner.tokenizer.save_pretrained(output_dir)\n logger.info(\"Saving tokenizer done.\")\n except Exception:\n logger.warning(\"Failed to saving tokenizer done!!!\")\n\n if os.environ.get(\"CEPHBUCKET\", None) is not None:\n all_files = os.listdir(output_dir)\n for file_path in all_files:\n if file_path.endswith('.' + WEIGHTS_NAME.split('.')[-1]):\n continue\n local_path = os.path.join(output_dir, file_path)\n if os.path.isdir(local_path):\n continue\n ceph_file_path = get_ceph_path(local_path)\n from petrel_helper import PetrelHelper\n with open(local_path, 'rb') as f:\n PetrelHelper.write(f, ceph_file_path)"
},
{
"identifier": "save_ds_checkpoints",
"path": "llm/utils/general/hf_utils.py",
"snippet": "def save_ds_checkpoints(runner, save_cfg, global_step):\n output_dir = save_cfg.get('save_path', \"checkpoints\")\n checkpoint_folder = f\"checkpoint-{global_step}\"\n output_dir = os.path.join(output_dir, checkpoint_folder)\n os.makedirs(output_dir, exist_ok=True)\n tag = f\"global_step{global_step}\"\n state_dict = {}\n state_dict['iteration'] = global_step\n if save_cfg.get('save_rng_state', False):\n state_dict['random_rng_state'] = random.getstate()\n state_dict['np_rng_state'] = np.random.get_state()\n state_dict['torch_rng_state'] = torch.get_rng_state()\n state_dict['cuda_rng_state'] = torch.cuda.get_rng_state()\n runner.model.save_checkpoint(output_dir,\n tag=tag,\n client_state=state_dict,\n save_base_state=save_cfg.get('save_base_state', True),\n save_zero=save_cfg.get('save_zero', False),\n save_optim=save_cfg.get('save_optim', False))"
}
] | import torch
import deepspeed
from torch.nn.parallel import DistributedDataParallel as DDP
from llm.utils.general.yaml_loader import load_yaml
from llm.utils.general.parser_helper import parse_args
from llm.utils.model.optimizer_helper import build_optimizer
from llm.utils.model.lr_helper import build_learning_rate_scheduler
from llm.utils.general.hook_helper import build_hooks
from llm.utils.general.log_helper import default_logger as logger
from llm.data.tokenizer import build_tokenizer
from llm.utils.env.hf_dist_helper import (
setup_distributed,
get_world_size
)
from llm.utils.general.hf_build_utils import (
build_batch_collator,
build_dataloader,
build_dataset,
build_model,
hack_model,
build_augmentation
)
from llm.utils.general.hf_utils import (
hf_inference,
hf_inference_multimodal,
load_from_ds,
load_from_hf,
save_hf_checkpoint,
save_ds_checkpoints
)
from llm.utils.general.grad_scaler import ShardedGradScaler | 7,541 | self.gradient_accumulation_steps = ds_config['gradient_accumulation_steps']
self.global_train_batch_size *= self.gradient_accumulation_steps
self.train_epoch_size //= self.gradient_accumulation_steps
if 'train_batch_size' not in ds_config or ds_config['train_batch_size'] == 'auto':
ds_config['train_batch_size'] = self.global_train_batch_size
if 'train_micro_batch_size_per_gpu' not in ds_config or ds_config['train_micro_batch_size_per_gpu'] == 'auto':
ds_config['train_micro_batch_size_per_gpu'] = self.mirco_train_batch_size
model, optimizer, _, lr_scheduler = deepspeed.initialize(
model=self.model,
optimizer=self.optimizer,
lr_scheduler=self.lr_scheduler,
config=self.config['deepspeed']['config'],
args=None,
)
self.model = model
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
def load_checkpoints(self, load_cfg):
if load_cfg.get('enabled', False):
load_dir = load_cfg.get("load_path", None)
mode = load_cfg.get('load_mode', 'hf')
if not load_dir:
logger.info("No weights need to be loaded.")
return
logger.info(f"Loading model from {load_dir}")
if mode == 'huggingface':
try:
if self.config['model'].get('mode', "from_pretrained") == "from_config":
load_from_hf(self, load_cfg)
except: # noqa
logger.warning("Loading failed by huggingface")
elif mode == 'deepspeed':
try:
load_from_ds(self, load_cfg)
except: # noqa
logger.warning("Loading failed by deepspeed")
else:
raise NotImplementedError
def build_data(self):
self.data_loaders = {}
for data_type in self.config['data'].get('data_types', []):
dataset_cfg = self.config['data'][data_type]['dataset']
dataset = build_dataset(dataset_cfg, self.tokenizer)
batch_collector_cfg = self.config['data'][data_type]['batch_collector']
batch_collector_cfg['kwargs']['offset_label'] = False
batch_collector = build_batch_collator(batch_collector_cfg, self.tokenizer)
if data_type == 'val' or data_type == 'test':
self.config['data'][data_type]['batch_sampler']['infinite'] = False
self.config['data'][data_type]['batch_sampler']['kwargs']['sampler']['type'] = 'dist_test'
data_loader = build_dataloader(self.config['data'][data_type], dataset, batch_collector)
self.data_loaders[data_type] = data_loader
def batch2device(self, batch):
batch['input_ids'] = batch['input_ids'].to(device=torch.device('cuda'))
batch['labels'] = batch['labels'].to(device=torch.device('cuda'))
batch['attention_mask'] = batch['attention_mask'].to(device=torch.device('cuda'))
return batch
def get_batch(self, batch_type='train'):
assert batch_type in self.data_loaders
if not hasattr(self, 'data_iterators'):
self.data_iterators = {}
if batch_type not in self.data_iterators:
iterator = self.data_iterators[batch_type] = iter(self.data_loaders[batch_type])
else:
iterator = self.data_iterators[batch_type]
try:
batch = next(iterator)
except StopIteration as e: # noqa
iterator = self.data_iterators[batch_type] = iter(self.data_loaders[batch_type])
batch = next(iterator)
batch = self.batch2device(batch)
return batch
def _save(self, iteration):
if (iteration + 1) % self.save_interval == 0:
self.save_checkpoint(self.config.get('saver', {}), iteration + 1)
def train(self):
self.model.train()
self._hooks('before_train')
for iteration in range(
self.start_iter * self.gradient_accumulation_steps,
self.train_iters * self.gradient_accumulation_steps,
):
self.cur_iter = iteration // self.gradient_accumulation_steps
batch = self.get_batch()
self._hooks('before_train_iter', self.cur_iter, batch)
with torch.cuda.amp.autocast(enabled=True, dtype=self.dtype):
output = self.model(batch['input_ids'],
batch['attention_mask'],
labels=batch['labels'],
return_dict=True,
use_cache=False)
losses = [val for name, val in output.items() if name.find('loss') >= 0]
loss = sum(losses)
if self.deepspeed:
self.model.backward(loss)
self.model.step()
else:
self.scaler.scale(loss).backward()
self.scaler.step(self.optimizer)
self.scaler.update()
self.optimizer.zero_grad()
self.lr_scheduler.step()
if (iteration + 1) % self.gradient_accumulation_steps == 0:
self._save(self.cur_iter)
self._hooks('after_train_iter', self.cur_iter, output)
save_hf_checkpoint(self, self.config['saver'], self.train_iters)
self._hooks('after_train')
def infer(self):
self.model.eval()
self.model.cuda()
device = self.model.device
assert 'infer_tokenization' in self.config, "infer_tokenization does not exist."
self.config['infer_tokenization']['kwargs'].update({'tokenizer': self.tokenizer})
|
class HFRunner(object):
def __init__(self, args, cfg=None, training=True):
self.args = args
self.config = cfg
self.training = training
self.deepspeed = False
self.dtype = torch.float16
if 'deepspeed' in self.config:
self.deepspeed = self.config['deepspeed'].get('enabled', False)
self.dtype = self.get_dtype_from_ds(self.config['deepspeed']['config'])
if 'runtime' not in self.config:
self.config['runtime'] = {}
self.gradient_accumulation_steps = self.config['runtime'].get('gradient_accumulation_steps', 1)
self.start_iter = 0
self.build()
if not self.deepspeed:
self.scaler = ShardedGradScaler(enabled=True)
if self.training:
logger.info(f"Start_iter: {self.start_iter}")
logger.info(f"Train_iters: {self.train_iters}")
logger.info(f"Train_epoch_size: {self.train_epoch_size}")
logger.info(f"Total epoch: {self.get_max_train_epoch()}")
logger.info(f"Gradient_accumulation_steps: {self.gradient_accumulation_steps}")
logger.info(f"Global_train_batch_size: {self.global_train_batch_size}")
    def get_dtype_from_ds(self, ds_config):
        bf16 = False
        fp16 = False
        if 'bf16' in ds_config:
            bf16 = ds_config['bf16'].get('enabled', False)
        if 'fp16' in ds_config:
            fp16 = ds_config['fp16'].get('enabled', False)
assert bf16 != fp16
if bf16:
return torch.bfloat16
if fp16:
return torch.float16
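    # --- Illustrative note (not part of the original EasyLLM code) ---
    # get_dtype_from_ds() expects exactly one of bf16 / fp16 to be enabled in the
    # DeepSpeed config. For example, a fragment such as
    #   {'bf16': {'enabled': True}, 'fp16': {'enabled': False}}
    # maps to torch.bfloat16, while enabling fp16 instead maps to torch.float16;
    # enabling both (or neither) trips the assert above.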
def build(self):
self.build_tokenizer()
self.build_model()
self.build_hooks()
self.build_data()
self.build_trainer()
if self.deepspeed and self.training:
self.deepspeed_init()
self.load_checkpoints(self.config['loader'])
def get_cur_train_epoch(self):
epoch = (self.cur_iter // self.train_epoch_size) + 1
return epoch
def get_max_train_epoch(self):
epoch = (max(self.train_iters - 1, 1)) // self.train_epoch_size + 1
return epoch
def build_optimzer(self):
optimizer_cfg = self.config['trainer']['optimizer']
self.optimizer = build_optimizer(optimizer_cfg, self.model)
def build_lr_scheduler(self):
lr_scheduler_cfg = self.config['trainer']['lr_scheduler']
self.lr_scheduler = build_learning_rate_scheduler(lr_scheduler_cfg, self.optimizer)
def build_tokenizer(self):
self.tokenizer = build_tokenizer(self.config['tokenizer'])
def build_model(self):
self.model = build_model(self.config['model'])
if self.config['runtime'].get('gradient_checkpointing', True):
if hasattr(self.model, "gradient_checkpointing_disable"):
self.model.gradient_checkpointing_enable()
if hasattr(self.model, "base_model"):
self.model.base_model.gradient_checkpointing_enable()
if self.config['model'].get('peft_model_cfg', None) is not None:
modules_to_save = self.config['model']['peft_model_cfg'].get('modules_to_save', [])
if len(modules_to_save) == 0:
hack_model(self.model)
if not self.deepspeed:
            self.model = self.model.cuda()
if self.training:
self.model = DDP(self.model,
broadcast_buffers=False,
find_unused_parameters=False)
def build_trainer(self):
world_size = get_world_size()
if self.training:
self.train_iters = self.config['trainer']['train_iters']
self.save_interval = self.config['saver'].get('save_interval', 100)
self.build_optimzer()
self.build_lr_scheduler()
self.mirco_train_batch_size = self.data_loaders['train'].batch_sampler.batch_size
self.train_epoch_size = self.data_loaders['train'].get_epoch_size()
self.global_train_batch_size = self.mirco_train_batch_size * world_size
else:
if 'test' in self.data_loaders:
self.mirco_test_batch_size = self.data_loaders['test'].batch_sampler.batch_size
self.test_epoch_size = self.data_loaders['test'].get_epoch_size()
else:
self.mirco_test_batch_size = 1
self.test_epoch_size = 1
self.global_test_batch_size = self.mirco_test_batch_size * world_size
self.global_train_batch_size = 1
def build_hooks(self):
cfg_hooks = self.config.get('hooks', [])
self._hooks = build_hooks(self, cfg_hooks, is_train=self.training, add_log_if_not_exists=True)
logger.info('build hooks done')
def deepspeed_init(self):
ds_config = self.config['deepspeed']['config']
if ds_config.get('gradient_accumulation_steps', 'auto') == 'auto':
ds_config['gradient_accumulation_steps'] = self.gradient_accumulation_steps
self.gradient_accumulation_steps = ds_config['gradient_accumulation_steps']
self.global_train_batch_size *= self.gradient_accumulation_steps
self.train_epoch_size //= self.gradient_accumulation_steps
if 'train_batch_size' not in ds_config or ds_config['train_batch_size'] == 'auto':
ds_config['train_batch_size'] = self.global_train_batch_size
if 'train_micro_batch_size_per_gpu' not in ds_config or ds_config['train_micro_batch_size_per_gpu'] == 'auto':
ds_config['train_micro_batch_size_per_gpu'] = self.mirco_train_batch_size
model, optimizer, _, lr_scheduler = deepspeed.initialize(
model=self.model,
optimizer=self.optimizer,
lr_scheduler=self.lr_scheduler,
config=self.config['deepspeed']['config'],
args=None,
)
self.model = model
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
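    # --- Illustrative note (not part of the original EasyLLM code) ---
    # After deepspeed_init(), the effective batch sizes relate as
    #   train_batch_size = train_micro_batch_size_per_gpu
    #                      * world_size
    #                      * gradient_accumulation_steps
    # e.g. a micro batch of 2 on 8 GPUs with 4 accumulation steps gives a global
    # batch of 64 samples per optimizer step (the numbers here are assumptions).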
def load_checkpoints(self, load_cfg):
if load_cfg.get('enabled', False):
load_dir = load_cfg.get("load_path", None)
mode = load_cfg.get('load_mode', 'hf')
if not load_dir:
logger.info("No weights need to be loaded.")
return
logger.info(f"Loading model from {load_dir}")
if mode == 'huggingface':
try:
if self.config['model'].get('mode', "from_pretrained") == "from_config":
load_from_hf(self, load_cfg)
except: # noqa
logger.warning("Loading failed by huggingface")
elif mode == 'deepspeed':
try:
load_from_ds(self, load_cfg)
except: # noqa
logger.warning("Loading failed by deepspeed")
else:
raise NotImplementedError
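    # --- Illustrative sketch (not part of the original EasyLLM code) ---
    # load_checkpoints() is driven by the 'loader' section of the config. Based on
    # the keys read above, a minimal YAML fragment might look like:
    #   loader:
    #     enabled: True
    #     load_path: ./checkpoints/checkpoint-1000
    #     load_mode: deepspeed   # or 'huggingface'
    #     load_optim: False
    # (the concrete path and values are assumptions, not repo defaults).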
def build_data(self):
self.data_loaders = {}
for data_type in self.config['data'].get('data_types', []):
dataset_cfg = self.config['data'][data_type]['dataset']
dataset = build_dataset(dataset_cfg, self.tokenizer)
batch_collector_cfg = self.config['data'][data_type]['batch_collector']
batch_collector_cfg['kwargs']['offset_label'] = False
batch_collector = build_batch_collator(batch_collector_cfg, self.tokenizer)
if data_type == 'val' or data_type == 'test':
self.config['data'][data_type]['batch_sampler']['infinite'] = False
self.config['data'][data_type]['batch_sampler']['kwargs']['sampler']['type'] = 'dist_test'
data_loader = build_dataloader(self.config['data'][data_type], dataset, batch_collector)
self.data_loaders[data_type] = data_loader
def batch2device(self, batch):
batch['input_ids'] = batch['input_ids'].to(device=torch.device('cuda'))
batch['labels'] = batch['labels'].to(device=torch.device('cuda'))
batch['attention_mask'] = batch['attention_mask'].to(device=torch.device('cuda'))
return batch
def get_batch(self, batch_type='train'):
assert batch_type in self.data_loaders
if not hasattr(self, 'data_iterators'):
self.data_iterators = {}
if batch_type not in self.data_iterators:
iterator = self.data_iterators[batch_type] = iter(self.data_loaders[batch_type])
else:
iterator = self.data_iterators[batch_type]
try:
batch = next(iterator)
except StopIteration as e: # noqa
iterator = self.data_iterators[batch_type] = iter(self.data_loaders[batch_type])
batch = next(iterator)
batch = self.batch2device(batch)
return batch
def _save(self, iteration):
if (iteration + 1) % self.save_interval == 0:
self.save_checkpoint(self.config.get('saver', {}), iteration + 1)
def train(self):
self.model.train()
self._hooks('before_train')
for iteration in range(
self.start_iter * self.gradient_accumulation_steps,
self.train_iters * self.gradient_accumulation_steps,
):
self.cur_iter = iteration // self.gradient_accumulation_steps
batch = self.get_batch()
self._hooks('before_train_iter', self.cur_iter, batch)
with torch.cuda.amp.autocast(enabled=True, dtype=self.dtype):
output = self.model(batch['input_ids'],
batch['attention_mask'],
labels=batch['labels'],
return_dict=True,
use_cache=False)
losses = [val for name, val in output.items() if name.find('loss') >= 0]
loss = sum(losses)
if self.deepspeed:
self.model.backward(loss)
self.model.step()
else:
self.scaler.scale(loss).backward()
self.scaler.step(self.optimizer)
self.scaler.update()
self.optimizer.zero_grad()
self.lr_scheduler.step()
if (iteration + 1) % self.gradient_accumulation_steps == 0:
self._save(self.cur_iter)
self._hooks('after_train_iter', self.cur_iter, output)
save_hf_checkpoint(self, self.config['saver'], self.train_iters)
self._hooks('after_train')
def infer(self):
self.model.eval()
self.model.cuda()
device = self.model.device
assert 'infer_tokenization' in self.config, "infer_tokenization does not exist."
self.config['infer_tokenization']['kwargs'].update({'tokenizer': self.tokenizer}) | sense_tokenization = build_augmentation(self.config["infer_tokenization"]) | 14 | 2023-11-26 10:12:52+00:00 | 12k |
dewgenenny/ScreenSync_v2 | screensync/ui.py | [
{
"identifier": "create_add_bulb_window",
"path": "screensync/screen_sync/ui/add_bulb.py",
"snippet": "def create_add_bulb_window(root, config_manager, refresh_callback):\n # Styles\n style = ttk.Style()\n style.configure('TLabel', background='#404957', foreground='white')\n style.configure('TButton', background='#404957', foreground='white', font=('Helvetica', 10))\n style.configure('TRadiobutton', background='#404957', foreground='white', font=('Helvetica', 10))\n style.map('TButton',\n background=[('active', '#50597A'), ('disabled', '#404957')],\n foreground=[('active', 'white'), ('disabled', 'white')])\n\n\n entries = {}\n placement_var = tk.StringVar()\n\n def update_config_fields(event):\n # Clear previous fields and reset entries\n for widget in config_frame.winfo_children():\n widget.destroy()\n entries.clear()\n\n bulb_type = bulb_type_var.get()\n\n # Common placement radio buttons for all bulb types\n ttk.Label(config_frame, text=\"Placement:\").pack()\n placement_frame = tk.Frame(config_frame, bg='#404957')\n placement_frame.pack()\n\n placements = ['top-left', 'top-center', 'top-right',\n 'center-left', 'center', 'center-right',\n 'bottom-left', 'bottom-center', 'bottom-right']\n for i, placement in enumerate(placements):\n row = i // 3\n column = i % 3\n radio = ttk.Radiobutton(placement_frame, text=placement, variable=placement_var, value=placement, style='TRadiobutton')\n radio.grid(row=row, column=column, sticky='w', padx=5, pady=5)\n\n # Additional fields based on bulb type\n if bulb_type == 'Tuya':\n ttk.Label(config_frame, text=\"Device ID:\").pack()\n entries['device_id'] = ttk.Entry(config_frame)\n entries['device_id'].pack()\n\n ttk.Label(config_frame, text=\"Local Key:\").pack()\n entries['local_key'] = ttk.Entry(config_frame)\n entries['local_key'].pack()\n\n ttk.Label(config_frame, text=\"IP Address:\").pack()\n entries['ip_address'] = ttk.Entry(config_frame)\n entries['ip_address'].pack()\n\n elif bulb_type == 'MagicHome':\n ttk.Label(config_frame, text=\"IP Address:\").pack()\n entries['ip_address'] = ttk.Entry(config_frame)\n entries['ip_address'].pack()\n ttk.Label(config_frame, text=\"Color Mode (Normally rgb):\").pack()\n entries['color_mode'] = ttk.Entry(config_frame)\n entries['color_mode'].pack()\n\n elif bulb_type == 'MQTT':\n ttk.Label(config_frame, text=\"MQTT Topic:\").pack()\n entries['mqtt_topic'] = ttk.Entry(config_frame)\n entries['mqtt_topic'].pack()\n\n\n def on_add_bulb():\n bulb_type = bulb_type_var.get()\n placement = placement_var.get()\n\n if bulb_type == 'Tuya':\n device_id = entries['device_id'].get() if 'device_id' in entries else None\n local_key = entries['local_key'].get() if 'local_key' in entries else None\n ip_address = entries['ip_address'].get() if 'ip_address' in entries else None\n config_manager.add_bulb(bulb_type, device_id=device_id, local_key=local_key, ip_address=ip_address, placement=placement)\n\n elif bulb_type == 'MagicHome':\n ip_address = entries['ip_address'].get() if 'ip_address' in entries else None\n color_mode = entries['color_mode'].get() if 'color_mode' in entries else None\n config_manager.add_bulb(bulb_type, color_mode=color_mode, ip_address=ip_address, placement=placement)\n elif bulb_type == 'MQTT':\n mqtt_topic = entries['mqtt_topic'].get() if 'mqtt_topic' in entries else None\n config_manager.add_bulb(bulb_type, mqtt_topic=mqtt_topic, placement=placement)\n\n refresh_callback()\n add_bulb_window.destroy()\n\n\n add_bulb_window = tk.Toplevel(root)\n add_bulb_window.title(\"Add New Bulb\")\n add_bulb_window.geometry(\"400x400\")\n add_bulb_window.configure(bg='#404957')\n\n # Dropdown for 
selecting the bulb type\n bulb_type_label = ttk.Label(add_bulb_window, text=\"Select Bulb Type:\", style='TLabel')\n bulb_type_label.pack(pady=(10, 0))\n\n bulb_type_var = tk.StringVar()\n bulb_type_dropdown = ttk.Combobox(add_bulb_window, textvariable=bulb_type_var, state='readonly', style='TCombobox')\n bulb_type_dropdown['values'] = BULB_TYPES\n bulb_type_dropdown.pack(pady=(0, 10))\n\n bulb_type_dropdown.bind(\"<<ComboboxSelected>>\", update_config_fields)\n\n config_frame = tk.Frame(add_bulb_window, bg='#404957')\n config_frame.pack(fill='both', expand=True, padx=20, pady=10)\n\n add_button = ttk.Button(add_bulb_window, text=\"Add Bulb\", command=on_add_bulb, style='TButton')\n add_button.pack(pady=(10, 10))\n\n return add_bulb_window"
},
{
"identifier": "create_remove_bulb_button",
"path": "screensync/screen_sync/ui/remove_bulb.py",
"snippet": "def create_remove_bulb_button(bulb_window, config_manager, config_section, refresh_callback):\n def remove_bulb():\n if messagebox.askyesno(\"Remove Bulb\", \"Are you sure you want to remove this bulb?\"):\n config_manager.remove_bulb(config_section)\n refresh_callback()\n bulb_window.destroy()\n\n remove_button = tk.Button(bulb_window, text=\"Remove\", command=remove_bulb, bg='red', fg='white')\n return remove_button"
},
{
"identifier": "ConfigManager",
"path": "screensync/screen_sync/config_manager.py",
"snippet": "class ConfigManager:\n def __init__(self, config_file):\n self.config_file = config_file\n self.config = configparser.ConfigParser()\n self.load_config()\n print (\"Reading from config file \" + config_file)\n\n def get_config_by_section(self, section):\n return dict(self.config.items(section))\n\n def create_default_config(self):\n \"\"\"Creates a default configuration file.\"\"\"\n # Add default sections and settings\n self.config['General'] = {\n 'saturation_factor': '1.5'\n }\n self.config['MQTT'] = {\n 'broker': 'localhost',\n 'port': '1883',\n 'username': '',\n 'password': ''\n }\n\n # Add default TuyaSettings\n self.config['TuyaSettings'] = {\n 'update_frequency': '50'\n }\n\n # Add default MQTTSettings\n self.config['MQTTSettings'] = {\n 'update_frequency': '0.5'\n }\n\n # Add default MagicHomeSettings\n self.config['MagicHomeSettings'] = {\n 'update_frequency': '50'\n }\n\n # Add more default sections and settings as necessary\n\n # Create the config file with default settings\n with open(self.config_file, 'w') as file:\n self.config.write(file)\n\n\n def load_config(self):\n \"\"\"Loads the configuration file, creates one if it doesn't exist.\"\"\"\n if not os.path.exists(self.config_file):\n self.create_default_config()\n else:\n self.config.read(self.config_file)\n\n def save_config(self):\n \"\"\"Saves the configuration to the file.\"\"\"\n with open(self.config_file, 'w') as file:\n self.config.write(file)\n\n def get_general_settings(self):\n \"\"\"Retrieves general settings from the config.\"\"\"\n general = self.config['General']\n return {\n # 'screen_capture_size': tuple(map(int, general.get('screen_capture_size', '100, 100').split(','))),\n 'saturation_factor': general.getfloat('saturation_factor', 1.5)\n }\n\n def get_section_by_device_id(self, device_id):\n for section in self.config.sections():\n if self.config[section].get('device_id') == device_id:\n return section\n return None # Or raise an error\n\n def get_bulbs(self):\n \"\"\"Retrieves bulb configurations for different types.\"\"\"\n bulbs = []\n for section in self.config.sections():\n if section.startswith('BulbTuya'):\n bulbs.append({\n 'type': 'Tuya',\n 'device_id': self.config[section]['device_id'],\n 'local_key': self.config[section]['local_key'],\n 'ip_address': self.config[section]['ip_address'],\n 'placement': self.config[section].get('placement', 'center'), # Default placement is 'Center'\n 'config_id' : section\n })\n elif section.startswith('BulbMagicHome'):\n bulbs.append({\n 'type': 'MagicHome',\n 'ip_address': self.config[section]['ip_address'],\n 'device_id': 'MagicHome',\n 'placement': self.config[section].get('placement', 'center'), # Default placement is 'Center'\n 'color_mode': self.config[section].get('color_mode', 'rgb'),\n 'config_id' : section\n })\n\n elif section.startswith('BulbMQTT'):\n bulbs.append({\n 'type': 'MQTT',\n 'topic': self.config[section]['topic'],\n 'placement': self.config[section].get('placement', 'center'), # Default placement is 'Center'\n 'device_id': 'MQTT',\n 'config_id' : section\n })\n # Add more elif blocks for other bulb types as needed\n\n return bulbs\n\n def get_mqtt_settings(self):\n \"\"\"Retrieves MQTT settings from the config.\"\"\"\n mqtt = self.config['MQTT']\n return {\n 'broker': mqtt.get('broker', 'localhost'),\n 'port': mqtt.getint('port', 1883),\n 'username': mqtt.get('username', ''),\n 'password': mqtt.get('password', '')\n }\n\n def set_mqtt_settings(self, broker, port, username, password):\n \"\"\"Sets MQTT settings.\"\"\"\n if 
'MQTT' not in self.config.sections():\n self.config.add_section('MQTT')\n self.config['MQTT'] = {\n 'broker': broker,\n 'port': str(port),\n 'username': username,\n 'password': password\n }\n self.save_config()\n\n\n def add_bulb(self, bulb_type, **kwargs):\n \"\"\"Adds a new bulb configuration based on the bulb type.\"\"\"\n if bulb_type == 'MQTT':\n self._add_mqtt_bulb(**kwargs)\n elif bulb_type == 'Tuya':\n self._add_tuya_bulb(**kwargs)\n elif bulb_type == 'MagicHome':\n self._add_magichome_bulb(**kwargs)\n # Add more elif blocks for other bulb types as needed\n\n def _add_mqtt_bulb(self, mqtt_topic, placement):\n \"\"\"Adds a new MQTT bulb configuration.\"\"\"\n mqtt_bulb_count = len([s for s in self.config.sections() if s.startswith('BulbMQTT')])\n section_name = f'BulbMQTT{mqtt_bulb_count + 1}'\n self.config[section_name] = {\n\n 'topic': mqtt_topic,\n 'placement': placement\n\n }\n self.save_config()\n\n def _add_tuya_bulb(self, device_id, local_key, ip_address, placement):\n \"\"\"Adds a new Tuya bulb configuration.\"\"\"\n tuya_bulb_count = len([s for s in self.config.sections() if s.startswith('BulbTuya')])\n section_name = f'BulbTuya{tuya_bulb_count + 1}'\n\n self.config[section_name] = {\n 'device_id': device_id,\n 'local_key': local_key,\n 'ip_address': ip_address,\n 'placement': placement\n }\n self.save_config()\n\n def _add_magichome_bulb(self, ip_address, placement, color_mode):\n \"\"\"Adds a new Tuya bulb configuration.\"\"\"\n magic_home_bulb_count = len([s for s in self.config.sections() if s.startswith('BulbMagicHome')])\n section_name = f'BulbMagicHome{magic_home_bulb_count + 1}'\n\n self.config[section_name] = {\n 'ip_address': ip_address,\n 'placement': placement,\n 'color_mode': color_mode,\n }\n\n self.save_config()\n\n\n def get_update_frequency(self, bulb_type):\n \"\"\"Retrieves the update frequency for a given bulb type.\"\"\"\n section = f'{bulb_type}Settings'\n return self.config.getfloat(section, 'update_frequency', fallback=10) # Default to 10 updates per second\n\n def set_update_frequency(self, bulb_type, frequency):\n \"\"\"Sets the update frequency for a given bulb type.\"\"\"\n section = f'{bulb_type}Settings'\n if section not in self.config.sections():\n self.config.add_section(section)\n self.config[section]['update_frequency'] = str(frequency)\n self.save_config()\n\n def remove_bulb(self, config_section):\n if config_section in self.config.sections():\n self.config.remove_section(config_section)\n self.save_config()"
},
{
"identifier": "BulbFactory",
"path": "screensync/screen_sync/bulb_factory.py",
"snippet": "class BulbFactory:\n def __init__(self, config_manager):\n self.config_manager = config_manager\n\n def create_bulbs(self):\n \"\"\"Creates and returns bulb objects based on the configuration.\"\"\"\n bulbs = []\n mqtt_settings = self.config_manager.get_mqtt_settings()\n\n for bulb_config in self.config_manager.get_bulbs():\n bulb_type = bulb_config.get('type')\n frequency = self.config_manager.get_update_frequency(bulb_type)\n rate_limiter = RateLimiter(frequency) # Instantiate RateLimiter\n placement = bulb_config.get('placement', 'center')\n if bulb_config['type'] == 'MagicHome':\n try:\n bulb = FluxLedBulbControl(ip_address=bulb_config['ip_address'], color_mode=bulb_config['color_mode'], placement=placement, rate_limiter=rate_limiter)\n bulbs.append(bulb)\n except Exception as error:\n print (\"An exception occurred:\", error)\n print(\"Error adding \" + bulb_config.get('type') + \" bulb with IP \" + bulb_config['ip_address'] )\n\n elif bulb_type == 'Tuya':\n try:\n bulb = TuyaBulbControl(bulb_config['device_id'], bulb_config['local_key'], bulb_config['ip_address'], rate_limiter, placement)\n bulbs.append(bulb)\n except:\n print(\"Error adding \" + bulb_config.get('type') + \" bulb with IP \" + bulb_config['ip_address'] )\n elif bulb_type == 'MQTT':\n try:\n bulb = ZigbeeBulbControl(\n mqtt_broker=mqtt_settings['broker'],\n port=mqtt_settings['port'],\n username=mqtt_settings['username'],\n password=mqtt_settings['password'],\n topic=bulb_config['topic'],\n rate_limiter=rate_limiter,\n placement=placement\n )\n bulb.turn_on()\n bulb.connect()\n bulbs.append(bulb)\n except:\n print(\"Error adding \" + bulb_config.get('type') + \" bulb with MQTT broker \" + mqtt_broker )\n pass\n # Add more conditions for other bulb types\n\n if bulb:\n\n bulb.connect()\n\n return bulbs"
},
{
"identifier": "Coordinator",
"path": "screensync/screen_sync/coordinator.py",
"snippet": "class Coordinator:\n def __init__(self, bulbs, color_processing_module):\n self.bulbs = bulbs\n self.color_processing = color_processing_module\n self.mode = 'normal'\n self.running = False\n self.color_cache = defaultdict(lambda: (0, 0, 0)) # Default color is black\n self.lock = threading.Lock()\n\n def set_mode(self, mode):\n self.mode = mode\n # Any other updates required when changing modes\n\n def update_bulbs(self, new_bulbs):\n if self.running:\n self.stop()\n self.bulbs = new_bulbs\n self.start()\n if self.running:\n self.start()\n\n def update_bulb_color(self, bulb, color):\n # Update the bulb color in a new thread\n t = threading.Thread(target=bulb.set_color, args=color)\n t.start()\n self.threads.append(t)\n\n def start(self):\n self.running = True\n self.update_thread = threading.Thread(target=self.run_update_loop)\n self.update_thread.start()\n self.threads = [threading.Thread(target=self.update_bulb_color, args=(bulb,)) for bulb in self.bulbs]\n for thread in self.threads:\n thread.start()\n\n\n def run_update_loop(self):\n while self.running:\n # Record update for stats\n runtime_stats.record_update()\n\n if self.mode == 'shooter':\n # In shooter mode, capture the screen once for the center\n center_color = self.color_processing.process_screen_zone('center', mode='Shooter')\n for bulb in self.bulbs:\n # Update all bulbs with the center color\n self.update_bulb_color(bulb, center_color)\n else:\n # In normal mode, update each bulb based on its zone\n for bulb in self.bulbs:\n zone_color = self.color_processing.process_screen_zone(bulb.placement)\n self.update_bulb_color(bulb, zone_color)\n\n # Sleep to avoid overloading\n time.sleep(0.0001)\n\n\n def stop(self):\n self.running = False\n if self.update_thread:\n self.update_thread.join()\n for t in self.threads:\n t.join()"
},
{
"identifier": "runtime_stats",
"path": "screensync/screen_sync/stats.py",
"snippet": "class RuntimeStats:\n def __init__(self):\n def get_last_n_stats(self, n):\n def record_update(self):\n def timed_function(self, stat_key):\n def decorator(func):\n def wrapper(*args, **kwargs):\n def display_stats(self):"
},
{
"identifier": "create_embedded_graph",
"path": "screensync/screen_sync/graph.py",
"snippet": "def create_embedded_graph(runtime_stats, parent_widget):\n # Convert target size to inches (1 inch = 96 pixels)\n inches_width = 227 / 96\n inches_height = 83 / 96\n # Create a Figure with the converted size\n fig = Figure(figsize=(inches_width, inches_height), dpi=96)\n ax = fig.add_subplot(111)\n updates_text = fig.text(0.8, 0.5, '', fontsize=26, va='center', ha='center', color='white')\n fig.text(0.8, 0.15, 'Updates/Sec', fontsize=6, va='center', ha='center', color='white')\n fig.text(0.33, 0.044, 'Performance past 5 minutes', fontsize=6, va='center', ha='center', color='white')\n # Function to update the graph\n def update_graph():\n ax.clear()\n\n # Set the background color for the axes and the figure\n ax.set_facecolor('black')\n fig.patch.set_facecolor('black')\n\n # Remove padding and margins around the plot\n fig.subplots_adjust(left=0, right=1, top=1, bottom=0)\n\n # Hide the axes frame which may have padding\n ax.set_frame_on(False)\n fig.subplots_adjust(left=0.05, right=0.65, top=0.95, bottom=0.05)\n # Optionally, hide the axes ticks as well\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n last_update = runtime_stats.get_last_n_stats(1)[-1][1] if runtime_stats.get_last_n_stats(1) else 0\n # Using text() to place large numbers on the right side of the figure\n #fig.text(0.7, 0.5, str(last_update), fontsize=26, va='center', ha='center', color='white')\n updates_text.set_text(str(last_update))\n\n\n\n # Get last 300 data points\n data = runtime_stats.get_last_n_stats(300)\n\n # Separate timestamps and values\n timestamps = [datetime.fromtimestamp(ts) for ts, _ in data]\n values = [val for _, val in data]\n\n # Plot the data\n ax.plot(timestamps, values, color='red', linewidth=1)\n ax.set_facecolor('black')\n ax.tick_params(axis='x', colors='white', labelsize=6) # Format x-axis ticks\n ax.tick_params(axis='y', colors='white', labelsize=6) # Format y-axis ticks\n ax.spines['bottom'].set_color('white') # Set the color of the bottom spine\n ax.spines['left'].set_color('white') # Set the color of the left spine\n\n\n\n\n\n # Redraw the canvas\n canvas.draw()\n\n # Create the matplotlib canvas and pack it into the Tkinter window\n canvas = FigureCanvasTkAgg(fig, master=parent_widget)\n canvas.draw()\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n\n return update_graph"
}
] | import tkinter as tk
import PIL
import os
import pkg_resources
import screensync.screen_sync.color_processing as color_processing
from tkinter import PhotoImage, Toplevel, Label, Entry, Button, Listbox,LabelFrame, ttk, messagebox, END
from PIL import Image, ImageTk
from platformdirs import *
from screensync.screen_sync.ui.add_bulb import create_add_bulb_window
from screensync.screen_sync.ui.remove_bulb import create_remove_bulb_button
from screensync.screen_sync.config_manager import ConfigManager
from screensync.screen_sync.bulb_factory import BulbFactory
from screensync.screen_sync.coordinator import Coordinator
from screensync.screen_sync.stats import runtime_stats
from screensync.screen_sync.graph import create_embedded_graph | 7,593 | # This function will need to be implemented with the actual save logic
print(f"Saving Saturation: {saturation_var.get()}, Capture Size: {capture_size_var.get()}")
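    # A minimal sketch of what the real save logic could look like, reusing the
    # same ConfigManager calls that save_settings() uses further below; the
    # 'General' section name and option keys are assumptions, not a confirmed API:
    #   config_manager.config['General']['saturation_factor'] = saturation_var.get()
    #   config_manager.config['General']['screen_capture_size'] = capture_size_var.get()
    #   config_manager.save_config()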
def open_general_settings(config_manager):
general_settings_window = Toplevel(root)
general_settings_window.title("General Settings")
general_settings_window.geometry('300x200')
general_settings_window.configure(bg='#404957')
general_settings = config_manager.get_general_settings()
# Saturation Factor Setting
Label(general_settings_window, text="Saturation Factor:").grid(row=0, column=0, sticky='e')
saturation_var = tk.StringVar(value=general_settings.get('saturation_factor', '1.5'))
Entry(general_settings_window, textvariable=saturation_var).grid(row=0, column=1)
# # Screen Capture Size Setting
# Label(general_settings_window, text="Screen Capture Size:").grid(row=1, column=0, sticky='e')
    # The screen-capture-size entry is commented out for now, but the variable is
    # still defined because the Save button's callback below passes it to save_general_settings().
    capture_size_var = tk.StringVar(value=general_settings.get('screen_capture_size', '100, 100'))
# Entry(general_settings_window, textvariable=capture_size_var).grid(row=1, column=1)
# Save Button
save_button = Button(general_settings_window, text="Save",
command=lambda: save_general_settings(saturation_var, capture_size_var))
save_button.grid(row=2, column=0, columnspan=2)
def create_settings_frame(parent, title, settings, entries_dict):
frame = tk.LabelFrame(parent, text=title, bg='#404957', fg='white', font=("TkDefaultFont", 12, "bold"))
frame.pack(padx=10, pady=10, fill='x')
for setting, value in settings.items():
row = tk.Frame(frame, bg='#404957')
row.pack(side='top', fill='x', padx=5, pady=5)
label = tk.Label(row, text=setting.replace('_', ' ').title() + ":", bg='#404957', fg='white')
label.pack(side='left')
entry = tk.Entry(row, bg='white', fg='black')
entry.pack(side='right', expand=True, fill='x')
entry.insert(0, value)
entries_dict[setting] = entry
return frame
def open_settings_window(root, coordinator, config_manager , bulb_factory):
# This dictionary will hold the entry widgets for settings
settings_entries = {
'General': {},
'MQTT': {},
'TuyaSettings': {},
'MQTTSettings': {},
'MagicHomeSettings': {}
}
def save_settings():
# Iterate over each settings section and update the configuration
for section, entries in settings_entries.items():
for setting, entry in entries.items():
config_manager.config[section][setting] = entry.get()
# Save the updated configuration to the file
config_manager.save_config()
# Refresh the bulbs and UI if necessary
refresh_bulb_list()
# Provide feedback that settings have been saved
messagebox.showinfo("Settings", "Settings have been saved successfully.")
def refresh_bulb_list():
bulbs_listbox.delete(0, tk.END) # Clear the existing list
bulbs = config_manager.get_bulbs() # Retrieve updated list of bulbs
for bulb in bulbs:
bulbs_listbox.insert(tk.END, f"{bulb['config_id']} - {bulb['device_id']} - {bulb['placement']}")
reinitialize_bulbs()
settings_window = tk.Toplevel(root)
settings_window.title("Settings")
settings_window.geometry("400x700") # Adjust the size as needed
settings_window.configure(bg='#404957')
settings_window.resizable(False, False)
# General settings frame
general_settings_frame = create_settings_frame(settings_window, "General", config_manager.get_general_settings(), settings_entries['General'])
# MQTT settings frame
mqtt_settings_frame = create_settings_frame(settings_window, "MQTT Server", config_manager.get_mqtt_settings(), settings_entries['MQTT'])
# Tuya settings frame
tuya_settings_frame = create_settings_frame(settings_window, "Tuya Specific", config_manager.get_config_by_section("TuyaSettings"), settings_entries['TuyaSettings'])
# MQTT specific settings frame
mqtt_specific_settings_frame = create_settings_frame(settings_window, "MQTT Specific", config_manager.get_config_by_section("MQTTSettings"), settings_entries['MQTTSettings'])
# MagicHome settings frame
magichome_specific_settings_frame = create_settings_frame(settings_window, "MagicHome Specific", config_manager.get_config_by_section("MagicHomeSettings"), settings_entries['MagicHomeSettings'])
# Add "Save Settings" Button
save_button = tk.Button(settings_window, text="Save Settings", command=save_settings, bg='green', fg='white')
save_button.pack(side='bottom', pady=10)
add_new_frame = tk.LabelFrame(settings_window, text="Add New Bulb", bg='#404957', fg='white', font=("TkDefaultFont", 12, "bold"))
add_new_frame.pack(padx=10, pady=10, fill='x')
# Bulbs listbox with a scrollbar
bulbs_frame = tk.LabelFrame(settings_window, text="Bulbs", bg='#404957', fg='white', font=("TkDefaultFont", 12, "bold"))
bulbs_frame.pack(padx=10, pady=10, fill='both', expand=True)
scrollbar = ttk.Scrollbar(bulbs_frame, orient='vertical')
scrollbar.pack(side='right', fill='y')
bulbs_listbox = tk.Listbox(bulbs_frame, yscrollcommand=scrollbar.set, bg='#D9D9D9', fg='black')
bulbs_listbox.pack(side='left', fill='both', expand=True)
scrollbar.config(command=bulbs_listbox.yview)
#add_bulb_window = create_add_bulb_window(root, config_manager, refresh_ui)
# Add New Button
add_new_button = tk.Button(add_new_frame,bg='#D9D9D9',text=' Add '
|
appname = 'ScreenSync_v2'
appauthor = 'Tom George'
# Global flag to track Shooter Mode state
shooter_mode_active = False
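# main() wires the application together: it loads config.ini from the per-user
# data directory, builds the bulbs through BulbFactory, hands them to the
# Coordinator, and then constructs the Tkinter window whose buttons drive that
# Coordinator (Settings -> open_settings_window, Enable Shooter ->
# shooter_clicked, Start/Stop -> start_stop_button_clicked).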
def main():
global config_manager, bulb_factory, bulbs, coordinator
# Check if config directory exists and if not create
os.makedirs(user_data_dir(appname, appauthor), exist_ok=True)
#print(user_data_dir(appname, appauthor) + '/config.ini')
# Initialize necessary objects
config_manager = ConfigManager(user_data_dir(appname, appauthor) + '/config.ini')
bulb_factory = BulbFactory(config_manager)
bulbs = bulb_factory.create_bulbs()
coordinator = Coordinator(bulbs, color_processing)
icon_path = pkg_resources.resource_filename('screensync', 'assets/ScreenSync.ico')
banner_path = pkg_resources.resource_filename('screensync', 'assets/screensync-banner.png')
# Define the main window
root = tk.Tk()
root.title("ScreenSync V2")
root.geometry('245x265') # Width x Height
root.configure(bg='#000000')
root.resizable(False, False)
root.overrideredirect(False)
root.iconbitmap(icon_path)
# Load and resize the banner image
banner_image = Image.open(banner_path)
banner_image = banner_image.resize((200, 55), PIL.Image.Resampling.LANCZOS)
banner_photo = ImageTk.PhotoImage(banner_image)
# Create a Label to display the image
banner_label = tk.Label(root, image=banner_photo, bg='#000000')
banner_label.image = banner_photo # Keep a reference to avoid garbage collection
banner_label.place(x=20, y=5) # Place at the top of the window
# Stats graph frame
stats_frame = tk.Frame(root, bg='#000000', width=227, height=83)
stats_frame.place(x=9, y=60)
update_graph = create_embedded_graph(runtime_stats, stats_frame)
refresh_graph(root, update_graph) # Start the periodic update
# Settings Button
settings_button = tk.Button(root, bg='#D9D9D9', text='Settings',
command=lambda: open_settings_window(root, coordinator, config_manager, bulb_factory))
settings_button.place(x=11, y=160)
# Add New Button
shooter_button = tk.Button(root,bg='#D9D9D9',text='Enable Shooter'
,command=lambda: shooter_clicked(shooter_button, coordinator))
shooter_button.place(x=133, y=160)
# Bind the on_closing function to the window's close event
root.protocol("WM_DELETE_WINDOW", lambda: on_closing(root, coordinator))
    # Start/Stop Button
start_stop_button = tk.Button(root, text="Start", bg='#D9D9D9', width=31, height=3,
command=lambda: start_stop_button_clicked(start_stop_button, coordinator))
start_stop_button.place(x=9, y=200)
root.mainloop()
def toggle_shooter_mode(shooter_button, coordinator):
global shooter_mode_active
if shooter_mode_active:
# Disable Shooter Mode by setting it back to 'normal' or any other default mode
coordinator.set_mode('normal')
shooter_button.config(text="Enable Shooter")
else:
# Enable Shooter Mode
coordinator.set_mode('shooter')
shooter_button.config(text="Disable Shooter")
# Toggle the flag
shooter_mode_active = not shooter_mode_active
# Define a function to be called when the window is closed
def on_closing(root, coordinator):
if coordinator.running:
coordinator.stop() # Make sure to stop the coordinator
root.destroy() # Destroy the main window
# Function to reinitialize bulbs
def reinitialize_bulbs():
global config_manager
config_manager = ConfigManager('./config.ini')
global bulbs
bulbs = bulb_factory.create_bulbs() # Recreate bulbs with new settings
global coordinator
coordinator = Coordinator(bulbs, color_processing)
def shooter_clicked(shooter_button, coordinator):
toggle_shooter_mode(shooter_button, coordinator)
print("Toggle shooter mode clicked")
def start_stop_button_clicked(start_stop_button, coordinator):
if coordinator.running:
coordinator.stop()
start_stop_button.config(text="Start")
else:
coordinator.start()
start_stop_button.config(text="Stop")
def save_general_settings(saturation_var, capture_size_var):
# Here you'll save the general settings back to config.ini
# This function will need to be implemented with the actual save logic
print(f"Saving Saturation: {saturation_var.get()}, Capture Size: {capture_size_var.get()}")
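    # A minimal sketch of what the real save logic could look like, reusing the
    # same ConfigManager calls that save_settings() uses further below; the
    # 'General' section name and option keys are assumptions, not a confirmed API:
    #   config_manager.config['General']['saturation_factor'] = saturation_var.get()
    #   config_manager.config['General']['screen_capture_size'] = capture_size_var.get()
    #   config_manager.save_config()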
def open_general_settings(config_manager):
general_settings_window = Toplevel(root)
general_settings_window.title("General Settings")
general_settings_window.geometry('300x200')
general_settings_window.configure(bg='#404957')
general_settings = config_manager.get_general_settings()
# Saturation Factor Setting
Label(general_settings_window, text="Saturation Factor:").grid(row=0, column=0, sticky='e')
saturation_var = tk.StringVar(value=general_settings.get('saturation_factor', '1.5'))
Entry(general_settings_window, textvariable=saturation_var).grid(row=0, column=1)
# # Screen Capture Size Setting
# Label(general_settings_window, text="Screen Capture Size:").grid(row=1, column=0, sticky='e')
    # The screen-capture-size entry is commented out for now, but the variable is
    # still defined because the Save button's callback below passes it to save_general_settings().
    capture_size_var = tk.StringVar(value=general_settings.get('screen_capture_size', '100, 100'))
# Entry(general_settings_window, textvariable=capture_size_var).grid(row=1, column=1)
# Save Button
save_button = Button(general_settings_window, text="Save",
command=lambda: save_general_settings(saturation_var, capture_size_var))
save_button.grid(row=2, column=0, columnspan=2)
def create_settings_frame(parent, title, settings, entries_dict):
frame = tk.LabelFrame(parent, text=title, bg='#404957', fg='white', font=("TkDefaultFont", 12, "bold"))
frame.pack(padx=10, pady=10, fill='x')
for setting, value in settings.items():
row = tk.Frame(frame, bg='#404957')
row.pack(side='top', fill='x', padx=5, pady=5)
label = tk.Label(row, text=setting.replace('_', ' ').title() + ":", bg='#404957', fg='white')
label.pack(side='left')
entry = tk.Entry(row, bg='white', fg='black')
entry.pack(side='right', expand=True, fill='x')
entry.insert(0, value)
entries_dict[setting] = entry
return frame
def open_settings_window(root, coordinator, config_manager , bulb_factory):
# This dictionary will hold the entry widgets for settings
settings_entries = {
'General': {},
'MQTT': {},
'TuyaSettings': {},
'MQTTSettings': {},
'MagicHomeSettings': {}
}
def save_settings():
# Iterate over each settings section and update the configuration
for section, entries in settings_entries.items():
for setting, entry in entries.items():
config_manager.config[section][setting] = entry.get()
# Save the updated configuration to the file
config_manager.save_config()
# Refresh the bulbs and UI if necessary
refresh_bulb_list()
# Provide feedback that settings have been saved
messagebox.showinfo("Settings", "Settings have been saved successfully.")
def refresh_bulb_list():
bulbs_listbox.delete(0, tk.END) # Clear the existing list
bulbs = config_manager.get_bulbs() # Retrieve updated list of bulbs
for bulb in bulbs:
bulbs_listbox.insert(tk.END, f"{bulb['config_id']} - {bulb['device_id']} - {bulb['placement']}")
reinitialize_bulbs()
settings_window = tk.Toplevel(root)
settings_window.title("Settings")
settings_window.geometry("400x700") # Adjust the size as needed
settings_window.configure(bg='#404957')
settings_window.resizable(False, False)
# General settings frame
general_settings_frame = create_settings_frame(settings_window, "General", config_manager.get_general_settings(), settings_entries['General'])
# MQTT settings frame
mqtt_settings_frame = create_settings_frame(settings_window, "MQTT Server", config_manager.get_mqtt_settings(), settings_entries['MQTT'])
# Tuya settings frame
tuya_settings_frame = create_settings_frame(settings_window, "Tuya Specific", config_manager.get_config_by_section("TuyaSettings"), settings_entries['TuyaSettings'])
# MQTT specific settings frame
mqtt_specific_settings_frame = create_settings_frame(settings_window, "MQTT Specific", config_manager.get_config_by_section("MQTTSettings"), settings_entries['MQTTSettings'])
# MagicHome settings frame
magichome_specific_settings_frame = create_settings_frame(settings_window, "MagicHome Specific", config_manager.get_config_by_section("MagicHomeSettings"), settings_entries['MagicHomeSettings'])
# Add "Save Settings" Button
save_button = tk.Button(settings_window, text="Save Settings", command=save_settings, bg='green', fg='white')
save_button.pack(side='bottom', pady=10)
add_new_frame = tk.LabelFrame(settings_window, text="Add New Bulb", bg='#404957', fg='white', font=("TkDefaultFont", 12, "bold"))
add_new_frame.pack(padx=10, pady=10, fill='x')
# Bulbs listbox with a scrollbar
bulbs_frame = tk.LabelFrame(settings_window, text="Bulbs", bg='#404957', fg='white', font=("TkDefaultFont", 12, "bold"))
bulbs_frame.pack(padx=10, pady=10, fill='both', expand=True)
scrollbar = ttk.Scrollbar(bulbs_frame, orient='vertical')
scrollbar.pack(side='right', fill='y')
bulbs_listbox = tk.Listbox(bulbs_frame, yscrollcommand=scrollbar.set, bg='#D9D9D9', fg='black')
bulbs_listbox.pack(side='left', fill='both', expand=True)
scrollbar.config(command=bulbs_listbox.yview)
#add_bulb_window = create_add_bulb_window(root, config_manager, refresh_ui)
# Add New Button
add_new_button = tk.Button(add_new_frame,bg='#D9D9D9',text=' Add ' | ,command=lambda: create_add_bulb_window(root, config_manager, refresh_bulb_list)) | 0 | 2023-11-19 10:48:58+00:00 | 12k |
natto-maki/ComfyUI-NegiTools | negi/repos/controlnet_aux/src/controlnet_aux/segment_anything/build_sam.py | [
{
"identifier": "Sam",
"path": "negi/repos/controlnet_aux/src/controlnet_aux/segment_anything/modeling/sam.py",
"snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: Union[ImageEncoderViT, TinyViT],\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x"
},
{
"identifier": "ImageEncoderViT",
"path": "negi/repos/controlnet_aux/src/controlnet_aux/segment_anything/modeling/image_encoder.py",
"snippet": "class ImageEncoderViT(nn.Module):\n def __init__(\n self,\n img_size: int = 1024,\n patch_size: int = 16,\n in_chans: int = 3,\n embed_dim: int = 768,\n depth: int = 12,\n num_heads: int = 12,\n mlp_ratio: float = 4.0,\n out_chans: int = 256,\n qkv_bias: bool = True,\n norm_layer: Type[nn.Module] = nn.LayerNorm,\n act_layer: Type[nn.Module] = nn.GELU,\n use_abs_pos: bool = True,\n use_rel_pos: bool = False,\n rel_pos_zero_init: bool = True,\n window_size: int = 0,\n global_attn_indexes: Tuple[int, ...] = (),\n ) -> None:\n \"\"\"\n Args:\n img_size (int): Input image size.\n patch_size (int): Patch size.\n in_chans (int): Number of input image channels.\n embed_dim (int): Patch embedding dimension.\n depth (int): Depth of ViT.\n num_heads (int): Number of attention heads in each ViT block.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool): If True, add a learnable bias to query, key, value.\n norm_layer (nn.Module): Normalization layer.\n act_layer (nn.Module): Activation layer.\n use_abs_pos (bool): If True, use absolute positional embeddings.\n use_rel_pos (bool): If True, add relative positional embeddings to the attention map.\n rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.\n window_size (int): Window size for window attention blocks.\n global_attn_indexes (list): Indexes for blocks using global attention.\n \"\"\"\n super().__init__()\n self.img_size = img_size\n\n self.patch_embed = PatchEmbed(\n kernel_size=(patch_size, patch_size),\n stride=(patch_size, patch_size),\n in_chans=in_chans,\n embed_dim=embed_dim,\n )\n\n self.pos_embed: Optional[nn.Parameter] = None\n if use_abs_pos:\n # Initialize absolute positional embedding with pretrain image size.\n self.pos_embed = nn.Parameter(\n torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)\n )\n\n self.blocks = nn.ModuleList()\n for i in range(depth):\n block = Block(\n dim=embed_dim,\n num_heads=num_heads,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n norm_layer=norm_layer,\n act_layer=act_layer,\n use_rel_pos=use_rel_pos,\n rel_pos_zero_init=rel_pos_zero_init,\n window_size=window_size if i not in global_attn_indexes else 0,\n input_size=(img_size // patch_size, img_size // patch_size),\n )\n self.blocks.append(block)\n\n self.neck = nn.Sequential(\n nn.Conv2d(\n embed_dim,\n out_chans,\n kernel_size=1,\n bias=False,\n ),\n LayerNorm2d(out_chans),\n nn.Conv2d(\n out_chans,\n out_chans,\n kernel_size=3,\n padding=1,\n bias=False,\n ),\n LayerNorm2d(out_chans),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.patch_embed(x)\n if self.pos_embed is not None:\n x = x + self.pos_embed\n\n for blk in self.blocks:\n x = blk(x)\n\n x = self.neck(x.permute(0, 3, 1, 2))\n\n return x"
},
{
"identifier": "MaskDecoder",
"path": "negi/repos/controlnet_aux/src/controlnet_aux/segment_anything/modeling/mask_decoder.py",
"snippet": "class MaskDecoder(nn.Module):\n def __init__(\n self,\n *,\n transformer_dim: int,\n transformer: nn.Module,\n num_multimask_outputs: int = 3,\n activation: Type[nn.Module] = nn.GELU,\n iou_head_depth: int = 3,\n iou_head_hidden_dim: int = 256,\n ) -> None:\n \"\"\"\n Predicts masks given an image and prompt embeddings, using a\n transformer architecture.\n\n Arguments:\n transformer_dim (int): the channel dimension of the transformer\n transformer (nn.Module): the transformer used to predict masks\n num_multimask_outputs (int): the number of masks to predict\n when disambiguating masks\n activation (nn.Module): the type of activation to use when\n upscaling masks\n iou_head_depth (int): the depth of the MLP used to predict\n mask quality\n iou_head_hidden_dim (int): the hidden dimension of the MLP\n used to predict mask quality\n \"\"\"\n super().__init__()\n self.transformer_dim = transformer_dim\n self.transformer = transformer\n\n self.num_multimask_outputs = num_multimask_outputs\n\n self.iou_token = nn.Embedding(1, transformer_dim)\n self.num_mask_tokens = num_multimask_outputs + 1\n self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)\n\n self.output_upscaling = nn.Sequential(\n nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),\n LayerNorm2d(transformer_dim // 4),\n activation(),\n nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),\n activation(),\n )\n self.output_hypernetworks_mlps = nn.ModuleList(\n [\n MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)\n for i in range(self.num_mask_tokens)\n ]\n )\n\n self.iou_prediction_head = MLP(\n transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth\n )\n\n def forward(\n self,\n image_embeddings: torch.Tensor,\n image_pe: torch.Tensor,\n sparse_prompt_embeddings: torch.Tensor,\n dense_prompt_embeddings: torch.Tensor,\n multimask_output: bool,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks given image and prompt embeddings.\n\n Arguments:\n image_embeddings (torch.Tensor): the embeddings from the image encoder\n image_pe (torch.Tensor): positional encoding with the shape of image_embeddings\n sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes\n dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs\n multimask_output (bool): Whether to return multiple masks or a single\n mask.\n\n Returns:\n torch.Tensor: batched predicted masks\n torch.Tensor: batched predictions of mask quality\n \"\"\"\n masks, iou_pred = self.predict_masks(\n image_embeddings=image_embeddings,\n image_pe=image_pe,\n sparse_prompt_embeddings=sparse_prompt_embeddings,\n dense_prompt_embeddings=dense_prompt_embeddings,\n )\n\n # Select the correct mask or masks for output\n if multimask_output:\n mask_slice = slice(1, None)\n else:\n mask_slice = slice(0, 1)\n masks = masks[:, mask_slice, :, :]\n iou_pred = iou_pred[:, mask_slice]\n\n # Prepare output\n return masks, iou_pred\n\n def predict_masks(\n self,\n image_embeddings: torch.Tensor,\n image_pe: torch.Tensor,\n sparse_prompt_embeddings: torch.Tensor,\n dense_prompt_embeddings: torch.Tensor,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Predicts masks. 
See 'forward' for more details.\"\"\"\n # Concatenate output tokens\n output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)\n output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)\n tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)\n\n # Expand per-image data in batch direction to be per-mask\n src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)\n src = src + dense_prompt_embeddings\n pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)\n b, c, h, w = src.shape\n\n # Run the transformer\n hs, src = self.transformer(src, pos_src, tokens)\n iou_token_out = hs[:, 0, :]\n mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]\n\n # Upscale mask embeddings and predict masks using the mask tokens\n src = src.transpose(1, 2).view(b, c, h, w)\n upscaled_embedding = self.output_upscaling(src)\n hyper_in_list: List[torch.Tensor] = []\n for i in range(self.num_mask_tokens):\n hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))\n hyper_in = torch.stack(hyper_in_list, dim=1)\n b, c, h, w = upscaled_embedding.shape\n masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)\n\n # Generate mask quality predictions\n iou_pred = self.iou_prediction_head(iou_token_out)\n\n return masks, iou_pred"
},
{
"identifier": "PromptEncoder",
"path": "negi/repos/controlnet_aux/src/controlnet_aux/segment_anything/modeling/prompt_encoder.py",
"snippet": "class PromptEncoder(nn.Module):\n def __init__(\n self,\n embed_dim: int,\n image_embedding_size: Tuple[int, int],\n input_image_size: Tuple[int, int],\n mask_in_chans: int,\n activation: Type[nn.Module] = nn.GELU,\n ) -> None:\n \"\"\"\n Encodes prompts for input to SAM's mask decoder.\n\n Arguments:\n embed_dim (int): The prompts' embedding dimension\n image_embedding_size (tuple(int, int)): The spatial size of the\n image embedding, as (H, W).\n input_image_size (int): The padded size of the image as input\n to the image encoder, as (H, W).\n mask_in_chans (int): The number of hidden channels used for\n encoding input masks.\n activation (nn.Module): The activation to use when encoding\n input masks.\n \"\"\"\n super().__init__()\n self.embed_dim = embed_dim\n self.input_image_size = input_image_size\n self.image_embedding_size = image_embedding_size\n self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)\n\n self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners\n point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]\n self.point_embeddings = nn.ModuleList(point_embeddings)\n self.not_a_point_embed = nn.Embedding(1, embed_dim)\n\n self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])\n self.mask_downscaling = nn.Sequential(\n nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),\n LayerNorm2d(mask_in_chans // 4),\n activation(),\n nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),\n LayerNorm2d(mask_in_chans),\n activation(),\n nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),\n )\n self.no_mask_embed = nn.Embedding(1, embed_dim)\n\n def get_dense_pe(self) -> torch.Tensor:\n \"\"\"\n Returns the positional encoding used to encode point prompts,\n applied to a dense set of points the shape of the image encoding.\n\n Returns:\n torch.Tensor: Positional encoding with shape\n 1x(embed_dim)x(embedding_h)x(embedding_w)\n \"\"\"\n return self.pe_layer(self.image_embedding_size).unsqueeze(0)\n\n def _embed_points(\n self,\n points: torch.Tensor,\n labels: torch.Tensor,\n pad: bool,\n ) -> torch.Tensor:\n \"\"\"Embeds point prompts.\"\"\"\n points = points + 0.5 # Shift to center of pixel\n if pad:\n padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)\n padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)\n points = torch.cat([points, padding_point], dim=1)\n labels = torch.cat([labels, padding_label], dim=1)\n point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)\n point_embedding[labels == -1] = 0.0\n point_embedding[labels == -1] += self.not_a_point_embed.weight\n point_embedding[labels == 0] += self.point_embeddings[0].weight\n point_embedding[labels == 1] += self.point_embeddings[1].weight\n return point_embedding\n\n def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds box prompts.\"\"\"\n boxes = boxes + 0.5 # Shift to center of pixel\n coords = boxes.reshape(-1, 2, 2)\n corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)\n corner_embedding[:, 0, :] += self.point_embeddings[2].weight\n corner_embedding[:, 1, :] += self.point_embeddings[3].weight\n return corner_embedding\n\n def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds mask inputs.\"\"\"\n mask_embedding = self.mask_downscaling(masks)\n return mask_embedding\n\n def _get_batch_size(\n self,\n points: Optional[Tuple[torch.Tensor, torch.Tensor]],\n boxes: 
Optional[torch.Tensor],\n masks: Optional[torch.Tensor],\n ) -> int:\n \"\"\"\n Gets the batch size of the output given the batch size of the input prompts.\n \"\"\"\n if points is not None:\n return points[0].shape[0]\n elif boxes is not None:\n return boxes.shape[0]\n elif masks is not None:\n return masks.shape[0]\n else:\n return 1\n\n def _get_device(self) -> torch.device:\n return self.point_embeddings[0].weight.device\n\n def forward(\n self,\n points: Optional[Tuple[torch.Tensor, torch.Tensor]],\n boxes: Optional[torch.Tensor],\n masks: Optional[torch.Tensor],\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Embeds different types of prompts, returning both sparse and dense\n embeddings.\n\n Arguments:\n points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates\n and labels to embed.\n boxes (torch.Tensor or none): boxes to embed\n masks (torch.Tensor or none): masks to embed\n\n Returns:\n torch.Tensor: sparse embeddings for the points and boxes, with shape\n BxNx(embed_dim), where N is determined by the number of input points\n and boxes.\n torch.Tensor: dense embeddings for the masks, in the shape\n Bx(embed_dim)x(embed_H)x(embed_W)\n \"\"\"\n bs = self._get_batch_size(points, boxes, masks)\n sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())\n if points is not None:\n coords, labels = points\n point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))\n sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)\n if boxes is not None:\n box_embeddings = self._embed_boxes(boxes)\n sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)\n\n if masks is not None:\n dense_embeddings = self._embed_masks(masks)\n else:\n dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(\n bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]\n )\n\n return sparse_embeddings, dense_embeddings"
},
{
"identifier": "TwoWayTransformer",
"path": "negi/repos/controlnet_aux/src/controlnet_aux/segment_anything/modeling/transformer.py",
"snippet": "class TwoWayTransformer(nn.Module):\n def __init__(\n self,\n depth: int,\n embedding_dim: int,\n num_heads: int,\n mlp_dim: int,\n activation: Type[nn.Module] = nn.ReLU,\n attention_downsample_rate: int = 2,\n ) -> None:\n \"\"\"\n A transformer decoder that attends to an input image using\n queries whose positional embedding is supplied.\n\n Args:\n depth (int): number of layers in the transformer\n embedding_dim (int): the channel dimension for the input embeddings\n num_heads (int): the number of heads for multihead attention. Must\n divide embedding_dim\n mlp_dim (int): the channel dimension internal to the MLP block\n activation (nn.Module): the activation to use in the MLP block\n \"\"\"\n super().__init__()\n self.depth = depth\n self.embedding_dim = embedding_dim\n self.num_heads = num_heads\n self.mlp_dim = mlp_dim\n self.layers = nn.ModuleList()\n\n for i in range(depth):\n self.layers.append(\n TwoWayAttentionBlock(\n embedding_dim=embedding_dim,\n num_heads=num_heads,\n mlp_dim=mlp_dim,\n activation=activation,\n attention_downsample_rate=attention_downsample_rate,\n skip_first_layer_pe=(i == 0),\n )\n )\n\n self.final_attn_token_to_image = Attention(\n embedding_dim, num_heads, downsample_rate=attention_downsample_rate\n )\n self.norm_final_attn = nn.LayerNorm(embedding_dim)\n\n def forward(\n self,\n image_embedding: Tensor,\n image_pe: Tensor,\n point_embedding: Tensor,\n ) -> Tuple[Tensor, Tensor]:\n \"\"\"\n Args:\n image_embedding (torch.Tensor): image to attend to. Should be shape\n B x embedding_dim x h x w for any h and w.\n image_pe (torch.Tensor): the positional encoding to add to the image. Must\n have the same shape as image_embedding.\n point_embedding (torch.Tensor): the embedding to add to the query points.\n Must have shape B x N_points x embedding_dim for any N_points.\n\n Returns:\n torch.Tensor: the processed point_embedding\n torch.Tensor: the processed image_embedding\n \"\"\"\n # BxCxHxW -> BxHWxC == B x N_image_tokens x C\n bs, c, h, w = image_embedding.shape\n image_embedding = image_embedding.flatten(2).permute(0, 2, 1)\n image_pe = image_pe.flatten(2).permute(0, 2, 1)\n\n # Prepare queries\n queries = point_embedding\n keys = image_embedding\n\n # Apply transformer blocks and final layernorm\n for layer in self.layers:\n queries, keys = layer(\n queries=queries,\n keys=keys,\n query_pe=point_embedding,\n key_pe=image_pe,\n )\n\n # Apply the final attention layer from the points to the image\n q = queries + point_embedding\n k = keys + image_pe\n attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)\n queries = queries + attn_out\n queries = self.norm_final_attn(queries)\n\n return queries, keys"
},
{
"identifier": "TinyViT",
"path": "negi/repos/controlnet_aux/src/controlnet_aux/segment_anything/modeling/tiny_vit_sam.py",
"snippet": "class TinyViT(nn.Module):\n def __init__(self, img_size=224, in_chans=3, num_classes=1000,\n embed_dims=[96, 192, 384, 768], depths=[2, 2, 6, 2],\n num_heads=[3, 6, 12, 24],\n window_sizes=[7, 7, 14, 7],\n mlp_ratio=4.,\n drop_rate=0.,\n drop_path_rate=0.1,\n use_checkpoint=False,\n mbconv_expand_ratio=4.0,\n local_conv_size=3,\n layer_lr_decay=1.0,\n ):\n super().__init__()\n self.img_size=img_size\n self.num_classes = num_classes\n self.depths = depths\n self.num_layers = len(depths)\n self.mlp_ratio = mlp_ratio\n\n activation = nn.GELU\n\n self.patch_embed = PatchEmbed(in_chans=in_chans,\n embed_dim=embed_dims[0],\n resolution=img_size,\n activation=activation)\n\n patches_resolution = self.patch_embed.patches_resolution\n self.patches_resolution = patches_resolution\n\n # stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate,\n sum(depths))] # stochastic depth decay rule\n\n # build layers\n self.layers = nn.ModuleList()\n for i_layer in range(self.num_layers):\n kwargs = dict(dim=embed_dims[i_layer],\n input_resolution=(patches_resolution[0] // (2 ** (i_layer-1 if i_layer == 3 else i_layer)),\n patches_resolution[1] // (2 ** (i_layer-1 if i_layer == 3 else i_layer))),\n # input_resolution=(patches_resolution[0] // (2 ** i_layer),\n # patches_resolution[1] // (2 ** i_layer)),\n depth=depths[i_layer],\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n downsample=PatchMerging if (\n i_layer < self.num_layers - 1) else None,\n use_checkpoint=use_checkpoint,\n out_dim=embed_dims[min(\n i_layer + 1, len(embed_dims) - 1)],\n activation=activation,\n )\n if i_layer == 0:\n layer = ConvLayer(\n conv_expand_ratio=mbconv_expand_ratio,\n **kwargs,\n )\n else:\n layer = BasicLayer(\n num_heads=num_heads[i_layer],\n window_size=window_sizes[i_layer],\n mlp_ratio=self.mlp_ratio,\n drop=drop_rate,\n local_conv_size=local_conv_size,\n **kwargs)\n self.layers.append(layer)\n\n # Classifier head\n self.norm_head = nn.LayerNorm(embed_dims[-1])\n self.head = nn.Linear(\n embed_dims[-1], num_classes) if num_classes > 0 else torch.nn.Identity()\n\n # init weights\n self.apply(self._init_weights)\n self.set_layer_lr_decay(layer_lr_decay)\n self.neck = nn.Sequential(\n nn.Conv2d(\n embed_dims[-1],\n 256,\n kernel_size=1,\n bias=False,\n ),\n LayerNorm2d(256),\n nn.Conv2d(\n 256,\n 256,\n kernel_size=3,\n padding=1,\n bias=False,\n ),\n LayerNorm2d(256),\n )\n def set_layer_lr_decay(self, layer_lr_decay):\n decay_rate = layer_lr_decay\n\n # layers -> blocks (depth)\n depth = sum(self.depths)\n lr_scales = [decay_rate ** (depth - i - 1) for i in range(depth)]\n #print(\"LR SCALES:\", lr_scales)\n\n def _set_lr_scale(m, scale):\n for p in m.parameters():\n p.lr_scale = scale\n\n self.patch_embed.apply(lambda x: _set_lr_scale(x, lr_scales[0]))\n i = 0\n for layer in self.layers:\n for block in layer.blocks:\n block.apply(lambda x: _set_lr_scale(x, lr_scales[i]))\n i += 1\n if layer.downsample is not None:\n layer.downsample.apply(\n lambda x: _set_lr_scale(x, lr_scales[i - 1]))\n assert i == depth\n for m in [self.norm_head, self.head]:\n m.apply(lambda x: _set_lr_scale(x, lr_scales[-1]))\n\n for k, p in self.named_parameters():\n p.param_name = k\n\n def _check_lr_scale(m):\n for p in m.parameters():\n assert hasattr(p, 'lr_scale'), p.param_name\n\n self.apply(_check_lr_scale)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n 
elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay_keywords(self):\n return {'attention_biases'}\n\n def forward_features(self, x):\n # x: (N, C, H, W)\n x = self.patch_embed(x)\n\n x = self.layers[0](x)\n start_i = 1\n\n for i in range(start_i, len(self.layers)):\n layer = self.layers[i]\n x = layer(x)\n B,_,C=x.size()\n x = x.view(B, 64, 64, C)\n x=x.permute(0, 3, 1, 2)\n x=self.neck(x)\n return x\n\n def forward(self, x):\n x = self.forward_features(x)\n #x = self.norm_head(x)\n #x = self.head(x)\n return x"
}
] | import torch
from functools import partial
from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer, TinyViT | 8,245 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
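# The three builders below differ only in the ViT image-encoder hyperparameters
# (embedding dim, depth, number of heads, and which blocks use global
# attention); they all delegate to the shared _build_sam() helper defined
# further down in this module.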
def build_sam_vit_h(checkpoint=None):
return _build_sam(
encoder_embed_dim=1280,
encoder_depth=32,
encoder_num_heads=16,
encoder_global_attn_indexes=[7, 15, 23, 31],
checkpoint=checkpoint,
)
build_sam = build_sam_vit_h
def build_sam_vit_l(checkpoint=None):
return _build_sam(
encoder_embed_dim=1024,
encoder_depth=24,
encoder_num_heads=16,
encoder_global_attn_indexes=[5, 11, 17, 23],
checkpoint=checkpoint,
)
def build_sam_vit_b(checkpoint=None):
return _build_sam(
encoder_embed_dim=768,
encoder_depth=12,
encoder_num_heads=12,
encoder_global_attn_indexes=[2, 5, 8, 11],
checkpoint=checkpoint,
)
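# build_sam_vit_t assembles the model inline rather than via _build_sam(),
# because it swaps the ViT image encoder for the lighter TinyViT backbone
# (the MobileSAM-style variant). With image_size=1024 and vit_patch_size=16,
# image_embedding_size is 1024 // 16 = 64, matching the 64x64 feature map that
# TinyViT.forward_features() reshapes to before its neck.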
def build_sam_vit_t(checkpoint=None):
prompt_embed_dim = 256
image_size = 1024
vit_patch_size = 16
image_embedding_size = image_size // vit_patch_size
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
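# The three builders below differ only in the ViT image-encoder hyperparameters
# (embedding dim, depth, number of heads, and which blocks use global
# attention); they all delegate to the shared _build_sam() helper defined
# further down in this module.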
def build_sam_vit_h(checkpoint=None):
return _build_sam(
encoder_embed_dim=1280,
encoder_depth=32,
encoder_num_heads=16,
encoder_global_attn_indexes=[7, 15, 23, 31],
checkpoint=checkpoint,
)
build_sam = build_sam_vit_h
def build_sam_vit_l(checkpoint=None):
return _build_sam(
encoder_embed_dim=1024,
encoder_depth=24,
encoder_num_heads=16,
encoder_global_attn_indexes=[5, 11, 17, 23],
checkpoint=checkpoint,
)
def build_sam_vit_b(checkpoint=None):
return _build_sam(
encoder_embed_dim=768,
encoder_depth=12,
encoder_num_heads=12,
encoder_global_attn_indexes=[2, 5, 8, 11],
checkpoint=checkpoint,
)
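# build_sam_vit_t assembles the model inline rather than via _build_sam(),
# because it swaps the ViT image encoder for the lighter TinyViT backbone
# (the MobileSAM-style variant). With image_size=1024 and vit_patch_size=16,
# image_embedding_size is 1024 // 16 = 64, matching the 64x64 feature map that
# TinyViT.forward_features() reshapes to before its neck.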
def build_sam_vit_t(checkpoint=None):
prompt_embed_dim = 256
image_size = 1024
vit_patch_size = 16
image_embedding_size = image_size // vit_patch_size | mobile_sam = Sam( | 0 | 2023-11-20 13:09:44+00:00 | 12k |
wangermeng2021/llm-webui | main.py | [
{
"identifier": "login_huggingface",
"path": "src/utils/common.py",
"snippet": "def login_huggingface(token,base_model_name_dropdown):\n if base_model_name_dropdown.lower().find(\"llama\") >= 0:\n if token:\n HUGGINGFACE_HUB_TOKEN = token\n print(\"d1:\",HUGGINGFACE_HUB_TOKEN)\n else:\n env_file_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),\"token.env\")\n load_dotenv(env_file_path)\n HUGGINGFACE_HUB_TOKEN = os.getenv('HUGGINGFACE_HUB_TOKEN')\n print(\"d2:\", HUGGINGFACE_HUB_TOKEN)\n login(token=HUGGINGFACE_HUB_TOKEN)\n os.environ[\"HUGGING_FACE_HUB_TOKEN\"] = HUGGINGFACE_HUB_TOKEN"
},
{
"identifier": "HuggingfaceInference",
"path": "src/finetune/huggingface_inference.py",
"snippet": "class HuggingfaceInference(Inference):\n def __init__(self,model_path,max_new_tokens=256,temperature=0.7 ,top_p=0.95 ,top_k=1,repetition_penalty=1.15,using_4bit_quantization=True,low_cpu_mem_usage=False):\n self.model = None\n self.tokenizer = None\n self.hg_model = None\n self.model_path = model_path\n self.max_new_tokens = max_new_tokens\n self.temperature = temperature\n self.top_p = top_p\n self.top_k = top_k\n self.repetition_penalty = repetition_penalty\n self.prompt_template = PromptTemplate.from_template(\n \"{question}\"\n )\n self.bnb_config = None\n if using_4bit_quantization:\n self.bnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.bfloat16\n )\n self.low_cpu_mem_usage = low_cpu_mem_usage\n def load_model(self):\n try:\n \n if self.model_path.split(os.sep)[-1].rfind(\"llama\") >=0:\n self.tokenizer = LlamaTokenizer.from_pretrained(self.model_path)\n if self.bnb_config:\n self.hg_model = LlamaForCausalLM.from_pretrained(self.model_path, device_map={\"\":0},quantization_config=self.bnb_config,torch_dtype=torch.bfloat16, low_cpu_mem_usage=True,trust_remote_code=True)\n else:\n self.hg_model = LlamaForCausalLM.from_pretrained(self.model_path, device_map={\"\": 0},torch_dtype=torch.bfloat16, low_cpu_mem_usage=True,trust_remote_code=True)\n else:\n self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)\n if self.bnb_config:\n self.hg_model = AutoModelForCausalLM.from_pretrained(self.model_path, device_map={\"\":0},quantization_config=self.bnb_config,torch_dtype=torch.bfloat16, low_cpu_mem_usage=True,trust_remote_code=True)\n else:\n self.hg_model = AutoModelForCausalLM.from_pretrained(self.model_path, device_map={\"\": 0},torch_dtype=torch.bfloat16, low_cpu_mem_usage=True,trust_remote_code=True)\n if not self.tokenizer.pad_token:\n if self.model_path.split(os.sep)[-1].lower().rfind(\"gpt2\")>=0:\n self.tokenizer.pad_token = self.tokenizer.eos_token\n else:\n self.tokenizer.add_special_tokens({'pad_token': '[PAD]'})\n self.hg_model.resize_token_embeddings(len(self.tokenizer))\n\n except Exception as e:\n return -1, e\n self.model = pipeline(\n \"text-generation\",\n model=self.hg_model,\n tokenizer=self.tokenizer,\n max_new_tokens = self.max_new_tokens,\n temperature=self.temperature,\n top_p=self.top_p,top_k=self.top_k,do_sample=True,\n return_full_text=False,\n repetition_penalty=self.repetition_penalty,\n # return_dict_in_generate = True\n )\n return 0, \"\"\n def infer(self ,input):\n output = self.model(input)\n return output[0]['generated_text'] if output else None\n def free_memory(self):\n if self.hg_model:\n del self.hg_model\n self.hg_model = None\n if self.tokenizer:\n del self.tokenizer\n self.tokenizer = None\n if self.model:\n del self.model\n self.model = None"
},
{
"identifier": "LlamaCppInference",
"path": "src/finetune/llama_cpp_inference.py",
"snippet": "class LlamaCppInference(Inference):\n def __init__(self,model_path,max_new_tokens=256,temperature=0.7 ,top_p=0.95 ,top_k=1,repetition_penalty=1.15,n_gpu_layers=35, n_ctx=4048,verbose=False):\n self.model_path = model_path\n self.max_new_tokens = max_new_tokens\n self.temperature = temperature\n self.top_p = top_p\n self.top_k = top_k\n self.repetition_penalty = repetition_penalty\n self.prefix1 = \"\"\n self.prefix2 = \"\"\n self.model = None\n\n def load_model(self):\n load_model_status = 0\n msg = None\n try:\n self.model = LlamaCpp(model_path=self.model_path, n_gpu_layers=35, n_ctx=4096,max_tokens=self.max_new_tokens, temperature=self.temperature,\n verbose=False, top_k=self.top_k, top_p=self.top_p,repeat_penalty=self.repetition_penalty)\n except Exception as e:\n load_model_status = -1\n msg = e\n return load_model_status, msg\n def infer(self ,input):\n return self.model(input)\n\n\n def free_memory(self):\n if self.model:\n del self.model\n self.model = None"
},
{
"identifier": "QAWithRAG",
"path": "src/rag/qa_with_rag.py",
"snippet": "class QAWithRAG():\n def __init__(self ,config: dict ={}):\n self.text_splitter = None\n self.embedding_function = None\n self.vectorstore = None\n self.retriever = None\n self.chat_llm = None\n\n self.chat_history =[]\n # self.persist_directory = \"./chroma_db\"\n self.persist_directory = None\n self.qa = None\n self.langchain_llm = None\n def free_memory(self):\n if self.chat_llm:\n self.chat_llm.free_memory()\n del self.chat_llm\n self.chat_llm = None\n if self.langchain_llm:\n del self.langchain_llm\n self.langchain_llm = None\n if self.qa:\n del self.qa\n self.qa = None\n\n\n def get_text_splitter(self ,chunk_size ,chunk_overlap ,separators):\n self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap, length_function=len,\n separators=separators)\n def load_embedding_model(self ,model_path=\"\"):\n self.embedding_function = HuggingFaceEmbeddings(model_name=model_path ,model_kwargs = {'device': 'cpu'})\n def load_chat_model(self ,model_path,using_4bit_quantization,low_cpu_mem_usage,\n max_new_tokens, temperature, top_k, top_p, repeat_penalty\n ):\n self.set_prompt_template(model_path)\n load_model_status = 0\n if model_path.split('.')[-1] == \"gguf\":\n self.chat_llm = LlamaCppInference(model_path=model_path, max_new_tokens=max_new_tokens, temperature=temperature,\n top_k=top_k, top_p=top_p, repetition_penalty=repeat_penalty)\n load_model_status, msg = self.chat_llm.load_model()\n self.langchain_llm = self.chat_llm.model\n else:\n self.chat_llm = HuggingfaceInference(model_path, max_new_tokens, temperature, top_p, top_k, repeat_penalty, using_4bit_quantization,low_cpu_mem_usage)\n load_model_status, msg = self.chat_llm.load_model()\n self.langchain_llm = HuggingFacePipeline(pipeline=self.chat_llm.model)\n\n return load_model_status, msg\n\n #\n def get_document_data(self ,doc_path):\n self.chat_history = []\n self.chat_history.clear()\n self.doc_ext = doc_path.split('.')[-1]\n if self.doc_ext == \"txt\":\n loader = TextLoader(doc_path, encoding='utf8')\n elif self.doc_ext == \"pdf\":\n loader = PyPDFLoader(doc_path)\n elif self.doc_ext == \"docx\":\n loader = Docx2txtLoader(doc_path)\n else:\n raise ValueError(f\"Unsupported format: {self.doc_ext}\")\n data = loader.load()\n return data\n def add_document_to_vector_store(self, doc_path ,search_top_k ,search_score_threshold):\n data = self.get_document_data(doc_path)\n data = self.text_splitter.split_documents(data)\n try:\n self.vectorstore = Chroma.from_documents(data, self.embedding_function\n ,collection_metadata={\"hnsw:space\": \"cosine\"}\n ,persist_directory=self.persist_directory)\n # self.vectorstore = FAISS.from_documents(data, self.embedding_function) \n except InvalidDimensionException:\n Chroma().delete_collection()\n self.vectorstore = Chroma.from_documents(data, self.embedding_function\n ,collection_metadata={\"hnsw:space\": \"cosine\"}\n ,persist_directory=self.persist_directory)\n # self.vectorstore = FAISS.from_documents(data, self.embedding_function) \n self.set_retriever(search_top_k ,search_score_threshold)\n\n def set_retriever(self ,search_top_k ,score_threshold):\n self.retriever = self.vectorstore.as_retriever(search_type='similarity_score_threshold',\n search_kwargs={'k': search_top_k, \"score_threshold\": score_threshold})\n def set_prompt_template(self ,chat_model_path):\n\n if chat_model_path.lower().find(\"mistral\") >= 0 and chat_model_path.lower().find(\"instruct\") >= 0:\n prompt_template = \"\"\"<s>[INST] Use the following pieces of context to 
answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\\n\n Context: {context}\\n\n Question: {question}\\n\n Answer: [/INST]\"\"\"\n elif chat_model_path.lower().find(\"llama\") >= 0 and chat_model_path.lower().find(\"chat\") >= 0:\n prompt_template = \"\"\"<s>[INST] Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\\n\n Context: {context}\\n\n Question: {question}\\n\n Answer: [/INST]\"\"\"\n elif chat_model_path.lower().find(\"zephyr\") >= 0:\n prompt_template = \"\"\"<|user|>\\n Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\\n\n Context: {context}\\n\n Question: {question}\\n\n Answer: </s><|assistant|>\\n\"\"\"\n else:\n prompt_template = \"\"\"Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\\n\n Context: {context}\\n\n Question: {question}\\n\n Answer:\"\"\"\n\n self.prompt_template = PromptTemplate(\n template=prompt_template, input_variables=[\"context\", \"question\"]\n )\n def generate(self, question):\n self.chat_history = []\n if self.retriever:\n\n chain_type_kwargs = {\"prompt\": self.prompt_template ,\"verbose\": False}\n self.qa = RetrievalQA.from_chain_type(llm=self.langchain_llm, chain_type=\"stuff\", retriever=self.retriever,\n return_source_documents=True,\n chain_type_kwargs=chain_type_kwargs)\n result = self.qa({\"query\": question}, return_only_outputs=True)\n retrieved_txt_list = []\n if len(result['source_documents'] ) >0:\n if self.doc_ext == \"txt\":\n for doc_text in result['source_documents']:\n retrieved_txt_list.append(list(doc_text)[0][1])\n elif self.doc_ext == \"pdf\":\n for doc_text in result['source_documents']:\n retrieved_txt_list.append(list(doc_text)[0][1])\n elif self.doc_ext == \"docx\":\n for doc_text in result['source_documents']:\n retrieved_txt_list.append(list(doc_text)[0][1])\n answer = result['result']\n else:\n answer = \"Sorry, I can't find any relevant information in document. \" + result['result']\n return answer, retrieved_txt_list\n else:\n return \"\", retrieved_txt_list"
},
{
"identifier": "read_yaml",
"path": "src/utils/common.py",
"snippet": "def read_yaml(yaml_path):\n with open(yaml_path) as f1:\n try:\n data = yaml.safe_load(f1)\n return data\n except yaml.YAMLError as e:\n raise ValueError(f'Error loading yaml file: {e}')"
},
{
"identifier": "get_first_row_from_dataset",
"path": "src/utils/common.py",
"snippet": "def get_first_row_from_dataset(dataset_path):\n if os.path.exists(os.path.join(dataset_path, \"dataset_dict.json\")):\n dataset = datasets.load_from_disk(dataset_path)\n elif os.path.exists(os.path.join(dataset_path, \"dataset_infos.json\")):\n dataset = datasets.load_dataset(dataset_path)\n elif os.path.exists(os.path.join(dataset_path, \"dataset_info.json\")):\n dataset = datasets.load_from_disk(dataset_path)\n else:\n raise ValueError(\n f'Invalid Dataset format {dataset_path}.')\n try:\n split_list = list(dataset.keys())\n except:\n split_list = [\"train\"]\n new_split_list= [\"\",\"\",\"\"]\n for split in split_list:\n if split.find(\"train\") >= 0:\n new_split_list[0] = split\n elif split.find(\"val\") >= 0:\n new_split_list[1] = split\n elif split.find(\"test\") >= 0:\n new_split_list[2] = split\n\n return dataset[new_split_list[0]][0],new_split_list"
},
{
"identifier": "get_runs_model_names_from_dir",
"path": "src/utils/common.py",
"snippet": "def get_runs_model_names_from_dir(root_dir):\n\n run_names = os.listdir(root_dir)\n run_names.sort(key=lambda file: os.path.getmtime(os.path.join(root_dir, file)),reverse=True)\n runs_output_model = []\n for run_name in run_names:\n run_name_dir = os.path.join(root_dir, run_name)\n run_output_model = os.path.join(run_name_dir, \"output_model\")\n if os.path.exists(run_output_model):\n run_output_model_names = os.listdir(run_output_model)\n for run_output_model_name in run_output_model_names:\n model_bin_path = os.path.exists(\n os.path.join(root_dir,\n run_name, \"output_model\", run_output_model_name, \"ori\",\n \"pytorch_model.bin\"))\n if run_output_model_name.find(\"merged_\") >= 0 and model_bin_path:\n runs_output_model.append(os.path.join(run_name, \"output_model\", run_output_model_name, \"ori\"))\n return runs_output_model"
},
{
"identifier": "get_hg_model_names_from_dir",
"path": "src/utils/common.py",
"snippet": "def get_hg_model_names_from_dir(root_dir):\n model_names = os.listdir(root_dir)\n model_names.sort(key=lambda file: os.path.getmtime(os.path.join(root_dir, file)),reverse=True)\n return model_names"
},
{
"identifier": "get_hg_model_names_and_gguf_from_dir",
"path": "src/utils/common.py",
"snippet": "def get_hg_model_names_and_gguf_from_dir(hg_model_root_dir,runs_model_root_dir):\n output = []\n runs_gguf_files = glob.glob(os.path.join(runs_model_root_dir,\"**\",\"**\",\"**\",\"**\",\"*.gguf\"),recursive=False)\n root_model_gguf_files = glob.glob(os.path.join(hg_model_root_dir,\"**\",\"*.gguf\"),recursive=False)\n root_model_gguf_files1 = glob.glob(os.path.join(hg_model_root_dir, \"**\",\"**\", \"*.gguf\"), recursive=False)\n root_model_hg_dir0 = glob.glob(os.path.join(hg_model_root_dir,\"**\",\"config.json\"),recursive=False)\n root_model_hg_dir1 = glob.glob(os.path.join(hg_model_root_dir, \"**\",\"**\", \"config.json\"), recursive=False)\n runs_hg_dir = glob.glob(os.path.join(hg_model_root_dir,\"**\",\"**\",\"**\",\"**\",\"config.json\"),recursive=False)\n runs_gguf_files.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n root_model_gguf_files.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n root_model_gguf_files1.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n root_model_hg_dir0.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n root_model_hg_dir1.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n runs_hg_dir.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n\n for file in runs_gguf_files:\n file_pos = file.find(\"runs\")\n output.append(file[file_pos:])\n for file in root_model_gguf_files:\n output.append(file[file.find(\"models\")+len(\"models\")+1:])\n for file in root_model_gguf_files1:\n output.append(file[file.find(\"models\")+len(\"models\")+1:])\n for file in root_model_hg_dir0:\n file_pos1 = file.find(\"models\")\n file_pos2 = file.find(\"config.json\")\n output.append(file[file_pos1+len(\"models\")+1:file_pos2-1])\n for file in root_model_hg_dir1:\n file_pos1 = file.find(\"models\")\n file_pos2 = file.find(\"config.json\")\n output.append(file[file_pos1+len(\"models\")+1:file_pos2-1])\n for file in runs_hg_dir:\n file_pos = file.find(\"runs\")+len(\"runs\")+1\n output.append(file[file_pos:])\n return output"
},
{
"identifier": "validate_model_path",
"path": "src/utils/common.py",
"snippet": "def validate_model_path(model_name):\n if not model_name:\n return False,\"\"\n home_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n base_model_config_path1 = os.path.join(home_dir, \"models\", model_name)\n base_model_config_path2 = os.path.join(base_model_config_path1, \"config.json\")\n run_model_config_path1 = os.path.join(home_dir, \"runs\", model_name)\n run_model_config_path2 = os.path.join(run_model_config_path1, \"config.json\")\n if os.path.exists(base_model_config_path1) and base_model_config_path1.endswith(\".gguf\"):\n return True,base_model_config_path1\n if os.path.exists(run_model_config_path1) and run_model_config_path1.endswith(\".gguf\") :\n return True,run_model_config_path1\n if os.path.exists(base_model_config_path2):\n return True,base_model_config_path1\n if os.path.exists(run_model_config_path2):\n return True,run_model_config_path1\n return False,\"\""
},
{
"identifier": "get_runs_models",
"path": "src/utils/common.py",
"snippet": "def get_runs_models():\n training_runs_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), 'runs')\n run_names = os.listdir(training_runs_dir)\n run_names.sort(key=lambda file: os.path.getmtime(os.path.join(training_runs_dir, file)))\n runs_output_model = []\n for run_name in run_names:\n run_name_dir = os.path.join(training_runs_dir, run_name)\n run_output_model = os.path.join(run_name_dir, \"output_model\")\n if os.path.exists(run_output_model):\n run_output_model_names = os.listdir(run_output_model)\n for run_output_model_name in run_output_model_names:\n if run_output_model_name.find(\"merged_\") >= 0:\n runs_output_model.append(os.path.join(run_name, \"output_model\", run_output_model_name, \"ori\"))\n runs_output_model = runs_output_model[::-1]\n return runs_output_model"
},
{
"identifier": "get_model_type",
"path": "src/utils/chat_prompts.py",
"snippet": "def get_model_type(model_path):\n if model_path:\n if model_path.lower().find(\"mistral\") >= 0 and model_path.lower().find(\"instruct\") >= 0:\n model_type = \"mistral\"\n elif model_path.lower().find(\"llama\") >= 0 and model_path.lower().find(\"chat\") >= 0:\n model_type = \"llama2\"\n elif model_path.lower().find(\"zephyr\") >= 0:\n model_type = \"zephyr\"\n else:\n model_type = \"other model\"\n else:\n model_type = \"other model\"\n return model_type"
},
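A quick way to see what the keyword rules above return for typical model paths; classify_model below re-states the same checks for illustration and is not part of the repository:

def classify_model(model_path):
    """Same keyword rules as get_model_type, written as early returns."""
    p = (model_path or "").lower()
    if "mistral" in p and "instruct" in p:
        return "mistral"
    if "llama" in p and "chat" in p:
        return "llama2"
    if "zephyr" in p:
        return "zephyr"
    return "other model"

for path in ["models/Mistral-7B-Instruct-v0.1",
             "runs/merged_Llama-2-7b-chat-hf/ori",
             "models/zephyr-7b-beta",
             "models/gpt2"]:
    print(path, "->", classify_model(path))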
{
"identifier": "get_chat_history_prompt",
"path": "src/utils/chat_prompts.py",
"snippet": "def get_chat_history_prompt(chat_history,model_type=\"llama2\"):\n if model_type == \"other model\":\n prompt = ','.join(chat_history[:-2])\n prompt = prompt + chat_history[-2]\n elif model_type == \"llama2\":\n prompt = format_chat_history_prompt_for_llama2_7b_chat(chat_history)\n elif model_type == \"zephyr\":\n prompt = format_chat_history_prompt_for_zephyr_7b_instruct(chat_history)\n elif model_type == \"mistral\":\n prompt = format_chat_history_prompt_for_mistral_7b_instruct(chat_history)\n return prompt"
},
{
"identifier": "get_model_prompt_template",
"path": "src/utils/chat_prompts.py",
"snippet": "def get_model_prompt_template(model_type=\"llama2\"):\n if model_type == \"other model\":\n prompt_template = PromptTemplate.from_template(\n \"{question}\"\n )\n elif model_type == \"llama2\":\n prompt_template = PromptTemplate.from_template(\n \"<s>[INST] {question} [/INST]\"\n )\n elif model_type == \"zephyr\":\n prompt_template = PromptTemplate.from_template(\n \"<|user|>\\n{question}</s><|assistant|>\\n\"\n )\n elif model_type == \"mistral\":\n prompt_template = PromptTemplate.from_template(\n \"<s>[INST] {question} [/INST]\"\n )\n return prompt_template"
},
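The same templates can be filled without langchain's PromptTemplate; the sketch below uses plain str.format with the template strings copied from the snippet (a simplification of the real code path, for illustration only):

TEMPLATES = {
    "llama2":  "<s>[INST] {question} [/INST]",
    "mistral": "<s>[INST] {question} [/INST]",
    "zephyr":  "<|user|>\n{question}</s><|assistant|>\n",
    "other model": "{question}",
}

def build_prompt(model_type, question):
    # str.format plays the role of PromptTemplate.from_template(...).format(...)
    return TEMPLATES.get(model_type, "{question}").format(question=question)

print(build_prompt("zephyr", "Write a haiku about GPUs."))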
{
"identifier": "download_model",
"path": "src/utils/download_model.py",
"snippet": "class ModelDownloader:\n def __init__(self, max_retries=5):\n def sanitize_model_and_branch_names(self, model, branch):\n def get_download_links_from_huggingface(self, model, branch, text_only=False, specific_file=None):\n def get_output_folder(self, model, branch, is_lora, is_llamacpp=False, base_folder=None):\n def get_single_file(self, url, output_folder, start_from_scratch=False):\n def start_download_threads(self, file_list, output_folder, start_from_scratch=False, threads=4):\n def download_model_files(self, model, branch, links, sha256, output_folder, progress_bar=None, start_from_scratch=False, threads=1, specific_file=None, is_llamacpp=False):\n def check_model_files(self, model, branch, links, sha256, output_folder):"
},
{
"identifier": "QloraTrainer",
"path": "src/finetune/qlora_trainer.py",
"snippet": "class QloraTrainer(PeftTrainer):\n\n def __init__(self, config: dict):\n self.config = config\n self.tokenizer = None\n self.base_model = None\n self.merged_model = None\n self.dataset = None\n self.fused_model = None\n self.train_dataset = None\n self.val_dataset = None\n self.logging_callback = self.LoggingCallbacks()\n print(\"config:\",config)\n def load_dataset(self):\n if self.config[\"dataset\"][\"hg_dataset_dir\"]:\n if os.path.exists(os.path.join(self.config[\"dataset\"][\"hg_dataset_dir\"],\"dataset_infos.json\")):\n if self.config[\"dataset\"][\"hg_train_dataset\"]:\n self.train_dataset= datasets.load_dataset(self.config[\"dataset\"][\"hg_dataset_dir\"],split=self.config[\"dataset\"][\"hg_train_dataset\"])\n if self.config[\"dataset\"][\"hg_val_dataset\"]:\n self.val_dataset = datasets.load_dataset(self.config[\"dataset\"][\"hg_dataset_dir\"],split=self.config[\"dataset\"][\"hg_val_dataset\"])\n elif os.path.exists(os.path.join(self.config[\"dataset\"][\"hg_dataset_dir\"],\"dataset_dict.json\")):\n if self.config[\"dataset\"][\"hg_train_dataset\"]:\n self.train_dataset = datasets.load_from_disk(\n self.config[\"dataset\"][\"hg_dataset_dir\"] + \"/\" + self.config[\"dataset\"][\"hg_train_dataset\"])\n if self.config[\"dataset\"][\"hg_val_dataset\"]:\n self.val_dataset = datasets.load_from_disk(\n self.config[\"dataset\"][\"hg_dataset_dir\"] + \"/\" + self.config[\"dataset\"][\"hg_val_dataset\"])\n else:\n raise ValueError(\n f'Invalid Dataset format {self.config[\"dataset\"][\"hg_dataset_dir\"]}.')\n else:\n\n if self.config[\"dataset\"][\"local_dataset_dir\"]:\n if os.path.exists(os.path.join(self.config[\"dataset\"][\"local_dataset_dir\"], \"dataset_infos.json\")):\n if self.config[\"dataset\"][\"local_train_set\"]:\n self.train_dataset = datasets.load_dataset(self.config[\"dataset\"][\"local_dataset_dir\"],\n split=self.config[\"dataset\"][\"local_train_set\"])\n if self.config[\"dataset\"][\"local_val_set\"]:\n self.val_dataset = datasets.load_dataset(self.config[\"dataset\"][\"local_dataset_dir\"],\n split=self.config[\"dataset\"][\"local_val_set\"])\n elif os.path.exists(os.path.join(self.config[\"dataset\"][\"local_dataset_dir\"], \"dataset_dict.json\")):\n if self.config[\"dataset\"][\"local_train_set\"]:\n self.train_dataset = datasets.load_from_disk(\n self.config[\"dataset\"][\"local_dataset_dir\"] + \"/\" + self.config[\"dataset\"][\"local_train_set\"])\n if self.config[\"dataset\"][\"local_val_set\"]:\n self.val_dataset = datasets.load_from_disk(\n self.config[\"dataset\"][\"local_dataset_dir\"] + \"/\" + self.config[\"dataset\"][\"local_val_set\"])\n else:\n raise ValueError(\n f'Invalid Dataset format {self.config[\"dataset\"][\"local_dataset_dir\"]}.')\n\n\n if self.config[\"dataset\"][\"max_length\"] == \"Model Max Length\":\n\n if self.config[\"model\"][\"base_model_name\"].rfind(\"llama\") >= 0:\n context_window = 1024*4\n elif self.config[\"model\"][\"base_model_name\"].rfind(\"mistral\") >= 0:\n context_window = 1024*4\n elif self.config[\"model\"][\"base_model_name\"].rfind(\"zephyr\") >= 0:\n context_window = 1024*4\n else:\n context_window = self.tokenizer.model_max_length\n if self.tokenizer.model_max_length == int(1e30):\n context_window = 1024\n else:\n context_window = self.config[\"dataset\"][\"max_length\"]\n print(\"context_window:\",context_window)\n self.train_dataset = self.train_dataset.map(lambda sample: self.tokenizer(\n self.generate_prompt(\n sample,\n self.tokenizer.eos_token),\n max_length=context_window,\n truncation=True,\n # 
padding=True\n ))\n if self.val_dataset:\n self.val_dataset = self.val_dataset.map(lambda sample: self.tokenizer(\n self.generate_prompt(\n sample,\n self.tokenizer.eos_token),\n max_length=context_window,\n truncation=True,\n padding=True\n ))\n def generate_prompt(self,sample,eos_token):\n\n prompt = self.config[\"dataset\"][\"prefix1\"]+sample[self.config[\"dataset\"][\"datatset_col1\"]]+\\\n self.config[\"dataset\"][\"prefix2\"] + sample[self.config[\"dataset\"][\"datatset_col2\"]]+eos_token\n # print(\"prompt:\",prompt)\n return prompt\n\n def load_model(self):\n\n if self.config[\"model\"][\"fine_tuning_type\"] == \"QLoRA\":\n bnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.bfloat16\n )\n elif self.config[\"model\"][\"fine_tuning_type\"] == \"LoRA\":\n bnb_config = None\n try:\n if self.config[\"model\"][\"base_model_name\"].rfind(\"llama\")>=0:\n self.tokenizer = LlamaTokenizer.from_pretrained(self.config[\"model\"][\"base_model_path\"])\n self.base_model = LlamaForCausalLM.from_pretrained(self.config[\"model\"][\"base_model_path\"], quantization_config=bnb_config, device_map={\"\":0},trust_remote_code=True)\n else:\n self.tokenizer = AutoTokenizer.from_pretrained(self.config[\"model\"][\"base_model_path\"])\n self.base_model = AutoModelForCausalLM.from_pretrained(self.config[\"model\"][\"base_model_path\"], quantization_config=bnb_config, device_map={\"\":0},trust_remote_code=True)\n except Exception as e:\n return -1,e\n if not self.tokenizer.pad_token:\n self.tokenizer.add_special_tokens({'pad_token': '[PAD]'})\n self.base_model.resize_token_embeddings(len(self.tokenizer))\n if self.config[\"training\"][\"gradient_checkpointing\"] and not self.config[\"model\"][\"base_model_name\"].rfind(\"phi\")>=0:\n # self.base_model.gradient_checkpointing_enable()\n self.base_model = prepare_model_for_kbit_training(self.base_model,use_gradient_checkpointing=True,gradient_checkpointing_kwargs={'use_reentrant':False})\n else:\n self.base_model = prepare_model_for_kbit_training(self.base_model, use_gradient_checkpointing=False,gradient_checkpointing_kwargs={'use_reentrant':False})\n if self.config[\"model\"][\"base_model_name\"].lower().rfind(\"llama\")>=0 or \\\n self.config[\"model\"][\"base_model_name\"].lower().rfind(\"mistral\") >= 0 or \\\n self.config[\"model\"][\"base_model_name\"].lower().rfind(\"zephyr\") >= 0:\n target_modules = LORA_TARGET_MODULES[\"llama\"]\n task_type = \"CAUSAL_LM\"\n elif self.config[\"model\"][\"base_model_name\"].lower().find(\"falcon\") >= 0:\n target_modules = LORA_TARGET_MODULES[\"falcon\"]\n task_type = \"CAUSAL_LM\"\n elif self.config[\"model\"][\"base_model_name\"].lower().find(\"gpt2\") >= 0:\n target_modules = LORA_TARGET_MODULES[\"gpt2\"]\n task_type = \"CAUSAL_LM\"\n elif self.config[\"model\"][\"base_model_name\"].lower().find(\"phi\") >= 0:\n target_modules = [\"Wqkv\", \"out_proj\"]\n task_type = \"CAUSAL_LM\"\n else:\n raise ValueError(f'{self.config[\"model\"][\"base_model_name\"]} is not yet supported.')\n #T5,bart, task_type = \"SEQ_2_SEQ_LM\" ,AutoModelForSeq2SeqLM\n \n lora_config = LoraConfig(\n r=self.config[\"model\"][\"lora_r\"],\n lora_alpha=self.config[\"model\"][\"lora_alpha\"],\n target_modules=target_modules,\n lora_dropout=self.config[\"model\"][\"lora_dropout\"],\n bias=self.config[\"model\"][\"lora_bias\"],\n task_type=task_type,\n )\n self.fused_model = get_peft_model(self.base_model, lora_config)\n # 
self.fused_model.gradient_checkpointing = True\n return 0,\"\"\n def train(self):\n self.run_name = datetime.now().strftime(\"run_%Y-%m-%d_%H-%M-%S\")\n logging_dir = os.path.join(self.config[\"training\"][\"root_dir\"],\"runs\", self.run_name,\"tensorboard\")\n run_output_model_name = self.config['model']['base_model_name'].replace('/', '_')\n output_model_dir = os.path.join(self.config[\"training\"][\"root_dir\"],\"runs\", self.run_name,\"output_model\", run_output_model_name + \"_adapter\")\n checkpoint_dir = os.path.join(self.config[\"training\"][\"root_dir\"],\"runs\", self.run_name)\n self.trainer = transformers.Trainer(\n model=self.fused_model,\n train_dataset=self.train_dataset,\n eval_dataset= self.val_dataset if self.val_dataset else None,\n args=transformers.TrainingArguments(\n per_device_train_batch_size=self.config[\"training\"][\"batch_size\"],\n gradient_accumulation_steps=self.config[\"training\"][\"gradient_accumulation_steps\"],\n warmup_steps=self.config[\"training\"][\"warmup_steps\"],\n num_train_epochs=self.config[\"training\"][\"epochs\"],\n learning_rate=self.config[\"training\"][\"learning_rate\"],\n fp16=True,\n output_dir=checkpoint_dir,\n report_to=\"tensorboard\",\n optim=self.config[\"training\"][\"optimizer\"],\n lr_scheduler_type=self.config[\"training\"][\"lr_scheduler_type\"],\n load_best_model_at_end=True if self.val_dataset else False,\n save_strategy=\"steps\",\n save_steps = self.config[\"training\"][\"eval_steps\"],\n save_total_limit=1,\n evaluation_strategy=\"steps\" if self.val_dataset else \"no\",\n eval_steps=self.config[\"training\"][\"eval_steps\"], # eval interval\n per_device_eval_batch_size=1,\n # eval_steps=10, # eval interval\n logging_steps=100,#self.config[\"training\"][\"eval_steps\"]\n # run_name=self.run_name,\n logging_dir=logging_dir,\n ),\n\n callbacks=[self.logging_callback,transformers.EarlyStoppingCallback(early_stopping_patience=self.config[\"training\"][\"early_stopping_patience\"]) ] if self.config[\"training\"][\"early_stopping_patience\"]>0 else [self.logging_callback],\n data_collator=transformers.DataCollatorForLanguageModeling(self.tokenizer, mlm=False),\n\n )\n\n self.fused_model.config.use_cache = False # silence the warnings. 
Please re-enable for inference!\n try:\n self.trainer.train()\n except Exception as e:\n return -1,e\n # model_save_path = f\"{self.config['training']['output_dir']}/{self.config['model']['base_model_name']}_adapter\"\n self.trainer.save_model(output_model_dir)\n return 0,\"\"\n def merge_and_save(self):\n\n if self.config[\"model\"][\"base_model_name\"].rfind(\"llama\")>=0:\n base_model = LlamaForCausalLM.from_pretrained(self.config[\"model\"][\"base_model_path\"], device_map=\"cpu\",trust_remote_code=True)\n else:\n base_model = AutoModelForCausalLM.from_pretrained(self.config[\"model\"][\"base_model_path\"], device_map=\"cpu\",trust_remote_code=True)\n run_output_model_name = self.config['model']['base_model_name'].replace('/', '_')\n output_adapter_model_dir = os.path.join(self.config[\"training\"][\"root_dir\"], \"runs\", self.run_name, \"output_model\",\n run_output_model_name + \"_adapter\")\n\n model = PeftModel.from_pretrained(base_model, output_adapter_model_dir)\n\n merged_model = model.merge_and_unload()\n run_output_model_name = self.config['model']['base_model_name'].replace('/', '_')\n output_merged_model_dir = os.path.join(self.config[\"training\"][\"root_dir\"], \"runs\", self.run_name, \"output_model\",\"merged_\"+run_output_model_name,\"ori\")\n merged_model.save_pretrained(output_merged_model_dir)\n self.tokenizer.save_pretrained(output_merged_model_dir)\n\n def _print_trainable_parameters(self, model):\n \"\"\"\n Prints the number of trainable parameters in the model.\n \"\"\"\n trainable_params = 0\n all_param = 0\n for _, param in model.named_parameters():\n all_param += param.numel()\n if param.requires_grad:\n trainable_params += param.numel()\n print(\n f\"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}\"\n )\n\n\n class LoggingCallbacks(transformers.TrainerCallback):\n # current_step = 0\n # max_steps = 0\n\n def on_step_begin(self, args: transformers.TrainingArguments, state: transformers.TrainerState,\n control: transformers.TrainerControl, **kwargs):\n pass\n\n def on_step_end(self, args: transformers.TrainingArguments, state: transformers.TrainerState,\n control: transformers.TrainerControl, **kwargs):\n global TRAINING_STATUS\n if TRAINING_STATUS.status == 1:\n control.should_epoch_stop = True\n control.should_training_stop = True\n else:\n self.max_steps = state.max_steps\n self.current_step = state.global_step\n\n def on_log(self, args: transformers.TrainingArguments, state: transformers.TrainerState,\n control: transformers.TrainerControl, logs, **kwargs):\n pass\n\n def free_memroy(self):\n try:\n del self.fused_model\n del self.tokenizer\n del self.base_model\n del self.trainer\n torch.cuda.empty_cache()\n except Exception as e:\n print(\"Free memory error:\",e)"
},
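The per-architecture branch that chooses LoRA target modules reduces to a lookup. The sketch below mirrors that selection; the module lists are common community defaults and an assumption, since the repository's LORA_TARGET_MODULES table is not shown in this snippet:

# Assumed module lists; the repo's LORA_TARGET_MODULES is not visible here.
ASSUMED_TARGET_MODULES = {
    "llama": ["q_proj", "k_proj", "v_proj", "o_proj"],
    "falcon": ["query_key_value"],
    "gpt2": ["c_attn"],
    "phi": ["Wqkv", "out_proj"],
}

def pick_target_modules(base_model_name):
    """Map a base model name to LoRA target modules, like QloraTrainer.load_model."""
    name = base_model_name.lower()
    if any(k in name for k in ("llama", "mistral", "zephyr")):
        return ASSUMED_TARGET_MODULES["llama"]
    for key in ("falcon", "gpt2", "phi"):
        if key in name:
            return ASSUMED_TARGET_MODULES[key]
    raise ValueError(f"{base_model_name} is not yet supported.")

print(pick_target_modules("HuggingFaceH4/zephyr-7b-beta"))
# The chosen list would feed peft's LoraConfig(target_modules=..., task_type="CAUSAL_LM").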
{
"identifier": "TRAINING_STATUS",
"path": "src/finetune/qlora_trainer.py",
"snippet": "TRAINING_STATUS = TrainingStatus()"
},
{
"identifier": "download_model_wrapper",
"path": "src/utils/download_huggingface_repo.py",
"snippet": "def download_model_wrapper(repo_id,local_model_root_dir, specific_file=None, return_links=False, check=False,progress = gr.Progress()):\n if repo_id.endswith(\".gguf\"):\n try:\n model_dir = os.path.join(local_model_root_dir, '/'.join(repo_id.split('/')[0:-1]))\n yield f\"<span style='color:green'> Downloading file {repo_id.split('/')[-1]} to `{model_dir}/...`</span>\"\n hf_hub_download(repo_id='/'.join(repo_id.split('/')[0:-1]), filename=repo_id.split('/')[-1], local_dir=model_dir, resume_download=True,\n force_download=False)\n except:\n progress(1.0)\n yield traceback.format_exc().replace('\\n', '\\n\\n')\n yield \"<span style='color:green'> Download successful!</span>\"\n else:\n if repo_id == \"\" or repo_id == \"None\":\n # return gr.update(value=\"Model's name is empty!\",visible=True)\n yield f\"Model's name is empty!\"\n else:\n model_dir = os.path.join(local_model_root_dir, repo_id)\n\n model_config_path = os.path.join(model_dir, \"config.json\")\n model_config_path1 = os.path.join(model_dir, \"pytorch_model.bin\")\n model_config_path2 = os.path.join(model_dir, \"model.safetensors\")\n if os.path.exists(model_config_path1) or os.path.exists(model_config_path2):\n yield '<span style=\"color:green\"> This model has already been downloaded.</span>'\n else:\n\n try:\n progress(0.0)\n # download_model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\"download-model.py\")\n # downloader = importlib.import_module(download_model_path).ModelDownloader()\n downloader = download_model.ModelDownloader()\n model, branch = downloader.sanitize_model_and_branch_names(repo_id, None)\n yield (\"Getting the download links from Hugging Face\")\n links, sha256, is_lora, is_llamacpp, link_file_size_list = downloader.get_download_links_from_huggingface(model,\n branch,\n text_only=False,\n specific_file=specific_file\n )\n if return_links:\n yield '\\n\\n'.join([f\"`{Path(link).name}`\" for link in links])\n yield (\"Getting the output folder\")\n # base_folder = shared.args.lora_dir if is_lora else shared.args.model_dir\n base_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"models\")\n output_folder = downloader.get_output_folder(model, branch, is_lora, is_llamacpp=is_llamacpp,\n base_folder=base_folder)\n link_file_size_list = np.array(link_file_size_list)\n links = np.array(links)\n sorted_index = np.argsort(link_file_size_list)\n link_file_size_list = link_file_size_list[sorted_index]\n links = links[sorted_index]\n total_file_size = sum(link_file_size_list)\n copyed_file_size = 0\n for link, link_file_size in zip(links, link_file_size_list):\n model_file_name = link.split('/')[-1]\n if model_file_name.find(\"Pooling\")>=0:\n model_file_name = model_file_name+\"/config.json\"\n # yield (f\"Downloading file {model_file_name} to `{output_folder}/...`\")\n yield f\"<span style='color:green'> Downloading file {model_file_name} to `{output_folder}/...`</span>\"\n hf_hub_download(repo_id=repo_id, filename=model_file_name, local_dir=model_dir, resume_download=True,\n force_download=False)\n copyed_file_size += link_file_size\n progress(copyed_file_size / total_file_size)\n # yield (\"Download successful!\")\n yield \"<span style='color:green'> Download successful!</span>\"\n except:\n progress(1.0)\n yield traceback.format_exc().replace('\\n', '\\n\\n')"
},
{
"identifier": "download_dataset_wrapper",
"path": "src/utils/download_huggingface_repo.py",
"snippet": "def download_dataset_wrapper(repo_id,local_dataset_root_dir,progress = gr.Progress()):\n repo_id = repo_id.strip()\n if repo_id == \"\":\n yield \"<span style='color:red'> This Dataset's name is empty!</span>\"\n else:\n dataset_dir = os.path.join(local_dataset_root_dir, repo_id)\n # dataset_config_path1 = os.path.join(dataset_dir, \"config.json\")\n dataset_config_path1 = os.path.join(dataset_dir, \"dataset_infos.json\")\n dataset_config_path2 = os.path.join(dataset_dir, \"dataset_dict.json\")\n\n if os.path.exists(dataset_config_path1) or os.path.exists(dataset_config_path2):\n yield \"<span style='color:green'> This Dataset has already been downloaded.</span>\"\n else:\n try:\n\n progress(0.3)\n yield f\"<span style='color:green'> Downloading dataset to `{dataset_dir}/...`</span>\"\n datasets = load_dataset(repo_id)\n progress(0.8)\n yield \"<span style='color:green'> Download successful!</span>\"\n datasets.save_to_disk(dataset_dir)\n # datasets = load_from_disk(\"dddd\")\n yield \"<span style='color:green'> Download successful!</span>\"\n except:\n progress(1.0)\n yield traceback.format_exc().replace('\\n', '\\n\\n')"
}
] | import pandas as pd
import math
import numpy as np
import gc
import os,requests
import subprocess,threading
import time
import gradio as gr
import os
import traceback
import numpy as np
import glob
import shutil
import torch
import socket
from src.utils.common import login_huggingface
from src.finetune.huggingface_inference import HuggingfaceInference
from src.finetune.llama_cpp_inference import LlamaCppInference
from src.rag.qa_with_rag import QAWithRAG
from src.utils.common import read_yaml,get_first_row_from_dataset,\
get_runs_model_names_from_dir,get_hg_model_names_from_dir,get_hg_model_names_and_gguf_from_dir,validate_model_path,get_runs_models
from src.utils.chat_prompts import get_model_type,get_chat_history_prompt,get_model_prompt_template
from transformers.training_args import OptimizerNames
from huggingface_hub import hf_hub_download
from src.utils import download_model
from pathlib import Path
from src.finetune.qlora_trainer import QloraTrainer
from src.finetune.qlora_trainer import TRAINING_STATUS
from src.utils.download_huggingface_repo import download_model_wrapper,download_dataset_wrapper | 10,464 |
# os.environ['HTTP_PROXY'] = 'http://127.0.0.1:8889'
# os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:8889'
LOCAL_HOST_IP = "0.0.0.0"
TENSORBOARD_URL = "http://" + LOCAL_HOST_IP + ":6006/"
INIT_DATASET_NAME = "test_python_code_instructions_5000_rows"
RAG_DATA_LIST_DROPDOWN = ""
TEXT_SPLITTER_DROPDOWN = ""
CHUNK_SIZE_SLIDER = 0
CHUNK_OVERLAP_SLIDER = -1
SEPARATORS_TEXTBOX = ""
EMBEDDING_MODEL_SOURCE_RADIO = ""
HUB_EMBEDDING_MODEL_NAMES_DROPDOWN = ""
LOCAL_EMBEDDING_MODEL_NAMES_DROPDOWN = ""
CHAT_MODEL_SOURCE_RADIO = ""
HUB_CHAT_MODEL_NAMES_DROPDOWN = ""
LOCAL_CHAT_MODEL_NAMES_DROPDOWN = ""
SEARCH_TOP_K_SLIDER = ""
SEARCH_SCORE_THRESHOLD_SLIDER = ""
training_ret_val = -1
error_msg = ""
current_running_model_name = ""
infer_model = None
stop_generation_status = False
chatbot_history=[]
chatbot_height = 500
rag_chatbot_history=[]
rag_stop_generation_status = False
|
# os.environ['HTTP_PROXY'] = 'http://127.0.0.1:8889'
# os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:8889'
LOCAL_HOST_IP = "0.0.0.0"
TENSORBOARD_URL = "http://" + LOCAL_HOST_IP + ":6006/"
INIT_DATASET_NAME = "test_python_code_instructions_5000_rows"
RAG_DATA_LIST_DROPDOWN = ""
TEXT_SPLITTER_DROPDOWN = ""
CHUNK_SIZE_SLIDER = 0
CHUNK_OVERLAP_SLIDER = -1
SEPARATORS_TEXTBOX = ""
EMBEDDING_MODEL_SOURCE_RADIO = ""
HUB_EMBEDDING_MODEL_NAMES_DROPDOWN = ""
LOCAL_EMBEDDING_MODEL_NAMES_DROPDOWN = ""
CHAT_MODEL_SOURCE_RADIO = ""
HUB_CHAT_MODEL_NAMES_DROPDOWN = ""
LOCAL_CHAT_MODEL_NAMES_DROPDOWN = ""
SEARCH_TOP_K_SLIDER = ""
SEARCH_SCORE_THRESHOLD_SLIDER = ""
training_ret_val = -1
error_msg = ""
current_running_model_name = ""
infer_model = None
stop_generation_status = False
chatbot_history=[]
chatbot_height = 500
rag_chatbot_history=[]
rag_stop_generation_status = False | qa_with_rag = QAWithRAG() | 3 | 2023-11-25 12:37:21+00:00 | 12k |
basf/ARCANA | arcana/procedures/training.py | [
{
"identifier": "logger",
"path": "arcana/logger/logger.py",
"snippet": "APP_LOGGER_NAME = 'ARCANA'\ndef setup_applevel_logger(logger_name = APP_LOGGER_NAME, file_name=None):\ndef get_logger(module_name):"
},
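Only the signatures of setup_applevel_logger and get_logger are visible above, so the following stdlib-logging sketch is a plausible implementation of the app-level/child-logger pattern they suggest, not the repository's actual body:

import logging
import sys

APP_LOGGER_NAME = "ARCANA"

def setup_applevel_logger(logger_name=APP_LOGGER_NAME, file_name=None):
    # Configure the application-level logger once; children inherit its handlers.
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.DEBUG)
    fmt = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(fmt)
    logger.handlers.clear()
    logger.addHandler(handler)
    if file_name:
        file_handler = logging.FileHandler(file_name)
        file_handler.setFormatter(fmt)
        logger.addHandler(file_handler)
    return logger

def get_logger(module_name):
    # Child loggers propagate records up to the ARCANA logger's handlers.
    return logging.getLogger(APP_LOGGER_NAME).getChild(module_name)

setup_applevel_logger()
get_logger("demo").info("logger wired up")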
{
"identifier": "train_model",
"path": "arcana/training/train_model.py",
"snippet": "def train_model(self, train_loader, val_loader, val_lengths, trial=None):\n \"\"\"The main function that controls the training process which does the following:\n - initialize the training\n - train the model\n - validate the model\n - calculate the loss for each epoch and add it to the loss_trace\n - print the last losses and scores after every 50 epochs\n - early stopping\n - update the training parameters\n - save the training results and plots\n\n Args:\n train_loader (torch.utils.data.DataLoader): training data loader\n val_loader (torch.utils.data.DataLoader): validation data loader\n val_lengths (list): list of lengths of the validation data\n trial (optuna.trial): optuna trial\n \"\"\"\n log.info(f\"start training with device: {self.device}\")\n\n # initialize the training\n self.initialize_training()\n self.seq2seq.train()\n\n log.info(f\"number of trainable parameters: {self.count_parameters()}\")\n\n # train the model\n for epoch in tqdm(range(self.config.number_of_epochs)):\n # Reset the temp loss trace for each epoch\n self.loss_trace.reset_temp_loss_trace()\n available_seq = self.available_sequence_list[epoch]\n # train the model\n self.train_epoch(epoch, train_loader, available_seq)\n # validate the model\n self.validation_epoch(val_loader, val_lengths, available_seq)\n\n self.loss_trace.calculate_epoch_loss(train_loader, val_loader)\n\n # print the last losses and scores after every 50 epochs\n if (epoch+1) % 20 == 0:\n # Constructing the log message in multiple steps\n epoch_info = f\"Epoch {epoch+1}/{self.config.number_of_epochs}\"\n train_loss_info = f\"train loss: {self.loss_trace.losses['train_loss_epoch'][-1]:.6f}\"\n val_loss_info = f\"val loss: {self.loss_trace.losses['val_loss_epoch'][-1]:.6f}\"\n log_message = f\"{epoch_info} - {train_loss_info} - {val_loss_info}\"\n log.info(log_message)\n\n # early stopping\n should_stop = self.early_stopping_check(train_loss = self.loss_trace.losses['train_loss_epoch'][-1],\n val_loss = self.loss_trace.losses['val_loss_epoch'][-1], epoch=epoch+1)\n\n\n if should_stop:\n log.info(f\"Early stopping after {epoch+1} epochs and no improvements for {self.config.patience} epochs\")\n self.save_training_results_and_plots(epoch = epoch)\n break\n\n if self.learning_rate_type == \"ReduceLROnPlateau\":\n self.scheduler.step(self.loss_trace.losses['val_loss_epoch'][-1])\n\n self.update_training_params(epoch)\n\n # TODO optuna part\n if trial is not None:\n # Add prune mechanism\n trial.report(self.loss_trace.losses[\"val_loss_epoch\"][-1], epoch)\n\n if trial.should_prune():\n raise optuna.exceptions.TrialPruned()\n\n self.save_training_results_and_plots()"
},
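early_stopping_check is called with train/val losses and a patience setting, but its body is not shown; a minimal patience counter of the kind it presumably implements could look like this (an assumption, for illustration only):

class EarlyStopping:
    """Stop when validation loss has not improved for `patience` epochs."""
    def __init__(self, patience=10, min_delta=0.0):
        self.patience = patience
        self.min_delta = min_delta
        self.best = float("inf")
        self.bad_epochs = 0

    def should_stop(self, val_loss):
        if val_loss < self.best - self.min_delta:
            self.best = val_loss
            self.bad_epochs = 0
        else:
            self.bad_epochs += 1
        return self.bad_epochs >= self.patience

stopper = EarlyStopping(patience=3)
for loss in [0.9, 0.8, 0.81, 0.82, 0.83]:
    if stopper.should_stop(loss):
        print("early stop triggered")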
{
"identifier": "LossFactory",
"path": "arcana/losses/loss.py",
"snippet": "class LossFactory:\n \"\"\"Factory class for losses.\"\"\"\n @staticmethod\n def create_loss(config):\n \"\"\"Create a loss.\n\n Args:\n config (ModelConfig): model config\n\n Returns:\n torch.nn.Module: loss function\n \"\"\"\n\n if config.loss_type == \"huber\":\n return torch.nn.SmoothL1Loss(beta=config.beta, reduction=config.reduction)#(beta=beta_value, reduction='none')\n if config.loss_type == \"logcosh\":\n return LogCoshLoss()\n if config.loss_type == \"quantile\":\n return QuantileLoss(quantile=config.quantile)#(quantile=0.6)\n if config.loss_type == \"pinball\":\n return PinballLoss()\n if config.loss_type == \"combinedhp\":\n return CombinedHPLoss(delta=config.delta)#(delta=deltavalue)\n if config.loss_type == \"combinedlp\":\n return CombinedLPLoss()\n if config.loss_type == \"rmse\":\n return torch.sqrt(torch.nn.MSELoss() + 1e-6)\n if config.loss_type == \"mse\":\n return torch.nn.MSELoss(reduction=config.reduction)#(reduction='none')\n raise ValueError(\"Invalid loss type\")"
},
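The factory exposes a QuantileLoss(quantile=...) whose body is not shown; the standard pinball/quantile loss it presumably computes can be sketched in a few lines of torch (an assumption about the implementation):

import torch

def quantile_loss(pred, target, quantile=0.6):
    # Pinball loss: penalize under- and over-prediction asymmetrically.
    diff = target - pred
    return torch.max(quantile * diff, (quantile - 1) * diff).mean()

pred = torch.tensor([1.0, 2.0, 3.0])
target = torch.tensor([1.5, 1.5, 3.5])
print(quantile_loss(pred, target))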
{
"identifier": "SchedulerFactory",
"path": "arcana/regularizations/optimizer_scheduler.py",
"snippet": "class SchedulerFactory:\n \"\"\"Factory class for the scheduler\"\"\"\n def __init__(self, optimizer, model_config, len_train_loader = None):\n self.optimizer = optimizer\n self.model_config = model_config\n self.len_train_loader = len_train_loader\n\n def get_scheduler(self, learning_rate_type):\n \"\"\"Get the scheduler\n\n Args:\n learning_rate_type (str): learning rate type\n\n Returns:\n torch.optim: scheduler of the given type\n Raises:\n ValueError: if the learning rate type is unknown\n \"\"\"\n\n if learning_rate_type == \"reduced\":\n return self._reduced_lr_scheduler()\n if learning_rate_type == \"cycle\":\n return self._cyclic_lr_scheduler()\n if learning_rate_type == \"onecycle\":\n return self._one_cycle_lr_scheduler()\n raise ValueError(f\"Unknown learning rate type: {learning_rate_type}\")\n\n def _reduced_lr_scheduler(self):\n \"\"\"Get the reduced learning rate scheduler\n\n Returns:\n torch.optim: reduced learning rate scheduler\n \"\"\"\n return optim.lr_scheduler.ReduceLROnPlateau(\n self.optimizer,\n mode=\"min\",\n factor=self.model_config.factor_reduced,\n patience=8,\n verbose=True,\n )\n\n def _cyclic_lr_scheduler(self):\n \"\"\"Get the cyclic learning rate scheduler\n\n Returns:\n torch.optim: cyclic learning rate scheduler\n \"\"\"\n return optim.lr_scheduler.CyclicLR(\n self.optimizer,\n base_lr=self.model_config.learning_rate / 10,\n max_lr=self.model_config.learning_rate,\n mode=\"triangular2\",\n step_size_up=self.len_train_loader * 10, # FIXME: self.model_config.step_size_up, self.len_train_loader * 5,\n cycle_momentum=False,\n )\n\n def _one_cycle_lr_scheduler(self):\n \"\"\"Get the one cycle learning rate scheduler\n\n Returns:\n torch.optim: one cycle learning rate scheduler\n \"\"\"\n total_steps = self.len_train_loader * self.model_config.number_of_epochs\n return optim.lr_scheduler.OneCycleLR(\n self.optimizer, max_lr=self.model_config.learning_rate, total_steps=total_steps\n )"
},
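Minimal usage of the "onecycle" branch with stock PyTorch, to show how total_steps ties the schedule to the loader length and epoch count; the model, learning rate, and step counts below are placeholders:

from torch import nn, optim

model = nn.Linear(4, 1)
opt = optim.Adam(model.parameters(), lr=1e-3)

# "onecycle": one scheduler step per batch, total_steps = len(train_loader) * epochs
sched = optim.lr_scheduler.OneCycleLR(opt, max_lr=1e-3, total_steps=100)
for _ in range(50):
    opt.step()      # normally preceded by loss.backward()
    sched.step()
print(sched.get_last_lr())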
{
"identifier": "Seq2SeqFactory",
"path": "arcana/models/sequence_to_sequence/seq2seq_factory.py",
"snippet": "class Seq2SeqFactory:\n \"\"\"Factory class for creating Seq2Seq models.\"\"\"\n def __init__(self, config):\n # Device setup\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n if self.device.type == \"cpu\":\n torch.set_default_tensor_type('torch.FloatTensor')\n else:\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n log.info(f\"device: {self.device}\")\n\n self.config = config\n self.seq2seq = None\n\n def create_additive_model(self):\n \"\"\"Create an additive model.\n\n Args:\n config (dict): Dictionary containing the configuration parameters\n\n Returns:\n seq2seq (Seq2Seq): The additive model\n \"\"\"\n encoder = additive_encoder.AdditiveEncoder(self.config).to(self.device)\n log.info(repr(encoder))\n\n decoder = additive_decoder.AdditiveDecoder(self.config).to(self.device)\n log.info(repr(decoder))\n\n self.seq2seq = sequence_to_sequence.Seq2Seq(\n encoder=encoder, decoder=decoder, config = self.config).to(self.device)\n log.info(repr(self.seq2seq))\n\n # TODO is return needed?\n return self.seq2seq\n\n\n def create_multihead_model(self):\n \"\"\"Create a multihead model.\n\n Returns:\n seq2seq (Seq2Seq): The multihead model\n \"\"\"\n encoder = multihead_encoder.MultiheadEncoder(self.config).to(self.device)\n log.info(repr(encoder))\n\n decoder = multihead_decoder.MultiheadDecoder(self.config).to(self.device)\n log.info(repr(decoder))\n\n self.seq2seq = sequence_to_sequence.Seq2Seq(\n encoder=encoder, decoder=decoder, config=self.config).to(self.device)\n log.info(repr(self.seq2seq))\n\n return self.seq2seq\n\n\n def print_weights(self, layer):\n \"\"\"Print the weights of a layer.\n\n Args:\n layer (torch.nn.Module): The layer to print the weights of\n \"\"\"\n if isinstance(layer, torch.nn.LSTM):\n for name, param in layer.named_parameters():\n log.info(f\"name: {name}, param: {param.data}\")\n\n\n def count_parameters(self):\n \"\"\"Count the number of trainable parameters in a model.\n\n Returns:\n num_params (int): The number of trainable parameters\n \"\"\"\n # Get the number of trainable parameters\n return sum(p.numel() for p in self.seq2seq.parameters() if p.requires_grad)"
},
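count_parameters is the usual numel() sum over trainable parameters; a self-contained example on a toy LSTM:

from torch import nn

model = nn.LSTM(input_size=8, hidden_size=16, num_layers=2, batch_first=True)
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"trainable parameters: {trainable}")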
{
"identifier": "ConfigHandler",
"path": "arcana/procedures/config_handler.py",
"snippet": "class ConfigHandler:\n \"\"\"Config handler class\"\"\"\n def __init__(self):\n \"\"\"Initialize the config handler by initializing all the config dataclasses\n and setting the config values\n \"\"\"\n self.general_config = GeneralConfig()\n self.data_config = DataConfig()\n self.procedure_config = ProcedureConfig()\n self.model_config = ModelConfig()\n\n self._read_general_config()\n self._read_data_config()\n self._read_procedure_config()\n self._read_model_config()\n self.model_config.result_path = utils.prepare_folder_structure(self.general_config.test_id)\n self._backup_config(self.model_config.result_path)\n\n def _backup_config(self, result_path):\n \"\"\"Backup the config files\n\n Args:\n result_path (str): result path\n \"\"\"\n config_path = os.path.join(result_path, \"config_files\")\n shutil.copy2(self.general_config.path_to_config, config_path)\n if self.procedure_config.optuna_tuning:\n shutil.copy2(self.model_config.path_to_tuning_config, config_path)\n else:\n shutil.copy2(self.model_config.path_to_config, config_path)\n\n def _read_general_config(self):\n \"\"\"Set the general config\"\"\"\n config = configparser.ConfigParser()\n config.read(self.general_config.path_to_config)\n config.sections()\n config = config['general']\n self.general_config.test_id = self._get_config_string(config[\"test_id\"])\n self.general_config.input_data_folder = self._get_config_string(config[\"input_data_folder\"])\n self.general_config.data_name = self._get_config_string(config[\"data_name\"])\n self.general_config.pretrained_model = self._get_config_string(config[\"pretrained_model\"])\n self.general_config.scaler_model = self._get_config_string(config[\"scaler_model\"])\n\n def get_general_config(self):\n \"\"\"Get the general config\n\n Returns:\n GeneralConfig: general config\n \"\"\"\n return self.general_config\n\n\n def _read_data_config(self):\n config = configparser.ConfigParser()\n config.read(self.general_config.path_to_config)\n config.sections()\n config = config['data']\n self._parse_config_section(self.data_config, config)\n\n def get_data_config(self):\n \"\"\"Get the data config\n\n Returns:\n DataConfig: data config\n \"\"\"\n return self.data_config\n\n\n def _read_procedure_config(self):\n \"\"\"Set the procedure config\"\"\"\n config = configparser.ConfigParser()\n config.read(self.general_config.path_to_config)\n config.sections()\n config = config['procedure']\n self._parse_config_section(self.procedure_config, config)\n\n if self.procedure_config.naive_training and self.procedure_config.optuna_tuning:\n raise ValueError(\"Naive training and optuna tuning cannot be run at the same time.\"\\\n \"Please set one of them to False in the general_parameter.ini file\"\\\n \"and run the program again.\")\n\n def get_procedure_config(self):\n \"\"\"Get the procedure config\n\n Returns:\n ProcedureConfig: procedure config\n \"\"\"\n return self.procedure_config\n\n\n def _read_model_config(self):\n config = configparser.ConfigParser()\n config.read(self.model_config.path_to_config)\n # Loop through each section in the config\n for section in config.sections():\n self._parse_config_section(self.model_config, config[section])\n\n def get_model_config(self):\n \"\"\"Get the model config\n\n Returns:\n ModelConfig: model config\n \"\"\"\n return self.model_config\n\n def _parse_config_section(self, config_class, config_sec):\n \"\"\"Parse the config section by determining the type of the config and assigning the value\n to the corresponding attribute in the dataclass. 
This works for simple strings, bools, ints, floats and lists.\n It does not work strings with \".\" in them, e.g. \"1.0.0\" or a path to a file. like \"data/test.csv\"\n for this cases the _get_config_string and _get_config_list methods are used.\n Args:\n config_class (class): config class\n config_sec (dict): config section\n \"\"\"\n for key, value in config_sec.items():\n # Handle inline comments\n value = value.split(';')[0].strip()\n # Convert certain types from string\n if value.lower() == 'true':\n value = True\n elif value.lower() == 'false':\n value = False\n elif ('.' in value and (not value.startswith(\"[\")) and ('\\\\' not in value) and ('/' not in value)):\n value = float(value)\n elif value.isdigit():\n value = int(value)\n elif value.startswith(\"[\"):\n value = self._get_config_list(value)\n elif ('e-' in value.lower()) or ('e+' in value.lower()):\n value = float(value)\n else:\n # Remove extra quotes if present\n value = self._get_config_string(value)\n # Assign the value to the corresponding attribute in the dataclass\n setattr(config_class, key, value)\n\n def _get_config_string(self, value):\n \"\"\"Get the config string\n\n Args:\n value (str): value of the config\n\n Returns:\n str: string of the config\n \"\"\"\n if value == \"None\":\n return None\n return value.replace(\"'\", \"\") if value.startswith(\"'\") else value.replace('\"', \"\")\n\n def _get_config_list(self, value):\n \"\"\"Get the config list\n Args:\n value (str): value of the config\n Returns:\n list: list of the config\n \"\"\"\n try:\n return json.loads(value)\n except:\n return eval(value)\n\n\n def _set_new_config_path(self, path, config):\n \"\"\"Set the path to the config file\n This is used during optuna tuning to set the path to the config file with the best parameters\n Args:\n path (str): path to the config file\n config (class): config class which should be set\n \"\"\"\n config.path_to_config = path"
},
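_parse_config_section coerces ini strings into Python values; below is a rough, simplified re-statement of those rules (the ordering and edge cases differ slightly from the original, shown only to illustrate the idea):

import json

def coerce(value):
    """Turn an ini string into bool/int/float/list/str, roughly as the handler does."""
    value = value.split(";")[0].strip()          # drop inline comments
    if value.lower() in ("true", "false"):
        return value.lower() == "true"
    if value.startswith("["):
        return json.loads(value)
    try:
        return int(value)
    except ValueError:
        pass
    try:
        return float(value)
    except ValueError:
        return value.strip("'\"")

for raw in ["64", "1e-4", "true", "[0.6, 0.4]", "'additive' ; attention type"]:
    print(raw, "->", coerce(raw))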
{
"identifier": "DataPreparation",
"path": "arcana/processing/data_processing.py",
"snippet": "class DataPreparation:\n \"\"\"Data preparation class\"\"\"\n def __init__(self, general_config, data_config, procedure_config):\n # configurations\n self.general_config = general_config\n self.data_config = data_config\n self.procedure_config = procedure_config\n # original data\n self.df = None\n # data for the model after the splits\n self.padded_train_data = []\n self.padded_val_data = []\n self.padded_test_data = []\n # data lengths for the train, val and test data\n self.train_lengths = []\n self.val_lengths = []\n self.test_lengths = []\n # test data names for the test data after splits\n self.test_data_names = None\n # scaler for the data standardization\n self.model_data_transformation = None\n # original data splits\n self.df_train_original = None\n self.df_val_original = None\n self.df_test_original = None\n # this is just for standardization\n self.df_train_scaled = None\n self.df_val_scaled = None\n self.df_test_scaled = None\n # maximum cycles that we trained with\n self.scaled_cycle_range = None\n\n\n def get_data_for_model(self):\n \"\"\"Get the data for the model\"\"\"\n original_data =\\\n pd.read_csv(os.path.join(self.general_config.input_data_folder,\n self.general_config.data_name))\n test_group = original_data[\"test_name\"].unique().tolist()\n random_sample = random.sample(test_group, self.data_config.number_of_samples)\n data_sample = original_data[original_data[\"test_name\"].isin(random_sample)]\n self.df = data_sample.copy()[self.data_config.data_headers]\n\n\n def data_splits(self, data, ratio):\n \"\"\"Split the data into train, validation and test data\"\"\"\n splitter = GroupShuffleSplit(test_size=(1 - ratio), random_state=1)\n split_outer = splitter.split(data, groups=data[\"test_name\"])\n split_outer_ratio = next(split_outer)\n df_first_split = data.iloc[list(split_outer_ratio[0])]\n df_second_split = data.iloc[list(split_outer_ratio[1])]\n\n return df_first_split, df_second_split\n\n def get_max_available_scaled_cycle(self):\n \"\"\"Get the maximum available scaled cycle\"\"\"\n if self.procedure_config.preprocess_data:\n # NOTE: comment this is in case you want to limit the prediciton cycles to the maximum available cycles (previous training with ARCANA)\n # max_available_cycle = self.model_data_transformation.data_max_[0]\n # Also check arcana/procedures/predicting.py line 66\n # Comment the next line out\n max_available_cycle = self.data_config.maximum_available_cycles\n min_available_cycle = self.model_data_transformation.data_min_[0]\n original_cycle_range = np.arange(min_available_cycle, max_available_cycle + 1)\n self.scaled_cycle_range = (original_cycle_range - min_available_cycle) / (max_available_cycle - min_available_cycle)\n else:\n # get the number of cycles from the self.df dataframe by filtering the test_name\n max_available_cycle = max(self.df[\"cycle\"])\n min_available_cycle = min(self.df[\"cycle\"])\n self.scaled_cycle_range = np.arange(min_available_cycle, max_available_cycle + 1)\n\n if self.procedure_config.predicting:\n if self.data_config.maximum_available_cycles > max_available_cycle:\n log.warning(\"The maximum available cycle is %s. The selected maximum available cycle is %s. \"\n \"This might cause the model to predict the future cycles unreliably. 
\",\n max_available_cycle, self.data_config.maximum_available_cycles)\n\n\n def standardize_data(self):\n \"\"\"Standardize the data based on the train data\"\"\"\n # standardize the data based on the train data\n if self.procedure_config.preprocess_data:\n\n self.df_train_scaled, self.model_data_transformation =\\\n utils.standardize_dataset(self.df_train_original.iloc[:, 1:])\n self.df_train_scaled.insert(0, \"test_name\", self.df_train_original[\"test_name\"].values)\n # standardize the validation data based on the train data\n self.df_val_scaled = pd.DataFrame(self.model_data_transformation.transform(self.df_val_original.iloc[:, 1:]),\n columns=self.df_val_original.iloc[:, 1:].columns)\n self.df_val_scaled.insert(0, \"test_name\", self.df_val_original[\"test_name\"].values)\n # standardize the test data based on the train data\n self.df_test_scaled = pd.DataFrame(self.model_data_transformation.transform(self.df_test_original.iloc[:, 1:]),\n columns=self.df_test_original.iloc[:, 1:].columns)\n self.df_test_scaled.insert(0, \"test_name\", self.df_test_original[\"test_name\"].values)\n\n else:\n self.df_train_scaled = self.df_train_original.copy()\n self.df_val_scaled = self.df_val_original.copy()\n self.df_test_scaled = self.df_test_original.copy()\n\n\n def tensorized_and_pad(self, data, padded_data, data_lengths):\n \"\"\"Convert the data to tensor and pad them\n\n Args:\n data (pandas dataframe): data to be converted to tensor\n padded_data (list): list of padded data\n data_lengths (list): list of data lengths\n\n Returns:\n padded_data (list): list of padded data\n data_lengths (list): list of data lengths\n \"\"\"\n # create the padded data by grouping the data based on the test name\n for _, data_groups in data.groupby(\"test_name\"):\n grouped_data_values = data_groups.iloc[:, 1:].values\n padded_data.append(torch.tensor(grouped_data_values))\n data_lengths.append(len(grouped_data_values))\n\n # convert the data to tensor and pad them\n padded_data = pad_sequence(padded_data, batch_first=True, padding_value=0)\n # create a tensor from the length of each sequence\n data_lengths = torch.tensor(data_lengths)\n\n return padded_data, data_lengths\n\n\n def pad_the_splits(self, train, val, test):\n \"\"\"Pad the train, validation and test data\n\n Args:\n train (pandas dataframe): train data\n val (pandas dataframe): validation data\n test (pandas dataframe): test data\n\n Returns:\n padded_train (list): list of padded train data\n padded_val (list): list of padded validation data\n padded_test (list): list of padded test data\n \"\"\"\n # pad the train data\n padded_train, self.train_lengths = self.tensorized_and_pad(data=train, padded_data=self.padded_train_data, data_lengths=self.train_lengths)\n # pad the validation data\n padded_val, self.val_lengths = self.tensorized_and_pad(data=val, padded_data=self.padded_val_data, data_lengths=self.val_lengths)\n # pad the test data\n padded_test, self.test_lengths = self.tensorized_and_pad(data=test, padded_data=self.padded_test_data, data_lengths=self.test_lengths)\n return padded_train, padded_val, padded_test\n\n\n def prepare_data_for_model(self):\n \"\"\"Main functions for data preparation\"\"\"\n # main functions for data preparation\n self.df_train_original, second_split = self.data_splits(self.df, self.data_config.train_ratio)\n self.df_val_original, self.df_test_original = self.data_splits(second_split, self.data_config.val_test_ratio)\n self.test_data_names = self.df_test_original[\"test_name\"].unique().tolist()\n # check if the 
data should be standardized\n self.standardize_data()\n self.get_max_available_scaled_cycle()\n self.padded_train_data, self.padded_val_data, self.padded_test_data =\\\n self.pad_the_splits(self.df_train_scaled, self.df_val_scaled, self.df_test_scaled)\n\n\n def prepare_test_data_for_pretrained_model(self):\n \"\"\"Prepare the test data for the pretrained model. This is used for the finetuning\"\"\"\n #TODO\n # load the data for testing\n self.get_data_for_model()\n # load the scaled model for transforming the test data\n if self.procedure_config.preprocess_data:\n self.model_data_transformation =\\\n joblib.load(self.general_config.scaler_model)\n # get the maximum available scaled cycle\n self.get_max_available_scaled_cycle()\n self.df_test_original = self.df.copy()\n self.test_data_names = self.df_test_original[\"test_name\"].unique().tolist()\n # standardize the test data based on the train data\n if self.procedure_config.preprocess_data:\n self.df_test_scaled =\\\n pd.DataFrame(self.model_data_transformation.transform(self.df_test_original.iloc[:, 1:]),\n columns=self.df_test_original.iloc[:, 1:].columns)\n self.df_test_scaled.insert(0, \"test_name\", self.df_test_original[\"test_name\"].values)\n else:\n self.df_test_scaled = self.df_test_original.copy()\n # pad the test data\n self.padded_test_data, self.test_lengths =\\\n self.tensorized_and_pad(data=self.df_test_scaled,\n padded_data=self.padded_test_data,\n data_lengths=self.test_lengths)"
},
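tensorized_and_pad groups per-cell sequences and pads them to a common length; the core of that step with pad_sequence on toy data:

import torch
from torch.nn.utils.rnn import pad_sequence

# Three "cells" with different numbers of cycles, two features each.
sequences = [torch.randn(n, 2) for n in (5, 3, 7)]
lengths = torch.tensor([s.shape[0] for s in sequences])
padded = pad_sequence(sequences, batch_first=True, padding_value=0)
print(padded.shape, lengths)   # torch.Size([3, 7, 2]) tensor([5, 3, 7])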
{
"identifier": "utils",
"path": "arcana/utils/utils.py",
"snippet": "def create_dir(directory):\ndef save_plots(path, name: str = None):\ndef standardize_dataset(data: pd.DataFrame) -> pd.DataFrame:\ndef prepare_folder_structure(test_id):\ndef handle_tensor(obj):\ndef prepare_optuna_folder_structure(trial_path):\ndef save_optuna_fig(save_path, plot_type):\ndef save_test_data(model, model_folder, test_data, test_lengths):\ndef pad_array_to_length(arr, target_length):\ndef align_and_truncate_samples(all_predictions, all_target_data_list):"
}
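get_max_available_scaled_cycle reads data_min_/data_max_, which are MinMaxScaler attributes, so standardize_dataset presumably wraps one; a short sketch under that assumption (toy data, not the repository's implementation):

import pandas as pd
from sklearn.preprocessing import MinMaxScaler

df = pd.DataFrame({"cycle": [1, 2, 3, 4], "capacity": [1.00, 0.98, 0.97, 0.95]})
scaler = MinMaxScaler().fit(df)
scaled = pd.DataFrame(scaler.transform(df), columns=df.columns)
print(scaled)
print(scaler.data_min_, scaler.data_max_)   # per-column min/max used for rescaling cycles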
] | import os
import warnings
import json
import pickle
import numpy as np
import torch
from arcana.logger import logger
from arcana.training import train_model
from arcana.losses.loss import LossFactory
from arcana.regularizations.optimizer_scheduler import SchedulerFactory
from arcana.models.sequence_to_sequence.seq2seq_factory import Seq2SeqFactory
from arcana.procedures.config_handler import ConfigHandler
from arcana.processing.data_processing import DataPreparation
from arcana.utils import utils | 7,439 | self.data_preparation = DataPreparation(self.general_config, self.data_config, self.procedure_config)
# initializing the model class
self.device = None
self.set_device()
# initializing the loss class
self.criterion = None
# initializing the optimizer class
self.optimizer = None
self.scheduler = None
# initializing the model class
self.seq2seq_factory = Seq2SeqFactory(self.model_config)
self.seq_2_seq_trainer = None
#self.model = None
self.train_parameters = None
# initializing the loaders
self.train_loader = None
self.val_loader = None
# get the data splits
if self.general_config.pretrained_model:
if self.procedure_config.transfer_learning:
self.data_splits()
if self.procedure_config.predicting and (not self.procedure_config.transfer_learning):
pass
else:
self.data_splits()
# if ((not self.procedure_config.naive_training) and (not self.procedure_config.transfer_learning) and \
# (not self.procedure_config.optuna_tuning) and (self.procedure_config.predicting)):
# self.data_splits()
def set_device(self):
"""Set the device for training the model
"""
# move to GPU if available
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if self.device.type == "cpu":
            torch.set_default_tensor_type("torch.FloatTensor")
        else:
            torch.set_default_tensor_type("torch.cuda.FloatTensor")
log.info(f"device: {self.device}")
def data_splits(self):
"""Get the data splits for training, validation and testing
"""
self.data_preparation.get_data_for_model()
self.data_preparation.prepare_data_for_model()
self.save_data_splits()
def save_data_splits(self):
"""Save the data splits
"""
        data_path = os.path.join(self.model_config.result_path, "data_splits")
# save the original data splits
for original_data, data_name in zip([self.data_preparation.df_train_original, self.data_preparation.df_val_original,
self.data_preparation.df_test_original], ["train", "val", "test"]):
original_data.to_csv(os.path.join(data_path, f"{data_name}_original.csv"))
if self.procedure_config.preprocess_data:
# save model data transformation
with open(os.path.join(data_path, "model_data_transformation.pkl"), "wb") as f:
pickle.dump(self.data_preparation.model_data_transformation, f)
# save the test_names of the test data
np.save(os.path.join(data_path, "test_names.npy"), self.data_preparation.test_data_names)
# save the processed data splits
for processed_data, processed_name in zip([self.data_preparation.padded_train_data,
self.data_preparation.padded_val_data, self.data_preparation.padded_test_data],
["train", "val", "test"]):
torch.save(processed_data, os.path.join(data_path, f"{processed_name}_processed.pt"))
def loader_initialization(self):
"""Initialize the data loaders
"""
# define the data loaders
self.train_loader = torch.utils.data.DataLoader(self.data_preparation.padded_train_data,
batch_size=self.model_config.batch_size)
self.val_loader = torch.utils.data.DataLoader(self.data_preparation.padded_val_data,
batch_size=self.model_config.batch_size)
def model_parameter_initialization(self):
"""Initialize the model parameters
"""
# define the data loaders
# self.train_loader = torch.utils.data.DataLoader(self.data_preparation.padded_train_data,
# batch_size=self.model_config.batch_size)
# self.val_loader = torch.utils.data.DataLoader(self.data_preparation.padded_val_data,
# batch_size=self.model_config.batch_size)
# define the model
if self.procedure_config.attention_type == "additive":
self.seq2seq_factory.create_additive_model()
elif self.procedure_config.attention_type == "multihead":
self.seq2seq_factory.create_multihead_model()
# parallelize the model if more than one GPU is available
if torch.cuda.device_count() > 1:
log.info(f"Using {torch.cuda.device_count()} GPUs")
self.seq2seq_factory.seq2seq = torch.nn.DataParallel(self.seq2seq_factory.seq2seq)
def train_element_initialization(self):
"""Initialize the training elements
"""
# define the loss
self.criterion = LossFactory.create_loss(self.model_config)
# define optimizer
optimizer = torch.optim.Adam(self.seq2seq_factory.seq2seq.parameters(),
lr=self.model_config.learning_rate,
weight_decay=self.model_config.weight_decay)
# Instantiate the factory with the optimizer and params
scheduler_factory = SchedulerFactory(optimizer, self.model_config, len_train_loader=len(self.train_loader))
# Get the desired scheduler
scheduler = scheduler_factory.get_scheduler(learning_rate_type = self.procedure_config.learning_rate_type)
# define the trainer
| ''' This module is the main module for training the model. It contains the TrainProcedure class which is the main class'''
# from arcana.plots import plots
warnings.filterwarnings("ignore")
# plots.Plots()
np.random.seed(0)
log = logger.get_logger("arcana.run_procedure")
SEED = 0
torch.cuda.manual_seed(SEED)
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
class TrainProcedure:
"""This class is the main class for training the model. It contains some of the necessary functions for training,
predicting and finetuning the model. The class also contains all the parameters for the training, predicting and
tuning of the model. It also contains the functions for saving the model parameters and the
data splits."""
def __init__(self):
config_handler = ConfigHandler()
self.general_config = config_handler.get_general_config()
self.data_config = config_handler.get_data_config()
self.procedure_config = config_handler.get_procedure_config()
self.model_config = config_handler.get_model_config()
self.model_config.dim_weights = torch.tensor(self.model_config.dim_weights)
# initializing the data preparation class
self.data_preparation = DataPreparation(self.general_config, self.data_config, self.procedure_config)
# initializing the model class
self.device = None
self.set_device()
# initializing the loss class
self.criterion = None
# initializing the optimizer class
self.optimizer = None
self.scheduler = None
# initializing the model class
self.seq2seq_factory = Seq2SeqFactory(self.model_config)
self.seq_2_seq_trainer = None
#self.model = None
self.train_parameters = None
# initializing the loaders
self.train_loader = None
self.val_loader = None
# get the data splits
if self.general_config.pretrained_model:
if self.procedure_config.transfer_learning:
self.data_splits()
if self.procedure_config.predicting and (not self.procedure_config.transfer_learning):
pass
else:
self.data_splits()
# if ((not self.procedure_config.naive_training) and (not self.procedure_config.transfer_learning) and \
# (not self.procedure_config.optuna_tuning) and (self.procedure_config.predicting)):
# self.data_splits()
def set_device(self):
"""Set the device for training the model
"""
# move to GPU if available
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if self.device.type == "cpu":
            torch.set_default_tensor_type("torch.FloatTensor")
        else:
            torch.set_default_tensor_type("torch.cuda.FloatTensor")
log.info(f"device: {self.device}")
def data_splits(self):
"""Get the data splits for training, validation and testing
"""
self.data_preparation.get_data_for_model()
self.data_preparation.prepare_data_for_model()
self.save_data_splits()
def save_data_splits(self):
"""Save the data splits
"""
        data_path = os.path.join(self.model_config.result_path, "data_splits")
# save the original data splits
for original_data, data_name in zip([self.data_preparation.df_train_original, self.data_preparation.df_val_original,
self.data_preparation.df_test_original], ["train", "val", "test"]):
original_data.to_csv(os.path.join(data_path, f"{data_name}_original.csv"))
if self.procedure_config.preprocess_data:
# save model data transformation
with open(os.path.join(data_path, "model_data_transformation.pkl"), "wb") as f:
pickle.dump(self.data_preparation.model_data_transformation, f)
# save the test_names of the test data
np.save(os.path.join(data_path, "test_names.npy"), self.data_preparation.test_data_names)
# save the processed data splits
for processed_data, processed_name in zip([self.data_preparation.padded_train_data,
self.data_preparation.padded_val_data, self.data_preparation.padded_test_data],
["train", "val", "test"]):
torch.save(processed_data, os.path.join(data_path, f"{processed_name}_processed.pt"))
def loader_initialization(self):
"""Initialize the data loaders
"""
# define the data loaders
self.train_loader = torch.utils.data.DataLoader(self.data_preparation.padded_train_data,
batch_size=self.model_config.batch_size)
self.val_loader = torch.utils.data.DataLoader(self.data_preparation.padded_val_data,
batch_size=self.model_config.batch_size)
def model_parameter_initialization(self):
"""Initialize the model parameters
"""
# define the data loaders
# self.train_loader = torch.utils.data.DataLoader(self.data_preparation.padded_train_data,
# batch_size=self.model_config.batch_size)
# self.val_loader = torch.utils.data.DataLoader(self.data_preparation.padded_val_data,
# batch_size=self.model_config.batch_size)
# define the model
if self.procedure_config.attention_type == "additive":
self.seq2seq_factory.create_additive_model()
elif self.procedure_config.attention_type == "multihead":
self.seq2seq_factory.create_multihead_model()
# parallelize the model if more than one GPU is available
if torch.cuda.device_count() > 1:
log.info(f"Using {torch.cuda.device_count()} GPUs")
self.seq2seq_factory.seq2seq = torch.nn.DataParallel(self.seq2seq_factory.seq2seq)
def train_element_initialization(self):
"""Initialize the training elements
"""
# define the loss
self.criterion = LossFactory.create_loss(self.model_config)
# define optimizer
optimizer = torch.optim.Adam(self.seq2seq_factory.seq2seq.parameters(),
lr=self.model_config.learning_rate,
weight_decay=self.model_config.weight_decay)
# Instantiate the factory with the optimizer and params
scheduler_factory = SchedulerFactory(optimizer, self.model_config, len_train_loader=len(self.train_loader))
# Get the desired scheduler
        scheduler = scheduler_factory.get_scheduler(learning_rate_type=self.procedure_config.learning_rate_type)
# define the trainer | self.seq_2_seq_trainer = train_model.Seq2SeqTrainer(self.seq2seq_factory.seq2seq, self.criterion, optimizer, self.device, | 1 | 2023-11-21 12:51:37+00:00 | 12k |
JustRin/Stable-Video-Diffusion | scripts/demo/streamlit_helpers.py | [
{
"identifier": "Img2ImgDiscretizationWrapper",
"path": "scripts/demo/discretization.py",
"snippet": "class Img2ImgDiscretizationWrapper:\n \"\"\"\n wraps a discretizer, and prunes the sigmas\n params:\n strength: float between 0.0 and 1.0. 1.0 means full sampling (all sigmas are returned)\n \"\"\"\n\n def __init__(self, discretization: Discretization, strength: float = 1.0):\n self.discretization = discretization\n self.strength = strength\n assert 0.0 <= self.strength <= 1.0\n\n def __call__(self, *args, **kwargs):\n # sigmas start large first, and decrease then\n sigmas = self.discretization(*args, **kwargs)\n print(f\"sigmas after discretization, before pruning img2img: \", sigmas)\n sigmas = torch.flip(sigmas, (0,))\n sigmas = sigmas[: max(int(self.strength * len(sigmas)), 1)]\n print(\"prune index:\", max(int(self.strength * len(sigmas)), 1))\n sigmas = torch.flip(sigmas, (0,))\n print(f\"sigmas after pruning: \", sigmas)\n return sigmas"
},
{
"identifier": "Txt2NoisyDiscretizationWrapper",
"path": "scripts/demo/discretization.py",
"snippet": "class Txt2NoisyDiscretizationWrapper:\n \"\"\"\n wraps a discretizer, and prunes the sigmas\n params:\n strength: float between 0.0 and 1.0. 0.0 means full sampling (all sigmas are returned)\n \"\"\"\n\n def __init__(\n self, discretization: Discretization, strength: float = 0.0, original_steps=None\n ):\n self.discretization = discretization\n self.strength = strength\n self.original_steps = original_steps\n assert 0.0 <= self.strength <= 1.0\n\n def __call__(self, *args, **kwargs):\n # sigmas start large first, and decrease then\n sigmas = self.discretization(*args, **kwargs)\n print(f\"sigmas after discretization, before pruning img2img: \", sigmas)\n sigmas = torch.flip(sigmas, (0,))\n if self.original_steps is None:\n steps = len(sigmas)\n else:\n steps = self.original_steps + 1\n prune_index = max(min(int(self.strength * steps) - 1, steps - 1), 0)\n sigmas = sigmas[prune_index:]\n print(\"prune index:\", prune_index)\n sigmas = torch.flip(sigmas, (0,))\n print(f\"sigmas after pruning: \", sigmas)\n return sigmas"
},
{
"identifier": "DeepFloydDataFiltering",
"path": "scripts/util/detection/nsfw_and_watermark_dectection.py",
"snippet": "class DeepFloydDataFiltering(object):\n def __init__(\n self, verbose: bool = False, device: torch.device = torch.device(\"cpu\")\n ):\n super().__init__()\n self.verbose = verbose\n self._device = None\n self.clip_model, _ = clip.load(\"ViT-L/14\", device=device)\n self.clip_model.eval()\n\n self.cpu_w_weights, self.cpu_w_biases = load_model_weights(\n os.path.join(RESOURCES_ROOT, \"w_head_v1.npz\")\n )\n self.cpu_p_weights, self.cpu_p_biases = load_model_weights(\n os.path.join(RESOURCES_ROOT, \"p_head_v1.npz\")\n )\n self.w_threshold, self.p_threshold = 0.5, 0.5\n\n @torch.inference_mode()\n def __call__(self, images: torch.Tensor) -> torch.Tensor:\n imgs = clip_process_images(images)\n if self._device is None:\n self._device = next(p for p in self.clip_model.parameters()).device\n image_features = self.clip_model.encode_image(imgs.to(self._device))\n image_features = image_features.detach().cpu().numpy().astype(np.float16)\n p_pred = predict_proba(image_features, self.cpu_p_weights, self.cpu_p_biases)\n w_pred = predict_proba(image_features, self.cpu_w_weights, self.cpu_w_biases)\n print(f\"p_pred = {p_pred}, w_pred = {w_pred}\") if self.verbose else None\n query = p_pred > self.p_threshold\n if query.sum() > 0:\n print(f\"Hit for p_threshold: {p_pred}\") if self.verbose else None\n images[query] = T.GaussianBlur(99, sigma=(100.0, 100.0))(images[query])\n query = w_pred > self.w_threshold\n if query.sum() > 0:\n print(f\"Hit for w_threshold: {w_pred}\") if self.verbose else None\n images[query] = T.GaussianBlur(99, sigma=(100.0, 100.0))(images[query])\n return images"
},
{
"identifier": "embed_watermark",
"path": "sgm/inference/helpers.py",
"snippet": "class WatermarkEmbedder:\nclass Img2ImgDiscretizationWrapper:\n def __init__(self, watermark):\n def __call__(self, image: torch.Tensor) -> torch.Tensor:\ndef get_unique_embedder_keys_from_conditioner(conditioner):\ndef perform_save_locally(save_path, samples):\n def __init__(self, discretization, strength: float = 1.0):\n def __call__(self, *args, **kwargs):\ndef do_sample(\n model,\n sampler,\n value_dict,\n num_samples,\n H,\n W,\n C,\n F,\n force_uc_zero_embeddings: Optional[List] = None,\n batch2model_input: Optional[List] = None,\n return_latents=False,\n filter=None,\n device=\"cuda\",\n):\n def denoiser(input, sigma, c):\ndef get_batch(keys, value_dict, N: Union[List, ListConfig], device=\"cuda\"):\ndef get_input_image_tensor(image: Image.Image, device=\"cuda\"):\ndef do_img2img(\n img,\n model,\n sampler,\n value_dict,\n num_samples,\n force_uc_zero_embeddings=[],\n additional_kwargs={},\n offset_noise_level: float = 0.0,\n return_latents=False,\n skip_encode=False,\n filter=None,\n device=\"cuda\",\n):\n def denoiser(x, sigma, c):\nWATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110\nWATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]"
},
{
"identifier": "LinearPredictionGuider",
"path": "sgm/modules/diffusionmodules/guiders.py",
"snippet": "class LinearPredictionGuider(Guider):\n def __init__(\n self,\n max_scale: float,\n num_frames: int,\n min_scale: float = 1.0,\n additional_cond_keys: Optional[Union[List[str], str]] = None,\n ):\n self.min_scale = min_scale\n self.max_scale = max_scale\n self.num_frames = num_frames\n self.scale = torch.linspace(min_scale, max_scale, num_frames).unsqueeze(0)\n\n additional_cond_keys = default(additional_cond_keys, [])\n if isinstance(additional_cond_keys, str):\n additional_cond_keys = [additional_cond_keys]\n self.additional_cond_keys = additional_cond_keys\n\n def __call__(self, x: torch.Tensor, sigma: torch.Tensor) -> torch.Tensor:\n x_u, x_c = x.chunk(2)\n\n x_u = rearrange(x_u, \"(b t) ... -> b t ...\", t=self.num_frames)\n x_c = rearrange(x_c, \"(b t) ... -> b t ...\", t=self.num_frames)\n scale = repeat(self.scale, \"1 t -> b t\", b=x_u.shape[0])\n scale = append_dims(scale, x_u.ndim).to(x_u.device)\n\n return rearrange(x_u + scale * (x_c - x_u), \"b t ... -> (b t) ...\")\n\n def prepare_inputs(\n self, x: torch.Tensor, s: torch.Tensor, c: dict, uc: dict\n ) -> Tuple[torch.Tensor, torch.Tensor, dict]:\n c_out = dict()\n\n for k in c:\n if k in [\"vector\", \"crossattn\", \"concat\"] + self.additional_cond_keys:\n c_out[k] = torch.cat((uc[k], c[k]), 0)\n else:\n assert c[k] == uc[k]\n c_out[k] = c[k]\n return torch.cat([x] * 2), torch.cat([s] * 2), c_out"
},
{
"identifier": "VanillaCFG",
"path": "sgm/modules/diffusionmodules/guiders.py",
"snippet": "class VanillaCFG(Guider):\n def __init__(self, scale: float):\n self.scale = scale\n\n def __call__(self, x: torch.Tensor, sigma: torch.Tensor) -> torch.Tensor:\n x_u, x_c = x.chunk(2)\n x_pred = x_u + self.scale * (x_c - x_u)\n return x_pred\n\n def prepare_inputs(self, x, s, c, uc):\n c_out = dict()\n\n for k in c:\n if k in [\"vector\", \"crossattn\", \"concat\"]:\n c_out[k] = torch.cat((uc[k], c[k]), 0)\n else:\n assert c[k] == uc[k]\n c_out[k] = c[k]\n return torch.cat([x] * 2), torch.cat([s] * 2), c_out"
},
{
"identifier": "DPMPP2MSampler",
"path": "sgm/modules/diffusionmodules/sampling.py",
"snippet": "class DPMPP2MSampler(BaseDiffusionSampler):\n def get_variables(self, sigma, next_sigma, previous_sigma=None):\n t, t_next = [to_neg_log_sigma(s) for s in (sigma, next_sigma)]\n h = t_next - t\n\n if previous_sigma is not None:\n h_last = t - to_neg_log_sigma(previous_sigma)\n r = h_last / h\n return h, r, t, t_next\n else:\n return h, None, t, t_next\n\n def get_mult(self, h, r, t, t_next, previous_sigma):\n mult1 = to_sigma(t_next) / to_sigma(t)\n mult2 = (-h).expm1()\n\n if previous_sigma is not None:\n mult3 = 1 + 1 / (2 * r)\n mult4 = 1 / (2 * r)\n return mult1, mult2, mult3, mult4\n else:\n return mult1, mult2\n\n def sampler_step(\n self,\n old_denoised,\n previous_sigma,\n sigma,\n next_sigma,\n denoiser,\n x,\n cond,\n uc=None,\n ):\n denoised = self.denoise(x, denoiser, sigma, cond, uc)\n\n h, r, t, t_next = self.get_variables(sigma, next_sigma, previous_sigma)\n mult = [\n append_dims(mult, x.ndim)\n for mult in self.get_mult(h, r, t, t_next, previous_sigma)\n ]\n\n x_standard = mult[0] * x - mult[1] * denoised\n if old_denoised is None or torch.sum(next_sigma) < 1e-14:\n # Save a network evaluation if all noise levels are 0 or on the first step\n return x_standard, denoised\n else:\n denoised_d = mult[2] * denoised - mult[3] * old_denoised\n x_advanced = mult[0] * x - mult[1] * denoised_d\n\n # apply correction if noise level is not 0 and not first step\n x = torch.where(\n append_dims(next_sigma, x.ndim) > 0.0, x_advanced, x_standard\n )\n\n return x, denoised\n\n def __call__(self, denoiser, x, cond, uc=None, num_steps=None, **kwargs):\n x, s_in, sigmas, num_sigmas, cond, uc = self.prepare_sampling_loop(\n x, cond, uc, num_steps\n )\n\n old_denoised = None\n for i in self.get_sigma_gen(num_sigmas):\n x, old_denoised = self.sampler_step(\n old_denoised,\n None if i == 0 else s_in * sigmas[i - 1],\n s_in * sigmas[i],\n s_in * sigmas[i + 1],\n denoiser,\n x,\n cond,\n uc=uc,\n )\n\n return x"
},
{
"identifier": "DPMPP2SAncestralSampler",
"path": "sgm/modules/diffusionmodules/sampling.py",
"snippet": "class DPMPP2SAncestralSampler(AncestralSampler):\n def get_variables(self, sigma, sigma_down):\n t, t_next = [to_neg_log_sigma(s) for s in (sigma, sigma_down)]\n h = t_next - t\n s = t + 0.5 * h\n return h, s, t, t_next\n\n def get_mult(self, h, s, t, t_next):\n mult1 = to_sigma(s) / to_sigma(t)\n mult2 = (-0.5 * h).expm1()\n mult3 = to_sigma(t_next) / to_sigma(t)\n mult4 = (-h).expm1()\n\n return mult1, mult2, mult3, mult4\n\n def sampler_step(self, sigma, next_sigma, denoiser, x, cond, uc=None, **kwargs):\n sigma_down, sigma_up = get_ancestral_step(sigma, next_sigma, eta=self.eta)\n denoised = self.denoise(x, denoiser, sigma, cond, uc)\n x_euler = self.ancestral_euler_step(x, denoised, sigma, sigma_down)\n\n if torch.sum(sigma_down) < 1e-14:\n # Save a network evaluation if all noise levels are 0\n x = x_euler\n else:\n h, s, t, t_next = self.get_variables(sigma, sigma_down)\n mult = [\n append_dims(mult, x.ndim) for mult in self.get_mult(h, s, t, t_next)\n ]\n\n x2 = mult[0] * x - mult[1] * denoised\n denoised2 = self.denoise(x2, denoiser, to_sigma(s), cond, uc)\n x_dpmpp2s = mult[2] * x - mult[3] * denoised2\n\n # apply correction if noise level is not 0\n x = torch.where(append_dims(sigma_down, x.ndim) > 0.0, x_dpmpp2s, x_euler)\n\n x = self.ancestral_step(x, sigma, next_sigma, sigma_up)\n return x"
},
{
"identifier": "EulerAncestralSampler",
"path": "sgm/modules/diffusionmodules/sampling.py",
"snippet": "class EulerAncestralSampler(AncestralSampler):\n def sampler_step(self, sigma, next_sigma, denoiser, x, cond, uc):\n sigma_down, sigma_up = get_ancestral_step(sigma, next_sigma, eta=self.eta)\n denoised = self.denoise(x, denoiser, sigma, cond, uc)\n x = self.ancestral_euler_step(x, denoised, sigma, sigma_down)\n x = self.ancestral_step(x, sigma, next_sigma, sigma_up)\n\n return x"
},
{
"identifier": "EulerEDMSampler",
"path": "sgm/modules/diffusionmodules/sampling.py",
"snippet": "class EulerEDMSampler(EDMSampler):\n def possible_correction_step(\n self, euler_step, x, d, dt, next_sigma, denoiser, cond, uc\n ):\n return euler_step"
},
{
"identifier": "HeunEDMSampler",
"path": "sgm/modules/diffusionmodules/sampling.py",
"snippet": "class HeunEDMSampler(EDMSampler):\n def possible_correction_step(\n self, euler_step, x, d, dt, next_sigma, denoiser, cond, uc\n ):\n if torch.sum(next_sigma) < 1e-14:\n # Save a network evaluation if all noise levels are 0\n return euler_step\n else:\n denoised = self.denoise(euler_step, denoiser, next_sigma, cond, uc)\n d_new = to_d(euler_step, next_sigma, denoised)\n d_prime = (d + d_new) / 2.0\n\n # apply correction if noise level is not 0\n x = torch.where(\n append_dims(next_sigma, x.ndim) > 0.0, x + d_prime * dt, euler_step\n )\n return x"
},
{
"identifier": "LinearMultistepSampler",
"path": "sgm/modules/diffusionmodules/sampling.py",
"snippet": "class LinearMultistepSampler(BaseDiffusionSampler):\n def __init__(\n self,\n order=4,\n *args,\n **kwargs,\n ):\n super().__init__(*args, **kwargs)\n\n self.order = order\n\n def __call__(self, denoiser, x, cond, uc=None, num_steps=None, **kwargs):\n x, s_in, sigmas, num_sigmas, cond, uc = self.prepare_sampling_loop(\n x, cond, uc, num_steps\n )\n\n ds = []\n sigmas_cpu = sigmas.detach().cpu().numpy()\n for i in self.get_sigma_gen(num_sigmas):\n sigma = s_in * sigmas[i]\n denoised = denoiser(\n *self.guider.prepare_inputs(x, sigma, cond, uc), **kwargs\n )\n denoised = self.guider(denoised, sigma)\n d = to_d(x, sigma, denoised)\n ds.append(d)\n if len(ds) > self.order:\n ds.pop(0)\n cur_order = min(i + 1, self.order)\n coeffs = [\n linear_multistep_coeff(cur_order, sigmas_cpu, i, j)\n for j in range(cur_order)\n ]\n x = x + sum(coeff * d for coeff, d in zip(coeffs, reversed(ds)))\n\n return x"
},
{
"identifier": "append_dims",
"path": "sgm/util.py",
"snippet": "def append_dims(x, target_dims):\n \"\"\"Appends dimensions to the end of a tensor until it has target_dims dimensions.\"\"\"\n dims_to_append = target_dims - x.ndim\n if dims_to_append < 0:\n raise ValueError(\n f\"input has {x.ndim} dims but target_dims is {target_dims}, which is less\"\n )\n return x[(...,) + (None,) * dims_to_append]"
},
{
"identifier": "default",
"path": "sgm/util.py",
"snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d"
},
{
"identifier": "instantiate_from_config",
"path": "sgm/util.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))"
}
] | import copy
import math
import os
import cv2
import numpy as np
import streamlit as st
import torch
import torch.nn as nn
import torchvision.transforms as TT
from glob import glob
from typing import Dict, List, Optional, Tuple, Union
from einops import rearrange, repeat
from imwatermark import WatermarkEncoder
from omegaconf import ListConfig, OmegaConf
from PIL import Image
from safetensors.torch import load_file as load_safetensors
from torch import autocast
from torchvision import transforms
from torchvision.utils import make_grid, save_image
from scripts.demo.discretization import (Img2ImgDiscretizationWrapper,
Txt2NoisyDiscretizationWrapper)
from scripts.util.detection.nsfw_and_watermark_dectection import \
DeepFloydDataFiltering
from sgm.inference.helpers import embed_watermark
from sgm.modules.diffusionmodules.guiders import (LinearPredictionGuider,
VanillaCFG)
from sgm.modules.diffusionmodules.sampling import (DPMPP2MSampler,
DPMPP2SAncestralSampler,
EulerAncestralSampler,
EulerEDMSampler,
HeunEDMSampler,
LinearMultistepSampler)
from sgm.util import append_dims, default, instantiate_from_config | 7,216 | guider_config=guider_config,
order=order,
verbose=True,
)
else:
raise ValueError(f"unknown sampler {sampler_name}!")
return sampler
def get_interactive_image(key=None) -> Image.Image:
    image = st.file_uploader("Input", type=["jpg", "JPEG", "png"], key=key)
    if image is not None:
        image = Image.open(image)
        if image.mode != "RGB":
            image = image.convert("RGB")
return image
def load_img(
    display: bool = True,
    size: Union[None, int, Tuple[int, int]] = None,
    center_crop: bool = False,
    key=None,
):
    image = get_interactive_image(key=key)
if image is None:
return None
if display:
st.image(image)
w, h = image.size
print(f"loaded input image of size ({w}, {h})")
transform = []
if size is not None:
transform.append(transforms.Resize(size))
if center_crop:
transform.append(transforms.CenterCrop(size))
transform.append(transforms.ToTensor())
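    # ToTensor yields values in [0, 1]; the next transform rescales to the [-1, 1] range used downstream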
transform.append(transforms.Lambda(lambda x: 2.0 * x - 1.0))
transform = transforms.Compose(transform)
img = transform(image)[None, ...]
st.text(f"input min/max/mean: {img.min():.3f}/{img.max():.3f}/{img.mean():.3f}")
return img
def get_init_img(batch_size=1, key=None):
init_image = load_img(key=key).cuda()
init_image = repeat(init_image, "1 ... -> b ...", b=batch_size)
return init_image
def do_sample(
model,
sampler,
value_dict,
num_samples,
H,
W,
C,
F,
force_uc_zero_embeddings: Optional[List] = None,
force_cond_zero_embeddings: Optional[List] = None,
batch2model_input: List = None,
return_latents=False,
filter=None,
T=None,
additional_batch_uc_fields=None,
decoding_t=None,
):
force_uc_zero_embeddings = default(force_uc_zero_embeddings, [])
batch2model_input = default(batch2model_input, [])
additional_batch_uc_fields = default(additional_batch_uc_fields, [])
st.text("Sampling")
outputs = st.empty()
precision_scope = autocast
with torch.no_grad():
with precision_scope("cuda"):
with model.ema_scope():
if T is not None:
num_samples = [num_samples, T]
else:
num_samples = [num_samples]
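                # move the conditioner to the GPU only for conditioning; unload_model below releases it again in low-VRAM mode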
load_model(model.conditioner)
batch, batch_uc = get_batch(
get_unique_embedder_keys_from_conditioner(model.conditioner),
value_dict,
num_samples,
T=T,
additional_batch_uc_fields=additional_batch_uc_fields,
)
c, uc = model.conditioner.get_unconditional_conditioning(
batch,
batch_uc=batch_uc,
force_uc_zero_embeddings=force_uc_zero_embeddings,
force_cond_zero_embeddings=force_cond_zero_embeddings,
)
unload_model(model.conditioner)
for k in c:
                    if k != "crossattn":
c[k], uc[k] = map(
lambda y: y[k][: math.prod(num_samples)].to("cuda"), (c, uc)
)
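                    # video models: broadcast the per-sample conditioning across the T frames before flattening batch and time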
if k in ["crossattn", "concat"] and T is not None:
uc[k] = repeat(uc[k], "b ... -> b t ...", t=T)
uc[k] = rearrange(uc[k], "b t ... -> (b t) ...", t=T)
c[k] = repeat(c[k], "b ... -> b t ...", t=T)
c[k] = rearrange(c[k], "b t ... -> (b t) ...", t=T)
additional_model_inputs = {}
for k in batch2model_input:
if k == "image_only_indicator":
assert T is not None
if isinstance(
|
@st.cache_resource()
def init_st(version_dict, load_ckpt=True, load_filter=True):
state = dict()
if not "model" in state:
config = version_dict["config"]
ckpt = version_dict["ckpt"]
config = OmegaConf.load(config)
model, msg = load_model_from_config(config, ckpt if load_ckpt else None)
state["msg"] = msg
state["model"] = model
state["ckpt"] = ckpt if load_ckpt else None
state["config"] = config
if load_filter:
state["filter"] = DeepFloydDataFiltering(verbose=False)
return state
def load_model(model):
model.cuda()
lowvram_mode = False
def set_lowvram_mode(mode):
global lowvram_mode
lowvram_mode = mode
def initial_model_load(model):
global lowvram_mode
if lowvram_mode:
model.model.half()
else:
model.cuda()
return model
def unload_model(model):
global lowvram_mode
if lowvram_mode:
model.cpu()
torch.cuda.empty_cache()
def load_model_from_config(config, ckpt=None, verbose=True):
model = instantiate_from_config(config.model)
if ckpt is not None:
print(f"Loading model from {ckpt}")
if ckpt.endswith("ckpt"):
pl_sd = torch.load(ckpt, map_location="cpu")
if "global_step" in pl_sd:
global_step = pl_sd["global_step"]
st.info(f"loaded ckpt from global step {global_step}")
print(f"Global Step: {pl_sd['global_step']}")
sd = pl_sd["state_dict"]
elif ckpt.endswith("safetensors"):
sd = load_safetensors(ckpt)
else:
raise NotImplementedError
msg = None
m, u = model.load_state_dict(sd, strict=False)
if len(m) > 0 and verbose:
print("missing keys:")
print(m)
if len(u) > 0 and verbose:
print("unexpected keys:")
print(u)
else:
msg = None
model = initial_model_load(model)
model.eval()
return model, msg
def get_unique_embedder_keys_from_conditioner(conditioner):
return list(set([x.input_key for x in conditioner.embedders]))
def init_embedder_options(keys, init_dict, prompt=None, negative_prompt=None):
# Hardcoded demo settings; might undergo some changes in the future
value_dict = {}
for key in keys:
if key == "txt":
if prompt is None:
prompt = "A professional photograph of an astronaut riding a pig"
if negative_prompt is None:
negative_prompt = ""
prompt = st.text_input("Prompt", prompt)
negative_prompt = st.text_input("Negative prompt", negative_prompt)
value_dict["prompt"] = prompt
value_dict["negative_prompt"] = negative_prompt
if key == "original_size_as_tuple":
orig_width = st.number_input(
"orig_width",
value=init_dict["orig_width"],
min_value=16,
)
orig_height = st.number_input(
"orig_height",
value=init_dict["orig_height"],
min_value=16,
)
value_dict["orig_width"] = orig_width
value_dict["orig_height"] = orig_height
if key == "crop_coords_top_left":
crop_coord_top = st.number_input("crop_coords_top", value=0, min_value=0)
crop_coord_left = st.number_input("crop_coords_left", value=0, min_value=0)
value_dict["crop_coords_top"] = crop_coord_top
value_dict["crop_coords_left"] = crop_coord_left
if key == "aesthetic_score":
value_dict["aesthetic_score"] = 6.0
value_dict["negative_aesthetic_score"] = 2.5
if key == "target_size_as_tuple":
value_dict["target_width"] = init_dict["target_width"]
value_dict["target_height"] = init_dict["target_height"]
if key in ["fps_id", "fps"]:
fps = st.number_input("fps", value=6, min_value=1)
value_dict["fps"] = fps
value_dict["fps_id"] = fps - 1
if key == "motion_bucket_id":
mb_id = st.number_input("motion bucket id", 0, 511, value=127)
value_dict["motion_bucket_id"] = mb_id
if key == "pool_image":
st.text("Image for pool conditioning")
image = load_img(
key="pool_image_input",
size=224,
center_crop=True,
)
if image is None:
st.info("Need an image here")
image = torch.zeros(1, 3, 224, 224)
value_dict["pool_image"] = image
return value_dict
def perform_save_locally(save_path, samples):
os.makedirs(os.path.join(save_path), exist_ok=True)
base_count = len(os.listdir(os.path.join(save_path)))
samples = embed_watermark(samples)
for sample in samples:
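        # convert each CHW float image in [0, 1] to an HWC uint8 array before saving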
sample = 255.0 * rearrange(sample.cpu().numpy(), "c h w -> h w c")
Image.fromarray(sample.astype(np.uint8)).save(
os.path.join(save_path, f"{base_count:09}.png")
)
base_count += 1
def init_save_locally(_dir, init_value: bool = False):
save_locally = st.sidebar.checkbox("Save images locally", value=init_value)
if save_locally:
save_path = st.text_input("Save path", value=os.path.join(_dir, "samples"))
else:
save_path = None
return save_locally, save_path
def get_guider(options, key):
guider = st.sidebar.selectbox(
        f"Guider #{key}",
[
"VanillaCFG",
"IdentityGuider",
"LinearPredictionGuider",
],
options.get("guider", 0),
)
additional_guider_kwargs = options.pop("additional_guider_kwargs", {})
if guider == "IdentityGuider":
guider_config = {
"target": "sgm.modules.diffusionmodules.guiders.IdentityGuider"
}
elif guider == "VanillaCFG":
scale = st.number_input(
f"cfg-scale #{key}",
value=options.get("cfg", 5.0),
min_value=0.0,
)
guider_config = {
"target": "sgm.modules.diffusionmodules.guiders.VanillaCFG",
"params": {
"scale": scale,
**additional_guider_kwargs,
},
}
elif guider == "LinearPredictionGuider":
max_scale = st.number_input(
f"max-cfg-scale #{key}",
value=options.get("cfg", 1.5),
min_value=1.0,
)
min_scale = st.number_input(
f"min guidance scale",
value=options.get("min_cfg", 1.0),
min_value=1.0,
max_value=10.0,
)
guider_config = {
"target": "sgm.modules.diffusionmodules.guiders.LinearPredictionGuider",
"params": {
"max_scale": max_scale,
"min_scale": min_scale,
"num_frames": options["num_frames"],
**additional_guider_kwargs,
},
}
else:
raise NotImplementedError
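    # the returned config dict is handed to get_sampler below, which passes it to the sampler constructors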
return guider_config
def init_sampling(
key=1,
img2img_strength: Optional[float] = None,
specify_num_samples: bool = True,
stage2strength: Optional[float] = None,
options: Optional[Dict[str, int]] = None,
):
options = {} if options is None else options
num_rows, num_cols = 1, 1
if specify_num_samples:
num_cols = st.number_input(
f"num cols #{key}", value=num_cols, min_value=1, max_value=10
)
steps = st.sidebar.number_input(
f"steps #{key}", value=options.get("num_steps", 40), min_value=1, max_value=1000
)
sampler = st.sidebar.selectbox(
f"Sampler #{key}",
[
"EulerEDMSampler",
"HeunEDMSampler",
"EulerAncestralSampler",
"DPMPP2SAncestralSampler",
"DPMPP2MSampler",
"LinearMultistepSampler",
],
options.get("sampler", 0),
)
discretization = st.sidebar.selectbox(
f"Discretization #{key}",
[
"LegacyDDPMDiscretization",
"EDMDiscretization",
],
options.get("discretization", 0),
)
discretization_config = get_discretization(discretization, options=options, key=key)
guider_config = get_guider(options=options, key=key)
sampler = get_sampler(sampler, steps, discretization_config, guider_config, key=key)
if img2img_strength is not None:
st.warning(
f"Wrapping {sampler.__class__.__name__} with Img2ImgDiscretizationWrapper"
)
sampler.discretization = Img2ImgDiscretizationWrapper(
sampler.discretization, strength=img2img_strength
)
if stage2strength is not None:
sampler.discretization = Txt2NoisyDiscretizationWrapper(
sampler.discretization, strength=stage2strength, original_steps=steps
)
return sampler, num_rows, num_cols
def get_discretization(discretization, options, key=1):
if discretization == "LegacyDDPMDiscretization":
discretization_config = {
"target": "sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization",
}
elif discretization == "EDMDiscretization":
sigma_min = st.number_input(
f"sigma_min #{key}", value=options.get("sigma_min", 0.03)
) # 0.0292
sigma_max = st.number_input(
f"sigma_max #{key}", value=options.get("sigma_max", 14.61)
) # 14.6146
rho = st.number_input(f"rho #{key}", value=options.get("rho", 3.0))
discretization_config = {
"target": "sgm.modules.diffusionmodules.discretizer.EDMDiscretization",
"params": {
"sigma_min": sigma_min,
"sigma_max": sigma_max,
"rho": rho,
},
}
return discretization_config
def get_sampler(sampler_name, steps, discretization_config, guider_config, key=1):
if sampler_name == "EulerEDMSampler" or sampler_name == "HeunEDMSampler":
s_churn = st.sidebar.number_input(f"s_churn #{key}", value=0.0, min_value=0.0)
s_tmin = st.sidebar.number_input(f"s_tmin #{key}", value=0.0, min_value=0.0)
s_tmax = st.sidebar.number_input(f"s_tmax #{key}", value=999.0, min_value=0.0)
s_noise = st.sidebar.number_input(f"s_noise #{key}", value=1.0, min_value=0.0)
if sampler_name == "EulerEDMSampler":
sampler = EulerEDMSampler(
num_steps=steps,
discretization_config=discretization_config,
guider_config=guider_config,
s_churn=s_churn,
s_tmin=s_tmin,
s_tmax=s_tmax,
s_noise=s_noise,
verbose=True,
)
elif sampler_name == "HeunEDMSampler":
sampler = HeunEDMSampler(
num_steps=steps,
discretization_config=discretization_config,
guider_config=guider_config,
s_churn=s_churn,
s_tmin=s_tmin,
s_tmax=s_tmax,
s_noise=s_noise,
verbose=True,
)
elif (
sampler_name == "EulerAncestralSampler"
or sampler_name == "DPMPP2SAncestralSampler"
):
s_noise = st.sidebar.number_input("s_noise", value=1.0, min_value=0.0)
eta = st.sidebar.number_input("eta", value=1.0, min_value=0.0)
if sampler_name == "EulerAncestralSampler":
sampler = EulerAncestralSampler(
num_steps=steps,
discretization_config=discretization_config,
guider_config=guider_config,
eta=eta,
s_noise=s_noise,
verbose=True,
)
elif sampler_name == "DPMPP2SAncestralSampler":
sampler = DPMPP2SAncestralSampler(
num_steps=steps,
discretization_config=discretization_config,
guider_config=guider_config,
eta=eta,
s_noise=s_noise,
verbose=True,
)
elif sampler_name == "DPMPP2MSampler":
sampler = DPMPP2MSampler(
num_steps=steps,
discretization_config=discretization_config,
guider_config=guider_config,
verbose=True,
)
elif sampler_name == "LinearMultistepSampler":
order = st.sidebar.number_input("order", value=4, min_value=1)
sampler = LinearMultistepSampler(
num_steps=steps,
discretization_config=discretization_config,
guider_config=guider_config,
order=order,
verbose=True,
)
else:
raise ValueError(f"unknown sampler {sampler_name}!")
return sampler
def get_interactive_image(key=None) -> Image.Image:
    image = st.file_uploader("Input", type=["jpg", "JPEG", "png"], key=key)
    if image is not None:
        image = Image.open(image)
        if image.mode != "RGB":
            image = image.convert("RGB")
return image
def load_img(
    display: bool = True,
    size: Union[None, int, Tuple[int, int]] = None,
    center_crop: bool = False,
    key=None,
):
    image = get_interactive_image(key=key)
if image is None:
return None
if display:
st.image(image)
w, h = image.size
print(f"loaded input image of size ({w}, {h})")
transform = []
if size is not None:
transform.append(transforms.Resize(size))
if center_crop:
transform.append(transforms.CenterCrop(size))
transform.append(transforms.ToTensor())
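    # ToTensor yields values in [0, 1]; the next transform rescales to the [-1, 1] range used downstream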
transform.append(transforms.Lambda(lambda x: 2.0 * x - 1.0))
transform = transforms.Compose(transform)
img = transform(image)[None, ...]
st.text(f"input min/max/mean: {img.min():.3f}/{img.max():.3f}/{img.mean():.3f}")
return img
def get_init_img(batch_size=1, key=None):
init_image = load_img(key=key).cuda()
init_image = repeat(init_image, "1 ... -> b ...", b=batch_size)
return init_image
def do_sample(
model,
sampler,
value_dict,
num_samples,
H,
W,
C,
F,
force_uc_zero_embeddings: Optional[List] = None,
force_cond_zero_embeddings: Optional[List] = None,
batch2model_input: List = None,
return_latents=False,
filter=None,
T=None,
additional_batch_uc_fields=None,
decoding_t=None,
):
force_uc_zero_embeddings = default(force_uc_zero_embeddings, [])
batch2model_input = default(batch2model_input, [])
additional_batch_uc_fields = default(additional_batch_uc_fields, [])
st.text("Sampling")
outputs = st.empty()
precision_scope = autocast
with torch.no_grad():
with precision_scope("cuda"):
with model.ema_scope():
if T is not None:
num_samples = [num_samples, T]
else:
num_samples = [num_samples]
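                # move the conditioner to the GPU only for conditioning; unload_model below releases it again in low-VRAM mode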
load_model(model.conditioner)
batch, batch_uc = get_batch(
get_unique_embedder_keys_from_conditioner(model.conditioner),
value_dict,
num_samples,
T=T,
additional_batch_uc_fields=additional_batch_uc_fields,
)
c, uc = model.conditioner.get_unconditional_conditioning(
batch,
batch_uc=batch_uc,
force_uc_zero_embeddings=force_uc_zero_embeddings,
force_cond_zero_embeddings=force_cond_zero_embeddings,
)
unload_model(model.conditioner)
for k in c:
                    if k != "crossattn":
c[k], uc[k] = map(
lambda y: y[k][: math.prod(num_samples)].to("cuda"), (c, uc)
)
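                    # video models: broadcast the per-sample conditioning across the T frames before flattening batch and time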
if k in ["crossattn", "concat"] and T is not None:
uc[k] = repeat(uc[k], "b ... -> b t ...", t=T)
uc[k] = rearrange(uc[k], "b t ... -> (b t) ...", t=T)
c[k] = repeat(c[k], "b ... -> b t ...", t=T)
c[k] = rearrange(c[k], "b t ... -> (b t) ...", t=T)
additional_model_inputs = {}
for k in batch2model_input:
if k == "image_only_indicator":
assert T is not None
if isinstance( | sampler.guider, (VanillaCFG, LinearPredictionGuider) | 5 | 2023-11-23 10:57:27+00:00 | 12k |
danilonumeroso/conar | train_reasoner.py | [
{
"identifier": "_PROCESSSOR_DICT",
"path": "models/gnns.py",
"snippet": "_PROCESSSOR_DICT = {\n 'MPNN': MPNN,\n 'GATv2': GATv2,\n 'TriMPNN': TripletMPNN,\n 'GPS': GPS,\n}"
},
{
"identifier": "LitAlgorithmReasoner",
"path": "models/algorithm_reasoner.py",
"snippet": "class LitAlgorithmReasoner(pl.LightningModule):\n def __init__(self,\n hidden_dim,\n algo_processor,\n dataset_class,\n dataset_root,\n dataset_kwargs,\n algorithm='mst_prim',\n update_edges_hidden=False,\n use_TF=False,\n use_sinkhorn=True,\n xavier_on_scalars=True,\n learning_rate=get_hyperparameters()['lr'],\n weight_decay=get_hyperparameters()['weight_decay'],\n test_with_val=False,\n test_with_val_every_n_epoch=20,\n test_train_every_n_epoch=20,\n **algorithm_base_kwargs):\n super().__init__()\n self.hidden_dim = hidden_dim\n self.algorithm_base_kwargs = algorithm_base_kwargs\n self.dataset_class = dataset_class\n self.dataset_root = dataset_root\n self.dataset_kwargs = dataset_kwargs\n self.learning_rate = learning_rate\n self.weight_decay = weight_decay\n self.timeit = False\n self.update_edges_hidden = update_edges_hidden\n self.use_TF = use_TF\n self.use_sinkhorn = use_sinkhorn\n self.algorithm_base_kwargs = algorithm_base_kwargs\n self.algorithm = algorithm\n self.xavier_on_scalars = xavier_on_scalars\n self.test_with_val = test_with_val\n self.test_with_val_every_n_epoch = test_with_val_every_n_epoch\n self.test_train_every_n_epoch = test_train_every_n_epoch\n self._datasets = {}\n if self.test_with_val:\n self.val_dataloader = self.val_dataloader_alt\n self.validation_step = self.validation_step_alt\n self._current_epoch = 0\n self.load_dataset('train')\n\n self.algorithm_module = AlgorithmReasoner(self.dataset.spec,\n self.dataset[0],\n hidden_dim,\n algo_processor,\n update_edges_hidden=update_edges_hidden,\n use_TF=use_TF,\n use_sinkhorn=use_sinkhorn,\n timeit=self.timeit,\n xavier_on_scalars=xavier_on_scalars,\n **algorithm_base_kwargs)\n self.save_hyperparameters(ignore=['algo_processor'])\n\n @property\n def current_epoch(self) -> int:\n \"\"\"The current epoch in the ``Trainer``, or 0 if not attached.\"\"\"\n return self.trainer.current_epoch if self._trainer else self._current_epoch\n\n @current_epoch.setter\n def current_epoch(self, epoch) -> int:\n self._current_epoch = epoch\n\n def prepare_for_transfer(self):\n algo_processor = copy.deepcopy(self.algorithm_module.processor)\n self.algorithm_module = AlgorithmReasoner(self.hidden_dim,\n self.node_features,\n self.edge_features,\n self.output_features,\n algo_processor,\n use_TF=False,\n timeit=self.timeit,\n **self.algorithm_base_kwargs)\n for p in self.algorithm_module.processor.parameters():\n p.requires_grad = False\n\n @staticmethod\n def pointer_loss(predecessor_pred, predecessor_gt_edge_1h,\n softmax_idx, num_nodes):\n loss_unreduced = cross_entropy(predecessor_pred, softmax_idx, predecessor_gt_edge_1h, num_nodes)\n sum_loss = loss_unreduced.flatten().sum()\n cnt_loss = predecessor_gt_edge_1h.count_nonzero()\n return sum_loss / cnt_loss\n\n def single_prediction_loss(self, name, pred, pred_gt, batch, graph_mask,\n node_mask, edge_mask):\n loss = None\n stage, loc, data_type = self.dataset.spec[name]\n if loc == Location.GRAPH:\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[graph_mask], pred_gt[graph_mask].argmax(-1))\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[graph_mask].squeeze(-1),\n pred_gt[graph_mask])\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[graph_mask].squeeze(-1),\n pred_gt[graph_mask])\n\n if loc == Location.NODE:\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pred_gt_one_hot = edge_one_hot_encode_pointers(pred_gt, batch.edge_index)\n loss = type(self).pointer_loss(\n 
pred[edge_mask],\n pred_gt_one_hot[edge_mask],\n batch.edge_index[0][edge_mask], batch.num_nodes)\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[node_mask].squeeze(-1),\n pred_gt[node_mask])\n if data_type == Type.MASK_ONE:\n lsms = torch_scatter.scatter_log_softmax(pred[node_mask], batch.batch[node_mask].unsqueeze(-1), dim=0)\n loss = (-lsms[(pred_gt[node_mask] == 1.)]).mean()\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[node_mask].squeeze(-1),\n pred_gt[node_mask])\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[node_mask], pred_gt[node_mask].argmax(-1))\n if loc == Location.EDGE:\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[edge_mask].squeeze(-1),\n pred_gt[edge_mask])\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[edge_mask], pred_gt[edge_mask].argmax(-1))\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[edge_mask].squeeze(-1),\n pred_gt[edge_mask])\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n pred_gt = pred_gt.int() - starts_edge\n loss = F.cross_entropy(\n pred[edge_mask],\n pred_gt[edge_mask])\n assert loss is not None, f'{stage}/{name}/{loc}/{data_type}'\n return loss\n\n def get_step_loss(self,\n batch,\n all_hint_logits,\n output_logits,\n all_masks_graph):\n\n if self.timeit:\n st = time.time()\n batch = self.algorithm_module.prepare_batch(batch)\n losses_dict = defaultdict(list)\n for i, (pred, graph_mask) in enumerate(zip(all_hint_logits, all_masks_graph)):\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n assert graph_mask.any()\n for name in pred:\n stage, loc, data_type = self.dataset.spec[name]\n pred_gt = getattr(batch, name)[i+1]\n losses_dict[name].append(\n self.single_prediction_loss(name, pred[name], pred_gt,\n batch, graph_mask, node_mask,\n edge_mask))\n\n for name in output_logits:\n graph_mask = torch.ones(batch.num_graphs, dtype=torch.bool, device=self.device)\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n losses_dict[name].append(\n self.single_prediction_loss(name, output_logits[name],\n getattr(batch, name), batch,\n graph_mask, node_mask, edge_mask))\n\n for k, v in losses_dict.items():\n losses_dict[k] = torch.stack(v).mean()\n if self.timeit:\n print(f'loss calculation: {time.time()-st}')\n input()\n\n return losses_dict\n\n def single_prediction_acc(self, name, pred, pred_gt, batch, graph_mask,\n node_mask, edge_mask):\n acc = None\n stage, loc, data_type = self.dataset.spec[name]\n if loc == Location.NODE:\n if data_type == Type.MASK_ONE:\n # try:\n acc = (pred[node_mask].squeeze(-1).nonzero() == pred_gt[node_mask].nonzero()).float().mean()\n # except Exception as e:\n # breakpoint()\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION, Type.MASK]:\n acc = (pred[node_mask].squeeze(-1) == pred_gt[node_mask]).float().mean()\n if data_type == Type.SCALAR:\n acc = ((pred[node_mask].squeeze(-1) - pred_gt[node_mask])**2).mean()\n if data_type == Type.CATEGORICAL:\n acc = (pred[node_mask].argmax(-1) == pred_gt[node_mask].argmax(-1)).float().mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[node_mask].squeeze(-1), pred_gt[node_mask])\n\n if loc == Location.GRAPH:\n if data_type == Type.CATEGORICAL:\n acc = (pred[graph_mask].argmax(-1) == pred_gt[graph_mask].argmax(-1)).float().mean()\n if data_type == 
Type.SCALAR:\n acc = ((pred[graph_mask].squeeze(-1) - pred_gt[graph_mask])**2).mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[graph_mask].squeeze(-1), pred_gt[graph_mask])\n\n if loc == Location.EDGE:\n if data_type == Type.CATEGORICAL:\n acc = (pred[edge_mask].argmax(-1) == pred_gt[edge_mask].argmax(-1)).float().mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[edge_mask].squeeze(-1), pred_gt[edge_mask])\n if data_type == Type.SCALAR:\n acc = ((pred[edge_mask].squeeze(-1) - pred_gt[edge_mask])**2).mean()\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n pred_gt = pred_gt.int() - starts_edge\n acc = (pred[edge_mask] == pred_gt[edge_mask]).float().mean()\n assert acc is not None, f\"Please implement {name}\"\n return acc\n\n def get_metrics(self,\n batch,\n all_hint_logits,\n output_logits,\n all_masks_graph):\n\n batch = self.algorithm_module.prepare_batch(batch)\n accs_dict = defaultdict(list)\n\n for i, (pred, graph_mask) in enumerate(zip(all_hint_logits, all_masks_graph)):\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n outputs = type(self.algorithm_module).convert_logits_to_outputs(\n self.dataset.spec, {'hint': pred},\n batch.edge_index[0],\n batch.edge_index[1],\n batch.num_nodes,\n batch.batch,\n include_probabilities=False)['hint']\n\n for name in outputs:\n acc = self.single_prediction_acc(\n name,\n outputs[name],\n getattr(batch, name)[i+1],\n batch,\n graph_mask,\n node_mask,\n edge_mask)\n accs_dict[name].append(acc)\n\n outputs = type(self.algorithm_module).convert_logits_to_outputs(\n self.dataset.spec,\n output_logits,\n batch.edge_index[0],\n batch.edge_index[1],\n batch.num_nodes,\n batch.batch,\n include_probabilities=False)['output']\n for name in outputs:\n graph_mask = torch.ones(batch.num_graphs, dtype=torch.bool, device=self.device)\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n accs_dict[name].append(\n self.single_prediction_acc(\n name,\n outputs[name],\n getattr(batch, name),\n batch,\n graph_mask,\n node_mask,\n edge_mask))\n\n for k, v in accs_dict.items():\n accs_dict[k] = torch.stack(v).mean()\n\n return accs_dict\n\n def fwd_step(self, batch, batch_idx):\n if self.timeit:\n st = time.time()\n self.algorithm_module.epoch = self.current_epoch\n all_hint_logits, output_logits, masks = self.algorithm_module.process(batch)\n if self.timeit:\n print(f'forward step: {time.time()-st}')\n input()\n return all_hint_logits, output_logits, masks\n\n def training_step(self, batch, batch_idx):\n all_hint_logits, output_logits, masks = self.fwd_step(batch, batch_idx)\n losses_dict = self.get_step_loss(batch, all_hint_logits, output_logits['output'], masks)\n self.log_dict(dict((f'train/loss/{k}', v) for k, v in losses_dict.items()), batch_size=batch.num_graphs)\n total_loss = sum(losses_dict.values()) / len(losses_dict)\n self.log('train/loss/average_loss', total_loss, prog_bar=False, on_step=True, on_epoch=True, batch_size=batch.num_graphs)\n accs_dict = {}\n if self.current_epoch % self.test_train_every_n_epoch == 0:\n accs_dict = self.get_metrics(batch, all_hint_logits, output_logits, masks)\n self.log_dict(dict((f'train/acc/{k}', v) for k, v in accs_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n # if sum(losses_dict.values()) > 1e5:\n # breakpoint()\n return {'loss': total_loss, 'losses_dict': losses_dict, 'accuracies': 
accs_dict}\n\n def valtest_step(self, batch, batch_idx, mode):\n all_hint_logits, output_logits, masks = self.fwd_step(batch, batch_idx)\n losses_dict = self.get_step_loss(batch, all_hint_logits, output_logits['output'], masks)\n self.log_dict(dict((f'{mode}/loss/{k}', v) for k, v in losses_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n if torch.isnan(sum(losses_dict.values())).any():\n breakpoint()\n self.log(f'{mode}/loss/average_loss', sum(losses_dict.values()) / len(losses_dict), batch_size=batch.num_graphs, add_dataloader_idx=False)\n accs_dict = self.get_metrics(batch, all_hint_logits, output_logits, masks)\n self.log_dict(dict((f'{mode}/acc/{k}', v) for k, v in accs_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n return {'losses': losses_dict, 'accuracies': accs_dict}\n\n def validation_step_alt(self, batch, batch_idx, dataloader_idx):\n if dataloader_idx == 1 and not self.trainer.state.stage == 'sanity_check' and self.current_epoch % self.test_with_val_every_n_epoch == 0:\n return self.valtest_step(batch, batch_idx, 'periodic_test')\n if dataloader_idx == 0:\n return self.valtest_step(batch, batch_idx, 'val')\n\n def validation_step(self, batch, batch_idx):\n return self.valtest_step(batch, batch_idx, 'val')\n\n def test_step(self, batch, batch_idx):\n return self.valtest_step(batch, batch_idx, 'test')\n\n def predict_step(self, batch, batch_idx):\n return self.fwd_step(batch, batch_idx)\n\n def load_dataset(self, split, suffix=''):\n split = split+suffix\n nn = CONFIGS[self.algorithm][split]['num_nodes']\n self.dataset_kwargs['split'] = split\n if (split, nn) not in self._datasets:\n self._datasets[(split, nn)] = self.dataset_class(\n self.dataset_root,\n nn,\n CONFIGS[self.algorithm][split]['num_samples'],\n algorithm=self.algorithm,\n **self.dataset_kwargs)\n self.dataset = self._datasets[(split, nn)]\n print(f'Loading {self.dataset=} (num nodes: {nn}) with kwargs')\n pprint(self.dataset_kwargs)\n print()\n\n def get_a_loader(self, split, suffix=''):\n self.load_dataset(split, suffix='')\n self.algorithm_module.dataset_spec = self.dataset.spec\n dl = DataLoader(self.dataset,\n batch_size=get_hyperparameters()['batch_size'],\n shuffle=True if split == 'train' else False,\n drop_last=False,\n follow_batch=['edge_index'],\n num_workers=1,\n persistent_workers=True)\n return dl\n\n def train_dataloader(self):\n return self.get_a_loader('train')\n\n def val_dataloader_alt(self):\n return [self.get_a_loader('val'), self.get_a_loader('test')]\n\n def val_dataloader(self):\n return self.get_a_loader('val')\n\n def test_dataloader(self, suffix=''):\n return self.get_a_loader('test'+suffix)\n\n def configure_optimizers(self):\n lr = self.learning_rate\n wd = self.weight_decay\n optimizer = optim.Adam(self.parameters(),\n weight_decay=wd,\n lr=lr)\n return optimizer"
},
{
"identifier": "LitAlgorithmProcessor",
"path": "models/algorithm_processor.py",
"snippet": "class LitAlgorithmProcessor(pl.LightningModule):\n\n def __init__(self,\n hidden_dim,\n algorithm_names,\n dataset_kwargs,\n algo_classes,\n ensure_permutation,\n processors=['MPNN'],\n bias=get_hyperparameters()['bias'],\n reduce_proc_hid_w_MLP=False,\n update_edges_hidden=False,\n use_gate=False,\n use_LSTM=False,\n use_ln=False,\n use_TF=False,\n transferring=False,\n freeze_proc=False,\n double_process=False,\n xavier_on_scalars=False,\n biased_gate=False,\n test_with_val=True,\n test_with_val_every_n_epoch=20,\n test_train_every_n_epoch=20,\n lr=get_hyperparameters()['lr'],\n weight_decay=get_hyperparameters()['weight_decay']):\n super().__init__()\n self.hidden_dim = hidden_dim\n self.processors = processors\n self.bias = bias\n self.reduce_proc_hid_w_MLP = reduce_proc_hid_w_MLP\n self.use_gate = use_gate\n self.use_LSTM = use_LSTM\n self.use_ln = use_ln\n self.use_TF = use_TF\n self.update_edges_hidden = update_edges_hidden\n self.transferring = transferring\n self.learning_rate = lr\n self.weight_decay = weight_decay\n self.xavier_on_scalars = xavier_on_scalars\n self.biased_gate = biased_gate\n self.freeze_proc = freeze_proc\n self.double_process = double_process\n self.test_with_val = test_with_val\n self.test_with_val_every_n_epoch = test_with_val_every_n_epoch\n self.test_train_every_n_epoch = test_train_every_n_epoch\n self.val_dataloader = self.val_dataloader_normal\n if self.test_with_val:\n self.val_dataloader = self.val_dataloader_alt\n self.validation_step = self.validation_step_alt\n self.processor_set = LitProcessorSet(\n 2*hidden_dim,\n hidden_dim,\n reduce_with_MLP=reduce_proc_hid_w_MLP,\n update_edges_hidden=update_edges_hidden,\n edge_dim=hidden_dim,\n bias=bias,\n use_gate=use_gate,\n use_LSTM=use_LSTM,\n use_ln=use_ln,\n biased_gate=biased_gate,\n processors=processors)\n self.algorithm_names = algorithm_names\n self.algorithms = nn.ModuleDict()\n for algo in algorithm_names:\n self.algorithms[algo] = algo_classes[algo](\n algorithm=algo,\n hidden_dim=hidden_dim,\n algo_processor=self.processor_set,\n dataset_class=_DATASET_CLASSES[algo],\n dataset_root=_DATASET_ROOTS[algo],\n dataset_kwargs=dataset_kwargs[algo],\n bias=bias,\n use_TF=use_TF,\n transferring=transferring,\n ensure_permutation=ensure_permutation,\n xavier_on_scalars=xavier_on_scalars,\n test_with_val=False, # ALWAYS FALSE\n test_with_val_every_n_epoch=test_with_val_every_n_epoch,\n test_train_every_n_epoch=test_train_every_n_epoch,\n double_process=self.double_process,\n )\n self.save_hyperparameters(ignore=[])\n self.debug_epoch = 1e9\n\n def train_dataloader(self):\n return [self.algorithms[algo].train_dataloader() for algo in self.algorithm_names]\n # return CombinedLoader(dict((name, algo.train_dataloader()) for name, algo in self.algorithms.items()), mode='max_size_cycle')\n\n def val_dataloader_normal(self):\n return CombinedLoader(dict((name, algo.val_dataloader()) for name, algo in self.algorithms.items()), mode='max_size_cycle')\n\n def val_dataloader_alt(self):\n return [self.val_dataloader_normal(), self.test_dataloader()]\n\n def test_dataloader(self, suffix=''):\n return CombinedLoader(dict((name, algo.test_dataloader(suffix=suffix)) for name, algo in self.algorithms.items()), mode='max_size_cycle')\n\n\n def forward(self, batch):\n return self.fwd_step(batch, 0)\n\n def fwd_step(self, batch, batch_idx):\n assert not self.freeze_proc or not any(k.requires_grad for k in self.processor_set.processors[0].parameters()), breakpoint()\n outputs = {}\n for name, algorithm in 
self.algorithms.items():\n outputs[name] = algorithm.fwd_step(batch[name], batch_idx)\n return outputs\n\n def on_train_epoch_start(self):\n for algorithm in self.algorithms.values():\n algorithm.current_epoch = self.current_epoch\n\n\n def training_step(self, batch, batch_idx):\n total_loss = 0\n for name, algo_batch in zip(self.algorithm_names, batch):\n algorithm = self.algorithms[name]\n output = algorithm.training_step(algo_batch, batch_idx)\n if isinstance(algo_batch, list):\n num_graphs = algo_batch[0].num_graphs\n else:\n num_graphs = algo_batch.num_graphs\n self.log_dict(dict((f'train/loss/{name}/{k}', v) for k, v in output['losses_dict'].items()), batch_size=num_graphs)\n self.log(f'train/loss/{name}/average_loss', output['loss'], on_step=True, on_epoch=True, batch_size=num_graphs)\n self.log_dict(dict((f'train/acc/{name}/{k}', v) for k, v in output['accuracies'].items()), batch_size=num_graphs, add_dataloader_idx=False, on_epoch=True, on_step=False)\n total_loss = total_loss + output['loss']\n total_loss = total_loss / len(self.algorithms)\n self.log('train/loss/average_loss', total_loss, on_step=True, on_epoch=True, prog_bar=False, batch_size=num_graphs)\n if self.current_epoch >= self.debug_epoch:\n breakpoint()\n return {'loss': total_loss}\n\n def valtest_step(self, batch, batch_idx, mode):\n output = {}\n total_loss = 0\n for name, algorithm in self.algorithms.items():\n output[name] = algorithm.valtest_step(batch[name], batch_idx, mode)\n self.log_dict(dict((f'{mode}/loss/{name}/{k}', v) for k, v in output[name]['losses'].items()), batch_size=batch[name].num_graphs, add_dataloader_idx=False)\n average_loss = sum(output[name]['losses'].values()) / len(output[name]['losses'])\n self.log(f'{mode}/loss/{name}/average_loss', average_loss, batch_size=batch[name].num_graphs, add_dataloader_idx=False, on_epoch=True)\n self.log_dict(dict((f'{mode}/acc/{name}/{k}', v) for k, v in output[name]['accuracies'].items()), batch_size=batch[name].num_graphs, add_dataloader_idx=False)\n total_loss = total_loss + average_loss\n total_loss = total_loss / len(self.algorithms)\n self.log(f'{mode}/loss/average_loss', total_loss, batch_size=batch[name].num_graphs, add_dataloader_idx=False)\n return output\n\n\n def validation_step_alt(self, batch, batch_idx, dataloader_idx):\n if dataloader_idx == 1 and not self.trainer.state.stage == 'sanity_check' and self.current_epoch % self.test_with_val_every_n_epoch == 0:\n return self.valtest_step(batch, batch_idx, 'periodic_test')\n if dataloader_idx == 0:\n return self.valtest_step(batch, batch_idx, 'val')\n\n def validation_step(self, batch, batch_idx):\n return self.valtest_step(batch, batch_idx, 'val')\n\n def test_step(self, batch, batch_idx):\n return self.valtest_step(batch, batch_idx, 'test')\n\n def configure_optimizers(self):\n term_params = []\n normal_params = []\n for name, param in self.named_parameters():\n if '_term' in name or 'termination' in name or 'predinet' in name:\n term_params.append(param)\n else:\n normal_params.append(param)\n lr = self.learning_rate\n optimizer = optim.Adam([\n {'params': term_params, 'lr': lr},\n {'params': normal_params, 'lr': lr}\n ],\n lr=lr,\n weight_decay=self.weight_decay)\n return optimizer"
},
{
"identifier": "get_hyperparameters",
"path": "hyperparameters.py",
"snippet": "def get_hyperparameters():\n return {\n 'dim_latent': 128,\n 'num_bits': 8,\n 'weight_decay': 0,\n 'lr': 0.0003,\n 'nee_warmup_steps': 4000,\n 'dim_nodes_mst_prim': 1,\n 'dim_target_mst_prim': 1,\n 'device': 'cuda',\n 'batch_size': 64,\n 'bias': True,\n 'seed': 47, # for dataset generation\n 'calculate_termination_statistics': False,\n }"
},
{
"identifier": "ReasonerZeroerCallback",
"path": "utils_execution.py",
"snippet": "class ReasonerZeroerCallback(pl.callbacks.Callback):\n @staticmethod\n def zero_it(pl_module):\n pl_module.custom_logs = defaultdict(list)\n pl_module.algorithm_module.zero_validation_stats()\n def on_validation_epoch_start(self, trainer, pl_module):\n ReasonerZeroerCallback.zero_it(pl_module)\n def on_test_epoch_start(self, trainer, pl_module):\n ReasonerZeroerCallback.zero_it(pl_module)"
},
{
"identifier": "get_callbacks",
"path": "utils_execution.py",
"snippet": "def get_callbacks(name, serialised_models_dir, patience, monitor='val/loss/average_loss'):\n best_checkpointing_cb = pl.callbacks.ModelCheckpoint(\n dirpath=serialised_models_dir,\n filename=f'best_{name}',\n save_top_k=1,\n monitor=monitor,\n mode='min',\n )\n all_cbs = [best_checkpointing_cb]#, checkpoint_cb]\n if patience is not None:\n early_stopping_cb = pl.callbacks.EarlyStopping(\n monitor=monitor,\n patience=patience,\n verbose=True,\n mode='min',\n )\n all_cbs.append(early_stopping_cb)\n return all_cbs"
},
{
"identifier": "maybe_remove",
"path": "utils_execution.py",
"snippet": "def maybe_remove(path): # path can be regex\n try:\n for f in glob.glob(path):\n os.remove(f)\n except Exception:\n pass"
}
] | import os
import schema
import torch
import pytorch_lightning as pl
from datetime import datetime
from collections import defaultdict
from docopt import docopt
from models.gnns import _PROCESSSOR_DICT
from models.algorithm_reasoner import LitAlgorithmReasoner
from models.algorithm_processor import LitAlgorithmProcessor
from hyperparameters import get_hyperparameters
from utils_execution import ReasonerZeroerCallback, get_callbacks, maybe_remove | 7,390 | """
Script to train the reasoner model.
Usage:
train_reasoner.py [options]
Options:
-h --help Show this screen.
--patience P Patience value. If present, the training will utilise
early stopping based on validation loss.
--max-epochs ME The maximum epochs to train for. If patience value is not
provided it will always train for ME epochs. [default: 1000]
--model-name MN Name of the model when saving. Defaults to current time
and date if not provided.
--processors PS Which processors to use. String of comma separated values.
[default: MPNN]
--RPHWM Whether to Reduce Processor set Hiddens With MLP?
--gradient-clip-val G Constant for gradient clipping. 0 means no clipping.
[default: 1]
--xavier-on-scalars Use Xavier initialisation for linears that encode scalars.
--biased-gate Bias the gating mechanism towards less updating
--update-edges-hidden Whether to also keep a track of hidden edge state.
--use-LSTM Add an LSTMCell just after the processor step
(in case of several processors, each has its own LSTM)
--use-ln Use Layer Norm in the processor.
    --algorithms ALGOS               Comma-separated list of algorithms to train on. [default: mst_prim]
--sampler-type (default|geometric) What sampler was used for graph generation. [default: default]
--seed S Random seed to set. [default: 47]
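Example (illustrative invocation only; the flags are those documented above):
    python train_reasoner.py --algorithms mst_prim --processors MPNN --max-epochs 500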
"""
if __name__ == '__main__':
hidden_dim = get_hyperparameters()['dim_latent']
serialised_models_dir = os.path.abspath('./serialised_models/')
schema = schema.Schema({
'--help': bool,
'--xavier-on-scalars': bool,
'--biased-gate': bool,
'--update-edges-hidden': bool,
'--use-LSTM': bool,
'--use-ln': bool,
'--patience': schema.Or(None, schema.Use(int)),
'--max-epochs': schema.Or(None, schema.Use(int)),
'--model-name': schema.Or(None, schema.Use(str)),
'--processors': schema.And(schema.Use(lambda x: x.split(',')), lambda lst: all(x in _PROCESSSOR_DICT for x in lst)),
'--RPHWM': bool,
'--gradient-clip-val': schema.Use(int),
'--algorithms': schema.Use(lambda x: x.split(',')),
'--sampler-type': str,
'--seed': schema.Use(int),
})
args = docopt(__doc__)
args = schema.validate(args)
name = args['--model-name'] if args['--model-name'] is not None else datetime.now().strftime('%b-%d-%Y-%H-%M')
pl.utilities.seed.seed_everything(args['--seed'])
lit_processor = LitAlgorithmProcessor(
hidden_dim,
args['--algorithms'],
dict((algo, {'sampler_type': args['--sampler-type']}) for algo in args['--algorithms']),
dict((algo, LitAlgorithmReasoner) for algo in args['--algorithms']),
False, #args['--ensure-permutation'] is False for non-TSP
reduce_proc_hid_w_MLP=args['--RPHWM'],
update_edges_hidden=args['--update-edges-hidden'],
use_TF=False,
use_gate=True,
use_LSTM=args['--use-LSTM'],
use_ln=args['--use-ln'],
freeze_proc=False, # We don't have a transfer task
processors=args['--processors'],
xavier_on_scalars=args['--xavier-on-scalars'],
biased_gate=args['--biased-gate'],
)
| """
Script to train the reasoner model.
Usage:
train_reasoner.py [options]
Options:
-h --help Show this screen.
--patience P Patience value. If present, the training will utilise
early stopping based on validation loss.
--max-epochs ME The maximum epochs to train for. If patience value is not
provided it will always train for ME epochs. [default: 1000]
--model-name MN Name of the model when saving. Defaults to current time
and date if not provided.
--processors PS Which processors to use. String of comma separated values.
[default: MPNN]
--RPHWM Whether to Reduce Processor set Hiddens With MLP?
--gradient-clip-val G Constant for gradient clipping. 0 means no clipping.
[default: 1]
--xavier-on-scalars Use Xavier initialisation for linears that encode scalars.
--biased-gate Bias the gating mechanism towards less updating
--update-edges-hidden Whether to also keep a track of hidden edge state.
--use-LSTM Add an LSTMCell just after the processor step
(in case of several processors, each has its own LSTM)
--use-ln Use Layer Norm in the processor.
--algorithms ALGOS List of algorithms to train on. Repeatable. [default: mst_prim]
--sampler-type (default|geometric) What sampler was used for graph generation. [default: default]
--seed S Random seed to set. [default: 47]
"""
if __name__ == '__main__':
hidden_dim = get_hyperparameters()['dim_latent']
serialised_models_dir = os.path.abspath('./serialised_models/')
schema = schema.Schema({
'--help': bool,
'--xavier-on-scalars': bool,
'--biased-gate': bool,
'--update-edges-hidden': bool,
'--use-LSTM': bool,
'--use-ln': bool,
'--patience': schema.Or(None, schema.Use(int)),
'--max-epochs': schema.Or(None, schema.Use(int)),
'--model-name': schema.Or(None, schema.Use(str)),
'--processors': schema.And(schema.Use(lambda x: x.split(',')), lambda lst: all(x in _PROCESSSOR_DICT for x in lst)),
'--RPHWM': bool,
'--gradient-clip-val': schema.Use(int),
'--algorithms': schema.Use(lambda x: x.split(',')),
'--sampler-type': str,
'--seed': schema.Use(int),
})
args = docopt(__doc__)
args = schema.validate(args)
name = args['--model-name'] if args['--model-name'] is not None else datetime.now().strftime('%b-%d-%Y-%H-%M')
pl.utilities.seed.seed_everything(args['--seed'])
lit_processor = LitAlgorithmProcessor(
hidden_dim,
args['--algorithms'],
dict((algo, {'sampler_type': args['--sampler-type']}) for algo in args['--algorithms']),
dict((algo, LitAlgorithmReasoner) for algo in args['--algorithms']),
False, #args['--ensure-permutation'] is False for non-TSP
reduce_proc_hid_w_MLP=args['--RPHWM'],
update_edges_hidden=args['--update-edges-hidden'],
use_TF=False,
use_gate=True,
use_LSTM=args['--use-LSTM'],
use_ln=args['--use-ln'],
freeze_proc=False, # We don't have a transfer task
processors=args['--processors'],
xavier_on_scalars=args['--xavier-on-scalars'],
biased_gate=args['--biased-gate'],
)
| all_cbs = get_callbacks(name, serialised_models_dir, args['--patience']) | 5 | 2023-11-20 15:32:43+00:00 | 12k |
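The train_reasoner.py record above drives its whole configuration through a docopt usage string whose raw string arguments are then coerced and checked by schema. Below is a minimal, self-contained sketch of that parse-then-validate pattern; toy_cli.py, PROCESSORS and the reduced option set are illustrative stand-ins, not part of the repository above.

"""Toy CLI sketch (illustrative only).

Usage:
    toy_cli.py [options]

Options:
    -h --help        Show this screen.
    --max-epochs ME  Maximum number of epochs. [default: 10]
    --processors PS  Comma-separated processor names. [default: MPNN]
    --seed S         Random seed. [default: 47]
"""
import schema
from docopt import docopt

# Hypothetical registry standing in for _PROCESSSOR_DICT in the record above.
PROCESSORS = {"MPNN", "GAT"}

arg_schema = schema.Schema({
    "--help": bool,
    "--max-epochs": schema.Use(int),                    # cast "10" -> 10
    "--processors": schema.And(
        schema.Use(lambda s: s.split(",")),             # "MPNN,GAT" -> ["MPNN", "GAT"]
        lambda lst: all(p in PROCESSORS for p in lst),  # reject unknown processor names
    ),
    "--seed": schema.Use(int),
})

if __name__ == "__main__":
    # argv is passed explicitly so the sketch runs without real command-line input.
    args = arg_schema.validate(docopt(__doc__, argv=["--max-epochs", "5"]))
    print(args["--max-epochs"], args["--processors"], args["--seed"])  # 5 ['MPNN'] 47

Keeping all type coercion inside schema.Use / schema.And mirrors the record's own schema.Schema block: the docopt docstring stays purely declarative, and everything downstream of validate() sees already-typed values.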
oniyevski/oniRedemption | ui/main.py | [
{
"identifier": "Header",
"path": "ui/header.py",
"snippet": "class Header:\n def __init__(self, config):\n self.config = config\n self.functions = Functions()\n\n self.logoText = Row(\n controls=[\n Text(\n \"oniRedemption\",\n color=REDEMPTION,\n size=20,\n weight=FontWeight.BOLD\n ),\n Text(\n \"v1.0.1\",\n color=WHITE,\n size=20,\n weight=FontWeight.BOLD\n )\n ]\n )\n self.menu = Row(\n controls=[\n IconButton(\n icons.MINIMIZE_ROUNDED,\n icon_color=WHITE,\n on_click=self.functions.minimazePage\n ),\n IconButton(\n icons.CLOSE,\n icon_color=WHITE,\n on_click=self.functions.closePage\n )\n ]\n )\n self.header = Container(\n alignment=alignment.center,\n content=Row(\n alignment=\"spaceBetween\",\n controls=[\n self.logoText,\n self.menu\n ]\n )\n )"
},
{
"identifier": "Footer",
"path": "ui/footer.py",
"snippet": "class Footer:\n def __init__(self, config):\n self.config = config\n self.logoText = Row(\n spacing=3,\n controls=[\n Container(\n margin=margin.Margin(top=3, bottom=0, left=0, right=0),\n content=Icon(\n icons.HEART_BROKEN,\n size=15,\n color=REDEMPTION\n )\n ),\n Text(\n \"developed by\",\n color=WHITE,\n size=15,\n weight=FontWeight.W_100),\n Text(\n \"oniyevski\",\n color=REDEMPTION,\n size=15,\n weight=FontWeight.BOLD\n )\n ]\n )\n self.footer = Container(\n alignment=alignment.center,\n content=Row(\n alignment=\"center\",\n controls=[\n self.logoText\n ]\n )\n )"
},
{
"identifier": "Content",
"path": "ui/content.py",
"snippet": "class Content:\n def __init__(self, config):\n self.extraPages = [\"shield\", \"settings\"]\n \n self.config = config\n \n self.config.config_loader()\n \n self.loadedLanguage = self.config.lastConfig[\"settings\"][\"language\"]\n \n self.whileBreak = True\n \n self.page = \"start\"\n \n def select_minute_dropdown_change(e):\n self.minute = self.select_minute_dropdown.value\n \n def language_dropdown_change(e):\n self.config.lastConfig[\"settings\"][\"language\"] = e.control.value\n self.config.write_config()\n if e.control.value == self.loadedLanguage:\n self.program_restart.visible = False\n else:\n self.program_restart.visible = True\n self.settings.update()\n \n def settings_change(e, key):\n if e.control.value == True:\n e.control.thumb_color = REDEMPTION\n else:\n e.control.thumb_color = WHITE\n self.settings.update()\n self.config.lastConfig[\"settings\"][key] = e.control.value\n self.config.write_config()\n if self.config.lastConfig[\"settings\"][\"sound_effects\"]:\n e.page.overlay[2].play()\n \n def shield_click(e):\n func = Functions()\n if self.page == \"chronometer\":\n modal = Modal(\n e, \n self.config.get_local_text(\"hey_modal_title\"), self.config.get_local_text(\"the_stopwatch_is_working\"),\n )\n modal.set_actions([\n TextButton(self.config.get_local_text(\"modal_understood\"), on_click=modal.close_dlg, style=ButtonStyle(color=REDEMPTION)),\n ]\n )\n modal.open_dlg()\n return\n self.page = \"shield\"\n self.shield_container.content.bgcolor = REDEMPTION\n self.settings_container.content.bgcolor = BG\n self.timer_container.content.bgcolor = BG\n self.rail.update()\n func.visibler(self.select_minute, False)\n func.visibler(self.settings, False)\n func.visibler(self.shield, True)\n \n def shield_change(e):\n rdr2 = Process(\"RDR2.exe\")\n rdr2Status = rdr2.getProcessStatus()\n if rdr2Status != \"error\" and rdr2Status != \"stopped\":\n modal = Modal(\n e, \n self.config.get_local_text(\"hey_modal_title\"), self.config.get_local_text(\"rdr2_running_modal\"),\n )\n modal.set_actions([\n TextButton(self.config.get_local_text(\"modal_understood\"), on_click=modal.close_dlg, style=ButtonStyle(color=REDEMPTION)),\n ]\n )\n modal.open_dlg()\n if e.control.value == True:\n e.control.value = False\n else:\n e.control.value = True\n self.shield.update()\n return\n if self.shield_password.value == \"\":\n modal = Modal(\n e, \n self.config.get_local_text(\"hey_modal_title\"), self.config.get_local_text(\"password_part_blank_not_leavable\"),\n )\n modal.set_actions([\n TextButton(self.config.get_local_text(\"modal_understood\"), on_click=modal.close_dlg, style=ButtonStyle(color=REDEMPTION)),\n ]\n )\n modal.open_dlg()\n if e.control.value == True:\n e.control.value = False\n else:\n e.control.value = True\n self.shield.update()\n return\n if e.control.value == True:\n try:\n self.shield_password.disabled = True\n xml = '<?xml version=\"1.0\" encoding=\"UTF-8\"?><CDataFileMgr__ContentsOfDataFileXml><disabledFiles /><includedXmlFiles itemType=\"CDataFileMgr__DataFileArray\" /><includedDataFiles /><dataFiles 
itemType=\"CDataFileMgr__DataFile\"><Item><filename>platform:/data/cdimages/scaleform_platform_pc.rpf</filename><fileType>RPF_FILE</fileType></Item><Item><filename>platform:/data/ui/value_conversion.rpf</filename><fileType>RPF_FILE</fileType></Item><Item><filename>platform:/data/ui/widgets.rpf</filename><fileType>RPF_FILE</fileType></Item><Item><filename>platform:/textures/ui/ui_photo_stickers.rpf</filename><fileType>RPF_FILE</fileType></Item><Item><filename>platform:/textures/ui/ui_platform.rpf</filename><fileType>RPF_FILE</fileType></Item><Item><filename>platform:/data/ui/stylesCatalog</filename><fileType>aWeaponizeDisputants</fileType></Item><Item><filename>platform:/data/cdimages/scaleform_frontend.rpf</filename><fileType>RPF_FILE_PRE_INSTALL</fileType></Item><Item><filename>platform:/textures/ui/ui_startup_textures.rpf</filename><fileType>RPF_FILE</fileType></Item><Item><filename>platform:/data/ui/startup_data.rpf</filename><fileType>RPF_FILE</fileType></Item></dataFiles><contentChangeSets itemType=\"CDataFileMgr__ContentChangeSet\" /><patchFiles /></CDataFileMgr__ContentsOfDataFileXml>'+self.shield_password.value\n f = open(self.config.lastConfig[\"settings\"][\"rdr_path\"] + \"\\\\x64\\\\data\\\\startup.meta\", \"w\")\n f.write(xml)\n f.close()\n except:\n if e.control.value == True:\n e.control.value = False\n else:\n e.control.value = True\n self.shield.update()\n modal = Modal(\n e, \n self.config.get_local_text(\"hey_modal_title\"), self.config.get_local_text(\"rdr2_running_this_process_failed\"),\n )\n modal.set_actions([\n TextButton(self.config.get_local_text(\"modal_understood\"), on_click=modal.close_dlg, style=ButtonStyle(color=REDEMPTION)),\n ]\n )\n modal.open_dlg()\n return\n else:\n if os.path.exists(config.lastConfig[\"settings\"][\"rdr_path\"] + \"\\\\x64\\\\data\\\\startup.meta\"):\n try:\n os.remove(config.lastConfig[\"settings\"][\"rdr_path\"] + \"\\\\x64\\\\data\\\\startup.meta\")\n self.shield_password.disabled = False\n except:\n if e.control.value == True:\n e.control.value = False\n else:\n e.control.value = True\n self.shield.update()\n modal = Modal(\n e, \n self.config.get_local_text(\"hey_modal_title\"), self.config.get_local_text(\"rdr2_running_this_process_failed\"),\n )\n modal.set_actions([\n TextButton(self.config.get_local_text(\"modal_understood\"), on_click=modal.close_dlg, style=ButtonStyle(color=REDEMPTION)),\n ]\n )\n modal.open_dlg()\n return\n if e.control.value == True:\n e.control.thumb_color = REDEMPTION\n if self.config.lastConfig[\"settings\"][\"sound_effects\"]:\n e.page.overlay[1].play()\n else:\n e.control.thumb_color = WHITE\n if self.config.lastConfig[\"settings\"][\"sound_effects\"]:\n e.page.overlay[2].play()\n self.shield.update()\n self.config.lastConfig[\"settings\"][\"shield_password\"] = self.shield_password.value\n self.config.lastConfig[\"settings\"][\"shield_status\"] = e.control.value\n self.config.write_config()\n \n def settings_click(e):\n func = Functions()\n if self.page == \"chronometer\":\n modal = Modal(\n e, \n self.config.get_local_text(\"hey_modal_title\"), self.config.get_local_text(\"the_stopwatch_is_working\"),\n )\n modal.set_actions([\n TextButton(self.config.get_local_text(\"modal_understood\"), on_click=modal.close_dlg, style=ButtonStyle(color=REDEMPTION)),\n ]\n )\n modal.open_dlg()\n return\n self.page = \"settings\"\n self.shield_container.content.bgcolor = BG\n self.settings_container.content.bgcolor = REDEMPTION\n self.timer_container.content.bgcolor = BG\n self.rail.update()\n 
func.visibler(self.select_minute, False)\n func.visibler(self.shield, False)\n func.visibler(self.settings, True)\n \n def stop_click(e=\"bypass\"):\n func = Functions()\n if e != \"bypass\":\n self.page = \"start\"\n self.shield_container.content.content.color = WHITE\n self.shield_container.update()\n self.settings_container.content.content.color = WHITE\n self.settings_container.update()\n func.visibler(self.timer_container, True)\n func.visibler(self.timer_stop_container, False)\n func.visibler(self.select_minute, True)\n func.visibler(self.minute_part, False)\n self.gold_bar.image_src = func.get_asset(\"0.00.png\")\n self.gold_bar.update()\n self.progressbar.value = None\n self.progressbar_container.update()\n self.percent.value = \"%0.00\"\n self.percent_container.update()\n self.remaining_time.value = f\"0 {self.config.get_local_text('minute')}, 0 {self.config.get_local_text('second')}\"\n self.remaining_time_container.update()\n self.whileBreak = True\n \n def start_click(e):\n func = Functions()\n if self.page in self.extraPages:\n func.visibler(self.settings, False)\n func.visibler(self.shield, False)\n func.visibler(self.select_minute, True)\n self.page = \"start\"\n self.shield_container.content.bgcolor = BG\n self.settings_container.content.bgcolor = BG\n self.timer_container.content.bgcolor = REDEMPTION\n self.rail.update()\n return\n if self.minute == \"\":\n modal = Modal(\n e, \n self.config.get_local_text(\"hey_modal_title\"), self.config.get_local_text(\"you_must_choose_a_minute\"),\n )\n modal.set_actions([\n TextButton(self.config.get_local_text(\"modal_understood\"), on_click=modal.close_dlg, style=ButtonStyle(color=REDEMPTION)),\n ]\n )\n modal.open_dlg()\n return\n self.page = \"chronometer\"\n self.whileBreak = False\n func.visibler(self.timer_container, False)\n func.visibler(self.timer_stop_container, True)\n func.visibler(self.select_minute, False)\n func.visibler(self.minute_part, True)\n self.shield_container.content.content.color = REDEMPTION\n self.shield_container.update()\n self.settings_container.content.content.color = REDEMPTION\n self.settings_container.update()\n chrono = Chronometer(self.select_minute_dropdown.value)\n while True:\n result = chrono.chrono_counter()\n if self.whileBreak:\n stop_click()\n break\n if result == \"time_is_up\" or result[0] < 0 or result[1] < 0:\n self.remaining_time.value = self.config.get_local_text(\"time_is_up\")\n self.remaining_time_container.update()\n self.progressbar.value = None\n self.progressbar_container.update()\n self.percent.value = \"%100\"\n self.percent_container.update()\n self.whileBreak = True\n if int(self.minute) == 3:\n self.gold_bar.image_src = func.get_asset(\"0.08.png\")\n elif int(self.minute) == 6:\n self.gold_bar.image_src = func.get_asset(\"0.16.png\")\n elif int(self.minute) == 9:\n self.gold_bar.image_src = func.get_asset(\"0.24.png\")\n elif int(self.minute) == 12:\n self.gold_bar.image_src = func.get_asset(\"0.32.png\")\n elif int(self.minute) == 15:\n self.gold_bar.image_src = func.get_asset(\"0.36.png\")\n elif int(self.minute) == 20:\n self.gold_bar.image_src = func.get_asset(\"0.40.png\")\n elif int(self.minute) == 25:\n self.gold_bar.image_src = func.get_asset(\"0.44.png\")\n elif int(self.minute) == 30:\n self.gold_bar.image_src = func.get_asset(\"0.48.png\")\n self.gold_bar.update()\n if self.config.lastConfig[\"settings\"][\"task_completed_effect\"]:\n e.page.overlay[0].play()\n break \n if result[3] >= 30:\n self.gold_bar.image_src = func.get_asset(\"0.48.png\")\n elif result[3] 
>= 25:\n self.gold_bar.image_src = func.get_asset(\"0.44.png\")\n elif result[3] >= 20:\n self.gold_bar.image_src = func.get_asset(\"0.40.png\")\n elif result[3] >= 15:\n self.gold_bar.image_src = func.get_asset(\"0.36.png\")\n elif result[3] >= 12:\n self.gold_bar.image_src = func.get_asset(\"0.32.png\")\n elif result[3] >= 9:\n self.gold_bar.image_src = func.get_asset(\"0.24.png\")\n elif result[3] >= 6:\n self.gold_bar.image_src = func.get_asset(\"0.16.png\")\n elif result[3] >= 3:\n self.gold_bar.image_src = func.get_asset(\"0.08.png\")\n self.gold_bar.update()\n self.progressbar.value = result[2]*0.01\n self.percent.value = \"%\"+str(result[2])\n self.percent_container.update()\n self.progressbar_container.update()\n self.remaining_time.value = str(result[0]) + f\" {self.config.get_local_text('minute')}, \" + str(result[1]) + f\" {self.config.get_local_text('second')}\"\n self.remaining_time_container.update()\n sleep(1)\n \n self.minute = \"\"\n \n self.select_minute_dropdown = Dropdown(\n on_change=select_minute_dropdown_change,\n label=self.config.get_local_text('task_minute'),\n hint_text=self.config.get_local_text('task_selection_hint_text'),\n filled=REDEMPTION,\n focused_border_color=REDEMPTION,\n label_style=TextStyle(color=WHITE, weight=FontWeight.BOLD),\n focused_bgcolor=BG,\n color=WHITE,\n text_style=TextStyle(color=WHITE, weight=FontWeight.BOLD),\n options=[\n dropdown.Option(text=self.config.get_local_text('3_minute'), key=\"3\"),\n dropdown.Option(text=self.config.get_local_text('6_minute'), key=\"6\"),\n dropdown.Option(text=self.config.get_local_text('9_minute'), key=\"9\"),\n dropdown.Option(text=self.config.get_local_text('12_minute'), key=\"12\"),\n dropdown.Option(text=self.config.get_local_text('15_minute'), key=\"15\"),\n dropdown.Option(text=self.config.get_local_text('20_minute'), key=\"20\"),\n dropdown.Option(text=self.config.get_local_text('25_minute'), key=\"25\"),\n dropdown.Option(text=self.config.get_local_text('30_minute'), key=\"30\"),\n ],\n autofocus=True,\n )\n \n self.select_minute = Column(\n visible=False,\n alignment=\"center\",\n horizontal_alignment=\"center\",\n spacing=90,\n controls=[\n self.select_minute_dropdown\n ]\n )\n \n self.languages = list()\n for langKey, value in self.config.lastConfig[\"languages\"].items():\n self.languages.append(dropdown.Option(text=value[\"language_long_name\"], key=str(langKey)))\n \n self.program_restart = Text(self.config.get_local_text('program_restart'), visible=False, color=REDEMPTION)\n \n self.settings = Column(\n visible=False,\n alignment=\"start\",\n spacing=10,\n width=700,\n controls=[\n Row(\n controls=[\n Switch(\n thumb_color=REDEMPTION if self.config.lastConfig[\"settings\"][\"sound_effects\"] == True else WHITE,\n inactive_thumb_color=FG, \n inactive_track_color=BG, \n track_color=BG,\n value=self.config.lastConfig[\"settings\"][\"sound_effects\"],\n on_change=lambda e: settings_change(e, \"sound_effects\") \n ),\n Text(self.config.get_local_text('sound_effects_settings'), size=17, weight=FontWeight.BOLD)\n ]\n ),\n Row(\n controls=[\n Switch(\n thumb_color=REDEMPTION if self.config.lastConfig[\"settings\"][\"task_completed_effect\"] == True else WHITE,\n inactive_thumb_color=FG, \n inactive_track_color=BG, \n track_color=BG,\n value=self.config.lastConfig[\"settings\"][\"task_completed_effect\"],\n on_change=lambda e: settings_change(e, \"task_completed_effect\") \n ),\n Text(self.config.get_local_text('task_completed_effect_settings'), size=17, weight=FontWeight.BOLD)\n ]\n ),\n Row(\n 
controls=[\n Switch(\n thumb_color=REDEMPTION if self.config.lastConfig[\"settings\"][\"discord_integration\"] == True else WHITE,\n inactive_thumb_color=FG, \n inactive_track_color=BG, \n track_color=BG,\n value=self.config.lastConfig[\"settings\"][\"discord_integration\"],\n on_change=lambda e: settings_change(e, \"discord_integration\") \n ),\n Text(self.config.get_local_text('discord_integration_settings'), size=17, weight=FontWeight.BOLD)\n ]\n ),\n Row(\n controls=[\n Switch(\n thumb_color=REDEMPTION if self.config.lastConfig[\"settings\"][\"discord_shield_password_view\"] == True else WHITE,\n inactive_thumb_color=FG, \n inactive_track_color=BG, \n track_color=BG,\n value=self.config.lastConfig[\"settings\"][\"discord_shield_password_view\"],\n on_change=lambda e: settings_change(e, \"discord_shield_password_view\") \n ),\n Text(self.config.get_local_text('discord_rpc_password_view'), size=17, weight=FontWeight.BOLD)\n ]\n ),\n Dropdown(\n on_change=language_dropdown_change,\n label=self.config.get_local_text('language_dropdown_label'),\n hint_text=self.config.get_local_text('language_long_name'),\n filled=REDEMPTION,\n focused_border_color=REDEMPTION,\n label_style=TextStyle(color=WHITE, weight=FontWeight.BOLD),\n focused_bgcolor=BG,\n color=WHITE,\n text_style=TextStyle(color=WHITE, weight=FontWeight.BOLD),\n width=300,\n options=self.languages,\n autofocus=True\n ),\n self.program_restart\n ]\n )\n \n self.shield_password = TextField(\n label=self.config.get_local_text('lobby_password_label'),\n color=WHITE, \n border_color=REDEMPTION,\n hint_text=self.config.get_local_text('lobby_password_hint_text'),\n label_style=TextStyle(color=WHITE, weight=FontWeight.BOLD),\n text_style=TextStyle(color=WHITE, weight=FontWeight.BOLD),\n focused_border_color=REDEMPTION,\n autofocus=True\n )\n \n if self.config.lastConfig[\"settings\"][\"shield_password\"] is not None:\n self.shield_password.value = self.config.lastConfig[\"settings\"][\"shield_password\"]\n \n if self.config.lastConfig[\"settings\"][\"shield_status\"] == True:\n self.shield_password.disabled = True\n \n self.shield = Column(\n visible=False,\n alignment=\"center\",\n horizontal_alignment=\"center\",\n spacing=90,\n width=700,\n controls=[\n self.shield_password,\n Tooltip(\n text_align=\"center\",\n message=self.config.get_local_text(\"shield_switch_tooltip\"),\n content=Switch(\n thumb_color=REDEMPTION if self.config.lastConfig[\"settings\"][\"shield_status\"] == True else WHITE,\n inactive_thumb_color=FG, \n inactive_track_color=BG, \n track_color=BG,\n value=self.config.lastConfig[\"settings\"][\"shield_status\"],\n on_change=shield_change\n ),\n padding=10,\n border_radius=10,\n text_style=TextStyle(size=10, color=colors.WHITE),\n )\n ]\n )\n \n self.progressbar = ProgressBar(width=700, bgcolor=BG, color=REDEMPTION)\n \n self.progressbar_container = Container(\n content=self.progressbar\n )\n \n self.remaining_time = Text(weight=FontWeight.BOLD, value=f\"0 {self.config.get_local_text('minute')}, 0 {self.config.get_local_text('second')}\", color=WHITE, size=17)\n \n self.remaining_time_container = Container(\n content=self.remaining_time\n )\n \n self.control_progressbar = ProgressBar(width=700, bgcolor=BG, color=REDEMPTION)\n \n self.control_progressbar_container = Container(\n content=self.control_progressbar\n )\n \n self.control_label = Text(weight=FontWeight.BOLD, value=self.config.get_local_text('controls_regressing'), color=WHITE, size=20)\n \n self.control_label_container = Container(\n content=self.control_label\n 
)\n \n self.percent = Text(weight=FontWeight.BOLD, value=\"%0.00\", color=WHITE, size=17)\n \n self.percent_container = Container(\n content=self.percent\n )\n \n self.gold_bar = Container(\n width=220,\n height=150,\n image_src=Functions().get_asset(\"0.00.png\"),\n image_fit=ImageFit.FILL,\n )\n \n self.minute_part = Column(\n visible=False,\n alignment=\"center\",\n horizontal_alignment=\"center\",\n spacing=30,\n controls=[\n self.gold_bar,\n Container(\n padding=padding.only(left=20, right=20, bottom=0, top=0),\n content=Row(\n alignment=\"spaceBetween\",\n controls=[\n self.remaining_time_container,\n self.percent_container\n ] \n )\n ),\n self.progressbar_container\n ]\n )\n \n self.control = Column(\n alignment=\"center\",\n horizontal_alignment=\"center\",\n spacing=30,\n controls=[\n Container(\n padding=padding.only(left=20, right=20, bottom=0, top=0),\n content=Row(\n alignment=\"center\",\n controls=[\n self.control_label_container,\n ] \n )\n ),\n self.control_progressbar_container\n ]\n )\n \n self.timer_stop_container = Tooltip(\n visible=False,\n text_align=\"center\",\n message=self.config.get_local_text('timer_stop_tooltip'),\n content=Container(\n width=50,\n bgcolor=REDEMPTION,\n border_radius=10,\n height=50,\n content=Icon(icons.STOP, color=WHITE),\n on_click=stop_click\n ),\n padding=10,\n border_radius=10,\n text_style=TextStyle(size=10, color=colors.WHITE),\n )\n \n self.timer_container = Tooltip(\n text_align=\"center\",\n message=self.config.get_local_text('timer_start_tooltip'),\n content=Container(\n width=50,\n bgcolor=REDEMPTION,\n border_radius=10,\n height=50,\n content=Icon(icons.PLAY_ARROW, color=WHITE),\n on_click=start_click\n ),\n padding=10,\n border_radius=10,\n text_style=TextStyle(size=10, color=colors.WHITE),\n )\n \n self.shield_container = Tooltip(\n text_align=\"center\",\n message=self.config.get_local_text('shield_tooltip'),\n content=Container(\n width=50,\n bgcolor=BG,\n border_radius=10,\n height=50,\n content=Icon(icons.SHIELD, color=WHITE),\n on_click=shield_click\n ),\n padding=10,\n border_radius=10,\n text_style=TextStyle(size=10, color=colors.WHITE),\n )\n \n self.settings_container = Tooltip(\n text_align=\"center\",\n message=self.config.get_local_text('settings_tooltip'),\n content=Container(\n width=50,\n bgcolor=BG,\n border_radius=10,\n height=50,\n content=Icon(icons.SETTINGS, color=WHITE),\n on_click=settings_click\n ),\n padding=10,\n border_radius=10,\n text_style=TextStyle(size=10, color=colors.WHITE),\n )\n \n self.rail = Container(\n visible=False,\n content=Row(\n alignment=\"center\",\n controls=[\n Container(\n bgcolor=FG,\n border_radius=10,\n padding=10,\n width=200,\n content=Row(\n spacing=10,\n alignment=\"center\",\n controls=[\n self.timer_stop_container,\n self.timer_container,\n self.shield_container,\n self.settings_container\n ]\n )\n )\n ]\n )\n )\n \n self.general_container = Container(\n height=490,\n border_radius=10,\n bgcolor=FG,\n content=Column(\n alignment=\"center\",\n controls=[\n self.control,\n self.select_minute,\n self.minute_part,\n self.shield,\n self.settings\n ] \n ),\n padding=50\n )\n \n self.content = Container(\n alignment=alignment.center,\n content=Column(\n spacing=20,\n controls=[\n self.general_container,\n self.rail\n ]\n )\n )"
},
{
"identifier": "Functions",
"path": "modules/functions.py",
"snippet": "class Functions:\n def __init__(self) -> None:\n pass\n \n def get_asset_dir(self):\n if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):\n imageDir = os.path.join(sys._MEIPASS)\n else:\n imageDir = os.getcwd()\n return imageDir\n \n def get_asset(self, assetName):\n return self.get_asset_dir()+ \"/assets/\" + assetName\n \n def visibler(self, element, status):\n element.visible = status\n element.update()\n \n def text_set(self, element, updateElement, text):\n element.value = text\n updateElement.update()"
},
{
"identifier": "Config",
"path": "modules/config.py",
"snippet": "class Config:\n def __init__(self):\n self.lastConfig = {}\n \n def config_loader(self):\n try:\n f = open(f'config.json', encoding=\"utf-8\") \n self.lastConfig = json.load(f)\n f = open(f'languages.json', encoding=\"utf-8\") \n self.lastConfig[\"languages\"] = json.load(f)[\"languages\"]\n return True\n except:\n return False\n \n def write_config(self):\n with open('config.json', 'w', encoding='utf-8') as f:\n json.dump({\"settings\": self.lastConfig[\"settings\"]}, f, ensure_ascii=False, indent=4)\n self.config_loader()\n\n def get_local_text(self, key):\n return self.lastConfig[\"languages\"][self.lastConfig[\"settings\"][\"language\"]][key]"
},
{
"identifier": "Process",
"path": "modules/process.py",
"snippet": "class Process:\n def __init__(self, process):\n self.process = process\n\n def getProcessStatus(self) -> str:\n try:\n for proc in psutil.process_iter():\n if proc.name() == self.process:\n return proc.cwd()\n return \"stopped\"\n except:\n return \"error\"\n\n def stopProcess(self):\n try:\n getStatus = self.getProcessStatus()\n if getStatus != \"stopped\" and getStatus != \"error\":\n subprocess.check_output(f\"taskkill /im {self.process}\", shell=False, stderr=subprocess.STDOUT)\n return \"stopped\"\n else:\n return \"error\"\n except:\n return \"error\""
},
{
"identifier": "Modal",
"path": "modules/modal.py",
"snippet": "class Modal:\n def __init__(self, e, title, message, actions=None) -> None:\n self.e = e\n self.dlg_modal = AlertDialog(\n modal=True,\n title=Text(title),\n content=Text(message),\n actions_alignment=MainAxisAlignment.END\n )\n if actions is not None:\n for action in actions:\n self.dlg_modal.actions.append(action)\n \n def close_dlg(self, event=None):\n self.dlg_modal.open = False\n self.e.page.update()\n \n def open_dlg(self):\n self.e.page.dialog = self.dlg_modal\n self.dlg_modal.open = True\n self.e.page.update()\n \n def set_actions(self, actions=None):\n if actions is not None:\n for action in actions:\n self.dlg_modal.actions.append(action)"
},
{
"identifier": "Discord",
"path": "modules/discord.py",
"snippet": "class Discord(threading.Thread):\n def __init__(self, content, config):\n threading.Thread.__init__(self)\n self.content = content\n self.config = config\n self.clientID = \"1177622601808490576\"\n self.rpc = Presence(self.clientID, pipe=0)\n self.buttons = [\n {\n \"label\": self.config.get_local_text('rpc_download_software'), \n \"url\": \"https://github.com/oniyevski/oniRedemption/releases\"\n }, \n {\n \"label\": self.config.get_local_text('rpc_author_page'), \n \"url\": \"https://oniyevski.pw/\"\n }\n ]\n self.epochTime = 0\n self.epochFix = False\n \n def rpc_connect(self):\n try:\n self.rpc.connect()\n return True\n except:\n return False\n \n def start_rpc(self):\n while True:\n if self.config.lastConfig[\"settings\"][\"discord_integration\"]: \n try:\n if self.content.page == \"start\":\n self.epochFix = False\n self.rpc.update(\n details=self.config.get_local_text('rpc_homepage'),\n large_image=\"logo\",\n buttons=self.buttons\n )\n elif self.content.page == \"shield\":\n self.epochFix = False\n if self.config.lastConfig[\"settings\"][\"shield_status\"] and self.config.lastConfig[\"settings\"][\"shield_password\"] is not None:\n self.rpc.update(\n details=f\"{self.config.get_local_text('rpc_shield_password')} \" + (self.config.lastConfig[\"settings\"][\"shield_password\"] if self.config.lastConfig[\"settings\"][\"discord_shield_password_view\"] and self.config.lastConfig[\"settings\"][\"shield_password\"] is None else str(self.config.lastConfig[\"settings\"][\"shield_password\"])[:2]+\"********\"),\n state=f\"{self.config.get_local_text('rpc_shield_status')} \" + (self.config.get_local_text('rpc_active') if self.config.lastConfig[\"settings\"][\"shield_status\"] else self.config.get_local_text('rpc_deactive')),\n large_image=\"logo\",\n buttons=self.buttons\n )\n else:\n self.rpc.update(\n details=f\"{self.config.get_local_text('rpc_shield_status')} \" + self.config.get_local_text('rpc_deactive'),\n large_image=\"logo\",\n buttons=self.buttons\n )\n elif self.content.page == \"settings\":\n self.epochFix = False\n self.rpc.update(\n details=self.config.get_local_text('rpc_settings'),\n large_image=\"logo\",\n buttons=self.buttons,\n )\n elif self.content.page == \"chronometer\":\n if self.content.whileBreak:\n self.epochFix = False\n self.rpc.update(\n details=self.config.get_local_text('rpc_mission_completed'),\n state=self.config.get_local_text(f'{self.content.minute}_minute'),\n large_image=\"logo\",\n buttons=self.buttons\n )\n else:\n if self.epochFix == False:\n self.epochTime = int(time.time())\n self.epochFix = True\n self.rpc.update(\n details=self.config.get_local_text('rpc_on_mission'),\n state=self.config.get_local_text(f'{self.content.minute}_minute'),\n large_image=\"logo\",\n buttons=self.buttons,\n start=self.epochTime\n )\n except:\n pass\n time.sleep(5)\n else:\n time.sleep(2)"
}
] | import os, threading
from time import sleep
from flet import *
from ui.colors import *
from ui.header import Header
from ui.footer import Footer
from ui.content import Content
from modules.functions import Functions
from modules.config import Config
from modules.process import Process
from modules.modal import Modal
from modules.discord import Discord | 8,041 |
config = Config()
headerClass = Header(config)
footerClass = Footer(config)
contentClass = Content(config)
func = Functions()
|
config = Config()
headerClass = Header(config)
footerClass = Footer(config)
contentClass = Content(config)
func = Functions()
| discord = Discord(content=contentClass, config=config) | 7 | 2023-11-24 21:15:24+00:00 | 12k |
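The Config snippet in the record above is essentially a JSON round trip: load the settings plus a separate languages file, then write back only the settings section. The following is a minimal stand-alone sketch of that load/write cycle, assuming a hypothetical toy_config.json path and DEFAULTS dict; it illustrates the pattern and is not the application's actual Config class.

import copy
import json
from pathlib import Path

# Illustrative path and defaults; the record above uses config.json and languages.json.
CONFIG_PATH = Path("toy_config.json")
DEFAULTS = {"settings": {"language": "en_us", "sound_effects": True}}


def load_config() -> dict:
    """Return stored settings, falling back to DEFAULTS if the file is missing or invalid."""
    try:
        return json.loads(CONFIG_PATH.read_text(encoding="utf-8"))
    except (FileNotFoundError, json.JSONDecodeError):
        return copy.deepcopy(DEFAULTS)


def write_config(config: dict) -> None:
    """Persist only the 'settings' section, mirroring Config.write_config above."""
    CONFIG_PATH.write_text(
        json.dumps({"settings": config["settings"]}, ensure_ascii=False, indent=4),
        encoding="utf-8",
    )


if __name__ == "__main__":
    cfg = load_config()
    cfg["settings"]["language"] = "tr_tr"
    write_config(cfg)
    print(load_config()["settings"]["language"])  # -> tr_tr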
davecasp/add-thin | add_thin/config.py | [
{
"identifier": "DataModule",
"path": "add_thin/data.py",
"snippet": "class DataModule(pl.LightningDataModule):\n \"\"\"\n Datamodule for variable length event sequences for temporal point processes.\n\n Parameters:\n ----------\n root : str\n Path to data.\n name : str\n Name of dataset.\n split_seed : int\n Seed for random split.\n batch_size : int\n Batch size.\n train_size : float\n Percentage of data to use for training.\n val_size : float\n Percentage of data to use for validation.\n forecast : bool\n Whether to use the dataset for forecasting.\n \"\"\"\n\n def __init__(\n self,\n root: Path,\n name: str,\n split_seed: int = 80672983,\n batch_size: int = 32,\n train_size: float = 0.6,\n val_size: float = 0.2,\n forecast: bool = False,\n ) -> None:\n super().__init__()\n self.root = root\n self.split_seed = split_seed\n self.batch_size = batch_size\n self.train_percentage = train_size\n self.val_percentage = val_size\n self.name = name\n self.forecast = forecast\n\n self.dataset = None\n self.train_data = None\n self.val_data = None\n self.test_data = None\n\n def prepare_data(self) -> None:\n \"\"\"Load sequence data from root.\"\"\"\n time_sequences = load_sequences(self.root, self.name)\n\n if self.forecast:\n self.forecast_horizon = FORECAST_HORIZON[self.name]\n else:\n self.forecast_horizon = None\n\n self.dataset = SequenceDataset(sequences=time_sequences)\n self.tmax = self.dataset.tmax\n self.train_size = int(self.train_percentage * len(self.dataset))\n self.val_size = int(self.val_percentage * len(self.dataset))\n self.test_size = len(self.dataset) - (self.train_size + self.val_size)\n\n self.train_data, self.val_data, self.test_data = random_split(\n self.dataset,\n [self.train_size, self.val_size, self.test_size],\n generator=torch.Generator().manual_seed(self.split_seed),\n )\n\n self.get_statistics()\n\n def get_statistics(self):\n # Get train stats\n seq_lengths = []\n for i in range(len(self.train_data)):\n seq_lengths.append(len(self.train_data[i]))\n self.n_max = max(seq_lengths)\n\n def setup(self, stage=None) -> None:\n pass\n\n def train_dataloader(self) -> DataLoader:\n return DataLoader(\n self.train_data,\n batch_size=self.batch_size,\n collate_fn=Batch.from_sequence_list,\n num_workers=0,\n shuffle=True,\n )\n\n def val_dataloader(self) -> DataLoader:\n return DataLoader(\n self.val_data,\n batch_size=len(self.val_data), # evaluate all at once\n collate_fn=Batch.from_sequence_list,\n num_workers=0,\n drop_last=False,\n )\n\n def test_dataloader(self) -> DataLoader:\n return DataLoader(\n self.test_data,\n batch_size=len(self.test_data), # evaluate all at once\n collate_fn=Batch.from_sequence_list,\n num_workers=0,\n drop_last=False,\n )"
},
{
"identifier": "AddThin",
"path": "add_thin/diffusion/model.py",
"snippet": "class AddThin(DiffusionModell):\n \"\"\"\n Implementation of AddThin (Add and Thin: Diffusion for Temporal Point Processes).\n\n Parameters\n ----------\n classifier_model : nn.Module\n Model for predicting the intersection of x_0 and x_n from x_n\n intensity_model : nn.Module\n Model for predicting the intensity of x_0 without x_n\n max_time : float\n T of the temporal point process\n n_max : int, optional\n Maximum number of events, by default 100\n steps : int, optional\n Number of diffusion steps, by default 100\n hidden_dims : int, optional\n Hidden dimensions of the models, by default 128\n emb_dim : int, optional\n Embedding dimensions of the models, by default 32\n encoder_layer : int, optional\n Number of encoder layers, by default 4\n kernel_size : int, optional\n Kernel size of the CNN, by default 16\n forecast : None, optional\n If not None, will turn the model into a conditional one for forecasting\n \"\"\"\n\n def __init__(\n self,\n classifier_model,\n intensity_model,\n max_time: float,\n n_max: int = 100,\n steps: int = 100,\n hidden_dims: int = 128,\n emb_dim: int = 32,\n encoder_layer: int = 4,\n kernel_size: int = 16,\n forecast=None,\n ) -> None:\n super().__init__(steps)\n # Set models parametrizing the approximate posterior\n self.classifier_model = classifier_model\n self.intensity_model = intensity_model\n\n self.n_max = n_max\n\n # Init forecast settings\n if forecast:\n self.forecast = True\n self.history_encoder = nn.GRU(\n input_size=emb_dim,\n hidden_size=emb_dim,\n batch_first=True,\n )\n self.history_mlp = nn.Sequential(\n nn.Linear(2 * emb_dim, hidden_dims), nn.ReLU()\n )\n self.forecast_window = forecast\n else:\n self.forecast = False\n self.history = None\n\n self.set_encoders(\n hidden_dims=hidden_dims,\n max_time=max_time,\n emb_dim=emb_dim,\n encoder_layer=encoder_layer,\n kernel_size=kernel_size,\n steps=steps,\n )\n\n def set_encoders(\n self,\n hidden_dims: int,\n max_time: float,\n emb_dim: int,\n encoder_layer: int,\n kernel_size: int,\n steps: int,\n ) -> None:\n \"\"\"\n Set the encoders for the model.\n\n Parameters\n ----------\n hidden_dims : int\n Hidden dimensions of the models\n max_time : float\n T of the temporal point process\n emb_dim : int\n Embedding dimensions of the models\n encoder_layer : int\n Number of encoder layers\n kernel_size : int\n Kernel size of the CNN\n steps : int\n Number of diffusion steps\n \"\"\"\n # Event time encoder\n position_emb = NyquistFrequencyEmbedding(\n dim=emb_dim // 2, timesteps=max_time\n )\n self.time_encoder = nn.Sequential(position_emb)\n\n # Diffusion time encoder\n position_emb = NyquistFrequencyEmbedding(dim=emb_dim, timesteps=steps)\n self.diffusion_time_encoder = nn.Sequential(\n position_emb,\n nn.Linear(emb_dim, emb_dim),\n nn.GELU(),\n nn.Linear(emb_dim, emb_dim),\n )\n\n # Event sequence encoder\n self.sequence_encoder = CNNSeqEmb(\n emb_layer=encoder_layer,\n input_dim=hidden_dims,\n emb_dims=hidden_dims,\n kernel_size=kernel_size,\n )\n\n def set_history(self, batch: Batch) -> None:\n \"\"\"\n Set the history to condition the model.\n\n Parameters\n ----------\n batch : Batch\n Batch of data\n \"\"\"\n B, L = batch.time.shape\n\n # Encode event times\n time_emb = self.time_encoder(\n torch.cat(\n [batch.time.unsqueeze(-1), batch.tau.unsqueeze(-1)], dim=-1\n )\n ).reshape(B, L, -1)\n\n # Compute history embedding\n embedding = self.history_encoder(time_emb)[0]\n\n # Index relative to time and set history\n index = (batch.mask.sum(-1).long() - 
1).unsqueeze(-1).unsqueeze(-1)\n gather_index = index.repeat(1, 1, embedding.shape[-1])\n self.history = embedding.gather(1, gather_index).squeeze(-2)\n\n def compute_emb(\n self, n: TensorType[torch.long, \"batch\"], x_n: Batch\n ) -> Tuple[\n TensorType[\"batch\", \"embedding\"],\n TensorType[\"batch\", \"sequence\", \"embedding\"],\n TensorType[\"batch\", \"sequence\", \"embedding\"],\n ]:\n \"\"\"\n Get the embeddings of x_n.\n\n Parameters\n ----------\n n : TensorType[torch.long, \"batch\"]\n Diffusion time step\n x_n : Batch\n Batch of data\n\n Returns\n -------\n Tuple[\n TensorType[\"batch\", \"embedding\"],\n TensorType[\"batch\", \"sequence\", \"embedding\"],\n TensorType[\"batch\", \"sequence\", \"embedding\"],\n ]\n Diffusion time embedding, event time embedding, event sequence embedding\n \"\"\"\n B, L = x_n.batch_size, x_n.seq_len\n\n # embed diffusion and process time\n dif_time_emb = self.diffusion_time_encoder(n)\n\n # Condition ADD-THIN on history by adding it to the diffusion time embedding\n if self.forecast:\n dif_time_emb = self.history_mlp(\n torch.cat([self.history, dif_time_emb], dim=-1)\n )\n\n # Embed event and interevent time\n time_emb = self.time_encoder(\n torch.cat([x_n.time.unsqueeze(-1), x_n.tau.unsqueeze(-1)], dim=-1)\n ).reshape(B, L, -1)\n\n # Embed event sequence and mask out\n event_emb = self.sequence_encoder(time_emb)\n event_emb = event_emb * x_n.mask[..., None]\n\n return (\n dif_time_emb,\n time_emb,\n event_emb,\n )\n\n def get_n(self, shape, device, min=None, max=None) -> TensorType[int]:\n \"\"\"\n Uniformly sample n, i.e., the diffusion time step per sequence.\n\n Parameters\n ----------\n shape :\n Shape of the tensor\n device :\n Device of the tensor\n min : None, optional\n Minimum value of n, by default None\n max : None, optional\n Maximum value of n, by default None\n\n Returns\n -------\n TensorType[int]\n Sampled n\n \"\"\"\n if min is None or max is None:\n min = 0\n max = self.steps\n return torch.randint(\n min,\n max,\n size=shape,\n device=device,\n dtype=torch.long,\n )\n\n def noise(\n self, x_0: Batch, n: TensorType[torch.long, \"batch\"]\n ) -> Tuple[Batch, Batch]:\n \"\"\"\n Sample x_n from x_0 by applying the noising process.\n\n Parameters\n ----------\n x_0 : Batch\n Batch of data\n n : TensorType[torch.long, \"batch\"]\n Number of noise steps\n\n Returns\n -------\n Tuple[Batch, Batch]\n x_n and thinned x_0\n \"\"\"\n # Thin x_0\n x_0_kept, x_0_thinned = x_0.thin(alpha=self.alpha_cumprod[n])\n\n # Superposition with HPP (add)\n hpp = generate_hpp(\n tmax=x_0.tmax,\n n_sequences=len(x_0),\n intensity=1 - self.alpha_cumprod[n],\n )\n x_n = x_0_kept.add_events(hpp)\n\n return x_n, x_0_thinned\n\n def forward(\n self, x_0: Batch\n ) -> Tuple[\n TensorType[float, \"batch\", \"sequence_x_n\"],\n TensorType[float, \"batch\"],\n Batch,\n ]:\n \"\"\"\n Forward pass to train the model, i.e., predict x_0 from x_n.\n\n Parameters\n ----------\n x_0 : Batch\n Batch of data\n\n Returns\n -------\n Tuple[\n TensorType[float, \"batch\", \"sequence_x_n\"],\n TensorType[float, \"batch\"],\n Batch,\n ]\n classification logits, log likelihood of x_0 without x_n, noised data\n \"\"\"\n # Uniformly sample n\n n = self.get_n(\n min=0,\n max=self.steps,\n shape=(len(x_0),),\n device=x_0.time.device,\n )\n # Noise x_0 to get x_n\n x_n, x_0_thin = self.noise(x_0=x_0, n=n)\n\n # Embed x_n\n (dif_time_emb, time_emb, event_emb) = self.compute_emb(n=n, x_n=x_n)\n\n # Predict x_0 from x_n\n x_n_and_x_0_logits = self.classifier_model(\n 
dif_time_emb=dif_time_emb,\n time_emb=time_emb,\n event_emb=event_emb,\n )\n\n # Evaluate intensity of thinned x_0\n log_like_x_0 = self.intensity_model.log_likelihood(\n event_emb=event_emb,\n dif_time_emb=dif_time_emb,\n x_0=x_0_thin,\n x_n=x_n,\n )\n\n return x_n_and_x_0_logits, log_like_x_0, x_n\n\n def sample(self, n_samples: int, tmax) -> Batch:\n \"\"\"\n Sample x_0 from ADD-THIN starting from x_N.\n\n Parameters\n ----------\n n_samples : int\n Number of samples\n tmax : float\n T of the temporal point process\n begin_forecast : None, optional\n Beginning of the forecast, by default None\n end_forecast : None, optional\n End of the forecast, by default None\n\n Returns\n -------\n Batch\n Sampled x_0s\n \"\"\"\n # Init x_N by sampling from HPP\n x_N = generate_hpp(tmax=tmax, n_sequences=n_samples)\n x_n_1 = x_N\n\n # Sample x_N-1, ..., x_1 by applying posterior\n for n_int in range(self.steps - 1, 0, -1):\n n = torch.full(\n (n_samples,), n_int, device=tmax.device, dtype=torch.long\n )\n x_n_1 = self.sample_posterior(x_n=x_n_1, n=n)\n\n # Sample x_0\n n = torch.full(\n (n_samples,), n_int - 1, device=tmax.device, dtype=torch.long\n )\n x_0, _, _, _ = self.sample_x_0(n=n, x_n=x_n_1)\n\n return x_0\n\n def sample_x_0(\n self, n: TensorType[int], x_n: Batch\n ) -> Tuple[Batch, Batch, Batch, Batch]:\n \"\"\"\n Sample x_0 from x_n by classifying the intersection of x_0 and x_n and sampling from the intensity.\n\n Parameters\n ----------\n n : TensorType[int]\n Diffusion time steps\n x_n : Batch\n Batch of data\n\n Returns\n -------\n Tuple[Batch, Batch, Batch, Batch]\n x_0, classified_x_0, sampled_x_0, classified_not_x_0\n \"\"\"\n (\n dif_time_emb,\n time_emb,\n event_emb,\n ) = self.compute_emb(n=n, x_n=x_n)\n\n # Sample x_0\\x_n from intensity\n sampled_x_0 = self.intensity_model.sample(\n event_emb=event_emb,\n dif_time_emb=dif_time_emb,\n n_samples=1,\n x_n=x_n,\n )\n\n # Classify (x_0 ∩ x_n) from x_n\n x_n_and_x_0_logits = self.classifier_model(\n dif_time_emb=dif_time_emb, time_emb=time_emb, event_emb=event_emb\n )\n classified_x_0, classified_not_x_0 = x_n.thin(\n alpha=x_n_and_x_0_logits.sigmoid()\n )\n return (\n classified_x_0.add_events(sampled_x_0),\n classified_x_0,\n sampled_x_0,\n classified_not_x_0,\n )\n\n def sample_posterior(self, x_n: Batch, n: TensorType[int]) -> Batch:\n \"\"\"\n Sample x_n-1 from x_n by predicting x_0 and then sampling from the posterior.\n\n Parameters\n ----------\n x_n : Batch\n Batch of data\n n : TensorType\n Diffusion time steps\n\n Returns\n -------\n Batch\n x_n-1\n \"\"\"\n # Sample x_0 and x_n\\x_0\n _, classified_x_0, sampled_x_0, classified_not_x_0 = self.sample_x_0(\n n=n, x_n=x_n\n )\n\n # Sample C\n x_0_kept, _ = sampled_x_0.thin(alpha=self.alpha_x0_kept[n - 1])\n\n # Sample D\n hpp = generate_hpp(\n tmax=x_n.tmax,\n n_sequences=x_n.batch_size,\n intensity=self.add_remove[n - 1],\n )\n\n # Sample E\n x_n_kept, _ = classified_not_x_0.thin(alpha=self.alpha_xn_kept[n - 1])\n\n # Superposition of B, C, D, E to attain x_n-1\n x_n_1 = (\n classified_x_0.add_events(hpp)\n .add_events(x_n_kept)\n .add_events(x_0_kept)\n )\n return x_n_1"
},
{
"identifier": "PointClassifier",
"path": "add_thin/backbones/classifier.py",
"snippet": "class PointClassifier(nn.Module):\n \"\"\"\n Classifier to predict the intersection of x_0 and x_n given x_n.\n\n Parameters:\n ----------\n\n hidden_dims : int\n Number of hidden dimensions\n layer : int\n Number of layers\n \"\"\"\n\n def __init__(\n self,\n hidden_dims: int,\n layer: int,\n ) -> None:\n super().__init__()\n input_dim = 3 * hidden_dims\n\n # Instantiate MLP for the classifier\n layers = [nn.Linear(input_dim, hidden_dims), nn.ReLU()]\n for _ in range(layer - 1):\n layers.append(nn.Linear(hidden_dims, hidden_dims))\n layers.append(nn.ReLU())\n layers.append(nn.Linear(hidden_dims, 1))\n self.model = nn.Sequential(*layers)\n\n def forward(\n self,\n dif_time_emb: TensorType[float, \"batch\", \"embedding\"],\n time_emb: TensorType[float, \"batch\", \"sequence\", \"time_emb\"],\n event_emb: TensorType[\"batch\", \"sequence\", \"embedding\"],\n ) -> TensorType[float, \"batch\", \"sequence\"]:\n \"\"\"\n Parameters:\n ----------\n dif_time_emb : TensorType[float, \"batch\", \"embedding\"]\n Embedding of the diffusion time\n time_emb : TensorType[float, \"batch\", \"sequence\", \"time_emb\"]\n Embedding of the event times\n event_emb : TensorType[\"batch\", \"sequence\", \"embedding\"]\n Context embedding of the events\n\n Returns:\n -------\n logits : TensorType[float, \"batch\", \"sequence\"]\n Logits for each event in the sequences\n \"\"\"\n # Concatenate embeddings\n _, L, _ = time_emb.shape\n x = torch.cat(\n [\n time_emb,\n event_emb,\n dif_time_emb.unsqueeze(1).repeat(1, L, 1),\n ],\n dim=-1,\n )\n\n logits = self.model(x).squeeze(-1)\n return logits"
},
{
"identifier": "MixtureIntensity",
"path": "add_thin/distributions/intensities.py",
"snippet": "class MixtureIntensity(nn.Module):\n \"\"\"\n Class parameterizing the intensity function as a weighted mixture of distributions.\n\n Parameters:\n ----------\n n_components : int, optional\n Number of components to use in the mixture, by default 10\n embedding_size : int, optional\n Size of the event embedding, by default 128\n distribution : str, optional\n Distribution to use for the components, by default \"normal\"\n\n \"\"\"\n\n def __init__(\n self,\n n_components: int = 10,\n embedding_size: int = 128,\n distribution: str = \"normal\",\n ) -> None:\n super().__init__()\n\n assert (\n distribution in DISTRIBUTIONS.keys()\n ), f\"{distribution} not in {DISTRIBUTIONS.keys()}\"\n self.w_activation = torch.nn.Softplus()\n self.distribution = DISTRIBUTIONS[distribution]\n\n # Parallel compute parameters weight, mu and sigma for n components with one MLP\n self.n_components = n_components\n self.mlp = nn.Sequential(\n nn.Linear(embedding_size, embedding_size),\n nn.ReLU(),\n nn.Linear(embedding_size, 3 * n_components),\n )\n self.rejections_sample_multiple = 2\n\n def get_intensity_parameters(\n self,\n x_n: Batch,\n event_emb: TensorType[float, \"batch\", \"seq\", \"embedding\"],\n dif_time_emb: TensorType[float, \"batch\", \"embedding\"],\n ) -> Tuple[TensorType, TensorType, TensorType]:\n \"\"\"\n Compute the parameters of the intensity function.\n\n Parameters:\n ----------\n x_n : Batch\n Batch of event sequences to condition on\n event_emb : TensorType[float, \"batch\", \"seq\", \"embedding\"]\n Context embedding of the events\n dif_time_emb : TensorType[float, \"batch\", \"embedding\"]\n Embedding of the diffusion time\n\n Returns:\n -------\n location, scale, weight: List[TensorType]\n The parameters of the intensity function\n \"\"\"\n\n # Compute masked mean over sequence (zero padded)\n n_events = x_n.mask.sum(-1)\n seq_emb = event_emb.sum(1) / torch.clamp(n_events[..., None], min=1)\n\n parameters = self.mlp(torch.cat([seq_emb, dif_time_emb], dim=-1))\n return torch.split(\n parameters,\n [self.n_components, self.n_components, self.n_components],\n dim=-1,\n )\n\n def get_distribution(\n self,\n event_emb: TensorType[float, \"batch\", \"seq\", \"embedding\"],\n dif_time_emb: TensorType[float, \"batch\", \"embedding\"],\n x_n: Batch,\n L,\n ) -> Tuple[D.MixtureSameFamily, TensorType[float, \"batch\"]]:\n \"\"\"\n Instantiate the mixture-distribution parameterizing the intensity function.\n\n Parameters:\n ----------\n event_emb : TensorType[float, \"batch\", \"seq\", \"embedding\"]\n Context embedding of the events\n dif_time_emb : TensorType[float, \"batch\", \"embedding\"]\n Embedding of the diffusion time\n x_n : Batch\n Batch of event sequences to condition on\n L : int\n Maximum sequence length\n\n Returns:\n -------\n density, cumulative_intensity: Tuple[D.MixtureSameFamily, TensorType[float, \"batch\"]]\n The distribution and the cumulative intensity\n \"\"\"\n location, scale, weight = self.get_intensity_parameters(\n x_n=x_n,\n event_emb=event_emb,\n dif_time_emb=dif_time_emb,\n )\n\n # Include the number of events in x_n for the cumulative intensity\n weight = self.w_activation(weight)\n cumulative_intensity = (weight).sum(-1) * (x_n.mask.sum(-1) + 1)\n\n # Probs is normalized to sum to 1\n mixture_dist = D.Categorical(probs=weight.unsqueeze(1).repeat(1, L, 1))\n\n # Distribution parameters are the same for each sequence element\n component_dist = self.distribution(\n location.unsqueeze(1).repeat(1, L, 1),\n scale.unsqueeze(1).repeat(1, L, 1),\n )\n return 
(\n MixtureSameFamily(mixture_dist, component_dist),\n cumulative_intensity,\n )\n\n def log_likelihood(\n self,\n x_0: Batch,\n event_emb: TensorType[float, \"batch\", \"seq\", \"embedding\"],\n dif_time_emb: TensorType[float, \"batch\", \"embedding\"],\n x_n: Batch,\n ) -> TensorType[float, \"batch\"]:\n \"\"\"\n Compute the log-likelihood of the event sequences.\n\n Parameters:\n ----------\n x_0 : Batch\n Batch of event sequences\n event_emb : TensorType[float, \"batch\", \"seq\", \"embedding\"]\n Context embedding of the events\n dif_time_emb : TensorType[float, \"batch\", \"embedding\"]\n Embedding of the diffusion time\n x_n : Batch\n Batch of event sequences to condition on\n\n Returns:\n -------\n log_likelihood: TensorType[float, \"batch\"]\n The log-likelihood of the event sequences\n \"\"\"\n density, cif = self.get_distribution(\n event_emb=event_emb,\n dif_time_emb=dif_time_emb,\n x_n=x_n,\n L=x_0.seq_len,\n )\n\n # Normalize event time to [0, 1]\n x = x_0.time / x_0.tmax\n\n # Compute log-intensity with re-weighting\n log_intensity = (\n (density.log_prob(x) + torch.log(cif)[..., None]) * x_0.mask\n ).sum(-1)\n\n # Compute CIF for normalization\n cdf = density.cdf(torch.ones_like(x)).mean(1)\n cif = cif * cdf # Rescale between 0 and T\n\n return log_intensity - cif\n\n def sample(\n self,\n event_emb: TensorType[float, \"batch\", \"seq\", \"embedding\"],\n dif_time_emb: TensorType[float, \"batch\", \"embedding\"],\n n_samples: int,\n x_n: Batch,\n ) -> Batch:\n \"\"\"\n Sample event sequences from the intensity function.\n\n Parameters:\n ----------\n event_emb : TensorType[float, \"batch\", \"seq\", \"embedding\"]\n Context embedding of the events\n dif_time_emb : TensorType[float, \"batch\", \"embedding\"]\n Embedding of the diffusion time\n n_samples : int\n Number of samples to draw\n x_n : Batch\n Batch of event sequences to condition on\n\n Returns:\n -------\n Batch\n The sampled event sequences\n \"\"\"\n tmax = x_n.tmax\n density, cif = self.get_distribution(\n event_emb=event_emb,\n dif_time_emb=dif_time_emb,\n x_n=x_n,\n L=1,\n )\n\n # Get number of points per sample sequence from CIF\n count_distribution = D.Poisson(\n rate=cif\n * density.cdf(\n torch.ones(n_samples, 1, device=event_emb.device)\n ).squeeze()\n )\n sequence_len = (\n count_distribution.sample((n_samples,)).squeeze()\n ).long()\n\n # TODO implement smarter truncated normal, without rejection sampling.\n max_seq_len = sequence_len.max()\n\n while True:\n times = (\n density.sample(\n ((max_seq_len + 1) * self.rejections_sample_multiple,)\n )\n .squeeze(-1)\n .T\n * tmax\n )\n\n # Reject if not in [0, tmax]\n inside = torch.logical_and(times <= tmax, times >= 0)\n sort_idx = torch.argsort(\n inside.int(), stable=True, descending=True, dim=-1\n )\n inside = torch.take_along_dim(inside, sort_idx, dim=-1)[\n :, :max_seq_len\n ]\n times = torch.take_along_dim(times, sort_idx, dim=-1)[\n :, :max_seq_len\n ]\n\n # Randomly mask out events exceeding the actual sequence length\n mask = (\n torch.arange(0, times.shape[-1], device=times.device)[None, :]\n < sequence_len[:, None]\n )\n mask = mask * inside\n\n if (mask.sum(-1) == sequence_len).all():\n break\n else:\n self.rejections_sample_multiple += 1\n warnings.warn(\n f\"\"\"\nRejection sampling multiple increased to {self.rejections_sample_multiple}, as not enough event times were inside [0, tmax].\n\"\"\".strip()\n )\n\n times = times * mask\n\n return Batch.remove_unnescessary_padding(\n time=times, mask=mask, tmax=tmax, kept=None\n )"
},
{
"identifier": "DensityEstimation",
"path": "add_thin/tasks.py",
"snippet": "class DensityEstimation(Tasks):\n def __init__(\n self, model, learning_rate, lr_decay, weight_decay, lr_schedule\n ):\n super().__init__(\n model, learning_rate, lr_decay, weight_decay, lr_schedule\n )\n\n def training_step(self, batch, batch_idx):\n loss = self.step(batch, \"train\")\n return {\"loss\": loss}\n\n def validation_step(self, batch, batch_idx):\n with torch.no_grad():\n if self.global_step >= 1:\n sample = self.model.sample(1000, tmax=batch.tmax).to_time_list()\n\n mmd = MMD(\n sample,\n batch.to_time_list(),\n batch.tmax.detach().cpu().numpy(),\n )[0]\n wasserstein = lengths_distribution_wasserstein_distance(\n sample,\n batch.to_time_list(),\n batch.tmax.detach().cpu().numpy(),\n self.model.n_max,\n )\n self.log(\"val/sample_mmd\", mmd, batch_size=batch.batch_size)\n self.log(\n \"val/sample_count_wasserstein\",\n wasserstein,\n batch_size=batch.batch_size,\n )\n\n def test_step(self, batch, batch_idx):\n pass"
},
{
"identifier": "Forecasting",
"path": "add_thin/tasks.py",
"snippet": "class Forecasting(Tasks):\n def __init__(\n self,\n model,\n learning_rate,\n lr_decay,\n weight_decay,\n lr_schedule,\n ):\n super().__init__(\n model, learning_rate, lr_decay, weight_decay, lr_schedule\n )\n\n def set_history(self, batch):\n # Sample random start time for forecast window\n times = (\n torch.rand((len(batch),), device=batch.tmax.device)\n * (batch.tmax - 2 * self.model.forecast_window)\n + self.model.forecast_window\n )\n # Get history, future, and bounds of forecast window\n history, future, forecast_end, forecast_start = batch.split_time(\n times, times + self.model.forecast_window\n )\n self.model.set_history(history)\n return future, forecast_end, forecast_start\n\n def training_step(self, batch, batch_idx):\n future, _, forecast_start = self.set_history(batch)\n\n # rescale forecast to [0, T], same for inter-event times tau\n future.time = (\n (future.time - forecast_start[:, None]) / self.model.forecast_window\n ) * future.tmax\n future.tau = (future.tau / (self.model.forecast_window)) * future.tmax\n\n loss = self.step(future, \"train\")\n return {\"loss\": loss}\n\n def validation_step(self, batch, batch_idx):\n if self.global_step >= 1:\n futures = []\n samples = []\n maes = []\n # sample 5 forecast horizons per batch\n for _ in range(5):\n future, tmax, tmin = self.set_history(batch)\n sample = self.model.sample(len(future), tmax=future.tmax)\n # rescale and shift to right forecast window\n sample.time = (sample.time / future.tmax) * (tmax - tmin)[\n :, None\n ] + tmin[:, None]\n samples = samples + sample.to_time_list()\n futures = futures + future.to_time_list()\n maes.append(\n torch.abs(future.mask.sum(-1) - sample.mask.sum(-1))\n / (future.mask.sum(-1) + 1)\n )\n\n wasserstein = forecast_wasserstein(\n samples,\n futures,\n batch.tmax.detach().cpu().item(),\n )\n\n self.log(\n \"val/MAE_counts\",\n torch.cat(maes).mean(),\n batch_size=batch.batch_size,\n )\n self.log(\n \"val/forecast_wasserstein_distance\",\n wasserstein,\n batch_size=batch.batch_size,\n )\n\n def test_step(self, batch, batch_idx):\n pass"
}
] | from pathlib import Path
from omegaconf import DictConfig
from add_thin.data import DataModule
from add_thin.diffusion.model import AddThin
from add_thin.backbones.classifier import PointClassifier
from add_thin.distributions.intensities import MixtureIntensity
from add_thin.tasks import DensityEstimation, Forecasting | 8,121 |
def instantiate_datamodule(config: DictConfig, task_name):
return DataModule(
Path(config.root),
config.name,
batch_size=config.batch_size,
forecast=task_name == "forecast",
)
def instantiate_model(config: DictConfig, datamodule) -> AddThin:
classifier = PointClassifier(
hidden_dims=config.hidden_dims,
layer=config.classifier_layer,
)
|
def instantiate_datamodule(config: DictConfig, task_name):
return DataModule(
Path(config.root),
config.name,
batch_size=config.batch_size,
forecast=task_name == "forecast",
)
def instantiate_model(config: DictConfig, datamodule) -> AddThin:
classifier = PointClassifier(
hidden_dims=config.hidden_dims,
layer=config.classifier_layer,
) | intensity = MixtureIntensity( | 3 | 2023-11-24 13:18:19+00:00 | 12k |
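A minimal usage sketch for the instantiate_datamodule helper shown in the record above, assuming that function is already in scope as defined there; the dataset root, name, and batch size are illustrative placeholders, not values taken from the add_thin repository.

from omegaconf import OmegaConf

# Build a DictConfig carrying the keys instantiate_datamodule reads
# (root, name, batch_size); the concrete values are placeholders.
data_cfg = OmegaConf.create(
    {"root": "data/", "name": "my_dataset", "batch_size": 32}
)

# task_name == "forecast" switches the DataModule into forecasting mode,
# mirroring the comparison inside instantiate_datamodule.
dm = instantiate_datamodule(data_cfg, task_name="forecast")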
harisankar95/pathfinding3D | pathfinding3d/finder/msp.py | [
{
"identifier": "heuristic",
"path": "pathfinding3d/core/heuristic.py",
"snippet": "def null(dx: Union[int, float], dy: Union[int, float], dz: Union[int, float]) -> float:\ndef manhattan(dx: Union[int, float], dy: Union[int, float], dz: Union[int, float]) -> float:\ndef euclidean(dx: Union[int, float], dy: Union[int, float], dz: Union[int, float]) -> float:\ndef chebyshev(dx: Union[int, float], dy: Union[int, float], dz: Union[int, float]) -> float:\ndef octile(dx: Union[int, float], dy: Union[int, float], dz: Union[int, float]) -> float:"
},
{
"identifier": "Grid",
"path": "pathfinding3d/core/grid.py",
"snippet": "class Grid:\n def __init__(\n self,\n width: int = 0,\n height: int = 0,\n depth: int = 0,\n matrix: MatrixType = None,\n grid_id: Optional[int] = None,\n inverse: bool = False,\n ):\n \"\"\"\n A grid represents the map (as 3d-list of nodes).\n\n Parameters\n ----------\n width : int, optional\n The width of the grid.\n height : int, optional\n The height of the grid.\n depth : int, optional\n The depth of the grid.\n matrix : MatrixType\n A 3D array of values (numbers or objects specifying weight)\n that determine how nodes are connected and if they are walkable.\n If no matrix is given, all nodes will be walkable.\n inverse : bool, optional\n If true, all values in the matrix that are not 0 will be considered\n walkable. Otherwise all values that are 0 will be considered walkable.\n \"\"\"\n self.width, self.height, self.depth = self._validate_dimensions(width, height, depth, matrix)\n self.nodes = (\n build_nodes(self.width, self.height, self.depth, matrix, inverse, grid_id)\n if self.is_valid_grid()\n else [[[]]]\n )\n\n def _validate_dimensions(self, width: int, height: int, depth: int, matrix: MatrixType) -> tuple:\n if matrix is not None:\n if not (\n isinstance(matrix, (list, np.ndarray))\n and len(matrix) > 0\n and len(matrix[0]) > 0\n and len(matrix[0][0]) > 0\n ):\n raise ValueError(\"Provided matrix is not a 3D structure or is empty.\")\n return len(matrix), len(matrix[0]), len(matrix[0][0])\n return width, height, depth\n\n def is_valid_grid(self) -> bool:\n return self.width > 0 and self.height > 0 and self.depth > 0\n\n def node(self, x: int, y: int, z: int) -> Optional[GridNode]:\n \"\"\"\n Get node at position\n\n Parameters\n ----------\n x : int\n x position\n y : int\n y position\n z : int\n z position\n\n Returns\n -------\n GridNode\n node at position\n \"\"\"\n return self.nodes[x][y][z] if self.inside(x, y, z) else None\n\n def inside(self, x: int, y: int, z: int) -> bool:\n \"\"\"\n Check, if field position is inside map\n\n Parameters\n ----------\n x : int\n x position\n y : int\n y position\n z : int\n z position\n\n Returns\n -------\n bool\n True, if position is inside map\n \"\"\"\n return 0 <= x < self.width and 0 <= y < self.height and 0 <= z < self.depth\n\n def walkable(self, x: int, y: int, z: int) -> bool:\n \"\"\"\n Check, if the tile is inside grid and if it is set as walkable\n\n Parameters\n ----------\n x : int\n x position\n y : int\n y position\n z : int\n z position\n\n Returns\n -------\n bool\n True, if position is inside map and walkable\n \"\"\"\n return self.inside(x, y, z) and self.nodes[x][y][z].walkable\n\n def calc_cost(self, node_a: GridNode, node_b: GridNode, weighted: bool = False) -> float:\n \"\"\"\n Get the distance between current node and the neighbor (cost)\n\n Parameters\n ----------\n node_a : GridNode\n current node\n node_b : GridNode\n neighbor node\n weighted : bool, optional\n True, if weighted algorithm is used, by default False\n\n Returns\n -------\n float\n distance between current node and the neighbor (cost)\n \"\"\"\n # Check if we have a straight, diagonal in plane or diagonal in space\n dx = node_b.x - node_a.x\n dy = node_b.y - node_a.y\n dz = node_b.z - node_a.z\n\n ng = math.sqrt(dx * dx + dy * dy + dz * dz)\n\n # weight for weighted algorithms\n if weighted:\n ng *= node_b.weight\n\n return ng\n\n def neighbors(\n self,\n node: GridNode,\n diagonal_movement: int = DiagonalMovement.never,\n ) -> List[GridNode]:\n \"\"\"\n Get all neighbors of one node\n\n Parameters\n ----------\n node : 
GridNode\n node to get neighbors from\n diagonal_movement : int, optional\n if diagonal movement is allowed\n (see enum in diagonal_movement), by default DiagonalMovement.never\n\n Returns\n -------\n list\n list of neighbor nodes\n \"\"\"\n x, y, z = node.x, node.y, node.z\n\n neighbors = []\n # current plane\n cs0 = cd0 = cs1 = cd1 = cs2 = cd2 = cs3 = cd3 = False\n # upper plane\n us0 = ud0 = us1 = ud1 = us2 = ud2 = us3 = ud3 = ut = False # ut = upper top\n # lower plane\n ls0 = ld0 = ls1 = ld1 = ls2 = ld2 = ls3 = ld3 = lb = False # lb = lower bottom\n\n # -y\n if self.walkable(x, y - 1, z):\n neighbors.append(self.nodes[x][y - 1][z])\n cs0 = True\n\n # +x\n if self.walkable(x + 1, y, z):\n neighbors.append(self.nodes[x + 1][y][z])\n cs1 = True\n\n # +y\n if self.walkable(x, y + 1, z):\n neighbors.append(self.nodes[x][y + 1][z])\n cs2 = True\n\n # -x\n if self.walkable(x - 1, y, z):\n neighbors.append(self.nodes[x - 1][y][z])\n cs3 = True\n\n # +z\n if self.walkable(x, y, z + 1):\n neighbors.append(self.nodes[x][y][z + 1])\n ut = True\n\n # -z\n if self.walkable(x, y, z - 1):\n neighbors.append(self.nodes[x][y][z - 1])\n lb = True\n\n # check for connections to other grids\n if node.connections:\n neighbors.extend(node.connections)\n\n if diagonal_movement == DiagonalMovement.never:\n return neighbors\n\n if diagonal_movement == DiagonalMovement.only_when_no_obstacle:\n cd0 = cs0 and cs1\n cd1 = cs1 and cs2\n cd2 = cs2 and cs3\n cd3 = cs3 and cs0\n\n us0 = cs0 and ut\n us1 = cs1 and ut\n us2 = cs2 and ut\n us3 = cs3 and ut\n\n ls0 = cs0 and lb\n ls1 = cs1 and lb\n ls2 = cs2 and lb\n ls3 = cs3 and lb\n\n elif diagonal_movement == DiagonalMovement.if_at_most_one_obstacle:\n cd0 = cs0 or cs1\n cd1 = cs1 or cs2\n cd2 = cs2 or cs3\n cd3 = cs3 or cs0\n\n us0 = cs0 or ut\n us1 = cs1 or ut\n us2 = cs2 or ut\n us3 = cs3 or ut\n\n ls0 = cs0 or lb\n ls1 = cs1 or lb\n ls2 = cs2 or lb\n ls3 = cs3 or lb\n\n elif diagonal_movement == DiagonalMovement.always:\n cd0 = cd1 = cd2 = cd3 = True\n us0 = us1 = us2 = us3 = True\n ls0 = ls1 = ls2 = ls3 = True\n\n # +x -y\n if cd0 and self.walkable(x + 1, y - 1, z):\n neighbors.append(self.nodes[x + 1][y - 1][z])\n else:\n cd0 = False\n\n # +x +y\n if cd1 and self.walkable(x + 1, y + 1, z):\n neighbors.append(self.nodes[x + 1][y + 1][z])\n else:\n cd1 = False\n\n # -x +y\n if cd2 and self.walkable(x - 1, y + 1, z):\n neighbors.append(self.nodes[x - 1][y + 1][z])\n else:\n cd2 = False\n\n # -x -y\n if cd3 and self.walkable(x - 1, y - 1, z):\n neighbors.append(self.nodes[x - 1][y - 1][z])\n else:\n cd3 = False\n\n # -y +z\n if us0 and self.walkable(x, y - 1, z + 1):\n neighbors.append(self.nodes[x][y - 1][z + 1])\n else:\n us0 = False\n\n # +x +z\n if us1 and self.walkable(x + 1, y, z + 1):\n neighbors.append(self.nodes[x + 1][y][z + 1])\n else:\n us1 = False\n\n # +y +z\n if us2 and self.walkable(x, y + 1, z + 1):\n neighbors.append(self.nodes[x][y + 1][z + 1])\n else:\n us2 = False\n\n # -x +z\n if us3 and self.walkable(x - 1, y, z + 1):\n neighbors.append(self.nodes[x - 1][y][z + 1])\n else:\n us3 = False\n\n # -y -z\n if ls0 and self.walkable(x, y - 1, z - 1):\n neighbors.append(self.nodes[x][y - 1][z - 1])\n else:\n ls0 = False\n\n # +x -z\n if ls1 and self.walkable(x + 1, y, z - 1):\n neighbors.append(self.nodes[x + 1][y][z - 1])\n else:\n ls1 = False\n\n # +y -z\n if ls2 and self.walkable(x, y + 1, z - 1):\n neighbors.append(self.nodes[x][y + 1][z - 1])\n else:\n ls2 = False\n\n # -x -z\n if ls3 and self.walkable(x - 1, y, z - 1):\n 
neighbors.append(self.nodes[x - 1][y][z - 1])\n else:\n ls3 = False\n\n # remaining daigonal neighbors\n if diagonal_movement == DiagonalMovement.only_when_no_obstacle:\n ud0 = cs0 and cd0 and cs1 and us0 and us1 and ut\n ud1 = cs1 and cd1 and cs2 and us1 and us2 and ut\n ud2 = cs2 and cd2 and cs3 and us2 and us3 and ut\n ud3 = cs3 and cd3 and cs0 and us3 and us0 and ut\n\n ld0 = cs0 and cd0 and cs1 and ls0 and ls1 and lb\n ld1 = cs1 and cd1 and cs2 and ls1 and ls2 and lb\n ld2 = cs2 and cd2 and cs3 and ls2 and ls3 and lb\n ld3 = cs3 and cd3 and cs0 and ls3 and ls0 and lb\n\n elif diagonal_movement == DiagonalMovement.if_at_most_one_obstacle:\n ud0 = sum([cs0, cd0, cs1, us0, us1, ut]) >= 5\n ud1 = sum([cs1, cd1, cs2, us1, us2, ut]) >= 5\n ud2 = sum([cs2, cd2, cs3, us2, us3, ut]) >= 5\n ud3 = sum([cs3, cd3, cs0, us3, us0, ut]) >= 5\n\n ld0 = sum([cs0, cd0, cs1, ls0, ls1, lb]) >= 5\n ld1 = sum([cs1, cd1, cs2, ls1, ls2, lb]) >= 5\n ld2 = sum([cs2, cd2, cs3, ls2, ls3, lb]) >= 5\n ld3 = sum([cs3, cd3, cs0, ls3, ls0, lb]) >= 5\n\n elif diagonal_movement == DiagonalMovement.always:\n ud0 = ud1 = ud2 = ud3 = True\n ld0 = ld1 = ld2 = ld3 = True\n\n # +x -y +z\n if ud0 and self.walkable(x + 1, y - 1, z + 1):\n neighbors.append(self.nodes[x + 1][y - 1][z + 1])\n\n # +x +y +z\n if ud1 and self.walkable(x + 1, y + 1, z + 1):\n neighbors.append(self.nodes[x + 1][y + 1][z + 1])\n\n # -x +y +z\n if ud2 and self.walkable(x - 1, y + 1, z + 1):\n neighbors.append(self.nodes[x - 1][y + 1][z + 1])\n\n # -x -y +z\n if ud3 and self.walkable(x - 1, y - 1, z + 1):\n neighbors.append(self.nodes[x - 1][y - 1][z + 1])\n\n # +x -y -z\n if ld0 and self.walkable(x + 1, y - 1, z - 1):\n neighbors.append(self.nodes[x + 1][y - 1][z - 1])\n\n # +x +y -z\n if ld1 and self.walkable(x + 1, y + 1, z - 1):\n neighbors.append(self.nodes[x + 1][y + 1][z - 1])\n\n # -x +y -z\n if ld2 and self.walkable(x - 1, y + 1, z - 1):\n neighbors.append(self.nodes[x - 1][y + 1][z - 1])\n\n # -x -y -z\n if ld3 and self.walkable(x - 1, y - 1, z - 1):\n neighbors.append(self.nodes[x - 1][y - 1][z - 1])\n\n return neighbors\n\n def cleanup(self):\n \"\"\"\n Cleanup grid\n \"\"\"\n for x_nodes in self.nodes:\n for y_nodes in x_nodes:\n for z_node in y_nodes:\n z_node.cleanup()"
},
{
"identifier": "SimpleHeap",
"path": "pathfinding3d/core/heap.py",
"snippet": "class SimpleHeap:\n \"\"\"\n A simple implementation of a heap data structure optimized for pathfinding.\n It maintains an open list of nodes, a status for each node, and a function to retrieve nodes.\n \"\"\"\n\n def __init__(self, node: GridNode, grid: Union[Grid, World]):\n \"\"\"\n Initializes the SimpleHeap with a given node and grid.\n\n Parameters\n ----------\n node : GridNode\n The initial node to be added to the heap. This node should have an 'f' attribute representing its cost.\n grid : Union[Grid, World]\n The grid in which the nodes are located.\n \"\"\"\n\n self.grid = grid\n self._get_node_tuple = self._determine_node_retrieval_function()\n self._get_node = self._determine_node_function()\n self.open_list = [self._get_node_tuple(node, 0)]\n self.removed_node_tuples = set()\n self.heap_order = {}\n self.number_pushed = 0\n\n def _determine_node_retrieval_function(self) -> Callable:\n \"\"\"\n Determines the node retrieval function based on the type of grid.\n\n Returns\n -------\n function\n A function that takes a node tuple and returns the corresponding node.\n\n Raises\n ------\n ValueError\n If the grid is not of type Grid or World.\n \"\"\"\n if isinstance(self.grid, Grid):\n return lambda node, heap_order: (node.f, heap_order, *node.identifier)\n\n if isinstance(self.grid, World):\n return lambda node, heap_order: (node.f, heap_order, *node.identifier)\n\n raise ValueError(\"Unsupported grid type\")\n\n def _determine_node_function(self) -> Callable:\n \"\"\"\n Determines the node function based on the type of grid.\n\n Returns\n -------\n function\n A function that takes a node tuple and returns the corresponding node.\n\n Raises\n ------\n ValueError\n If the grid is not of type Grid or World.\n \"\"\"\n\n if isinstance(self.grid, Grid):\n return lambda node_tuple: self.grid.node(*node_tuple[2:])\n\n if isinstance(self.grid, World):\n return lambda node_tuple: self.grid.grids[node_tuple[5]].node(*node_tuple[2:5])\n\n raise ValueError(\"Unsupported grid type\")\n\n def pop_node(self) -> GridNode:\n \"\"\"\n Pops the node with the lowest cost from the heap.\n\n Returns\n -------\n GridNode\n The node with the lowest cost.\n \"\"\"\n node_tuple = heapq.heappop(self.open_list)\n while node_tuple in self.removed_node_tuples:\n node_tuple = heapq.heappop(self.open_list)\n\n return self._get_node(node_tuple)\n\n def push_node(self, node: GridNode):\n \"\"\"\n Pushes a node to the heap.\n\n Parameters\n ----------\n node : GridNode\n The node to be pushed to the heap.\n \"\"\"\n self.number_pushed = self.number_pushed + 1\n node_tuple = self._get_node_tuple(node, self.number_pushed)\n\n self.heap_order[node.identifier] = self.number_pushed\n\n heapq.heappush(self.open_list, node_tuple)\n\n def remove_node(self, node: GridNode, old_f: float):\n \"\"\"\n Remove the node from the heap.\n\n This just stores it in a set and we just ignore the node if it does\n get popped from the heap.\n\n Parameters\n ----------\n node : GridNode\n The node to be removed from the heap.\n old_f: float\n The old cost of the node.\n \"\"\"\n heap_order = self.heap_order[node.identifier]\n node_tuple = self._get_node_tuple(node, heap_order)\n self.removed_node_tuples.add(node_tuple)\n\n def __len__(self) -> int:\n \"\"\"\n Returns the length of the heap.\n\n Returns\n -------\n int\n The length of the heap.\n \"\"\"\n return len(self.open_list)"
},
{
"identifier": "GridNode",
"path": "pathfinding3d/core/node.py",
"snippet": "class GridNode(Node):\n \"\"\"\n basic node, saves X, Y and Z coordinates on some grid and determine if\n it is walkable.\n \"\"\"\n\n # Coordinates\n x: int = 0\n y: int = 0\n z: int = 0\n\n # Wether this node can be walked through.\n walkable: bool = True\n\n # used for weighted algorithms\n weight: float = 1.0\n\n # grid_id is used if we have more than one grid,\n # normally we just count our grids by number\n # but you can also use a string here.\n # Set it to None if you only have one grid.\n grid_id: Optional[int] = None\n\n connections: Optional[List] = None\n\n identifier: Optional[Tuple] = None\n\n def __post_init__(self):\n super().__init__()\n # for heap\n self.identifier: Tuple = (\n (self.x, self.y, self.z) if self.grid_id is None else (self.x, self.y, self.z, self.grid_id)\n )\n\n def __iter__(self):\n yield self.x\n yield self.y\n yield self.z\n if self.grid_id is not None:\n yield self.grid_id\n\n def connect(self, other_node: \"GridNode\"):\n if not self.connections:\n self.connections = [other_node]\n else:\n self.connections.append(other_node)"
},
{
"identifier": "Finder",
"path": "pathfinding3d/finder/finder.py",
"snippet": "class Finder:\n def __init__(\n self,\n heuristic: Optional[Callable] = None,\n weight: int = 1,\n diagonal_movement: int = DiagonalMovement.never,\n weighted: bool = True,\n time_limit: float = TIME_LIMIT,\n max_runs: Union[int, float] = MAX_RUNS,\n ):\n \"\"\"\n Find shortest path\n\n Parameters\n ----------\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n weight : int\n weight for the edges\n diagonal_movement : int\n if diagonal movement is allowed\n (see enum in diagonal_movement)\n weighted: the algorithm supports weighted nodes\n (should be True for A* and Dijkstra)\n time_limit : float\n max. runtime in seconds\n max_runs : int\n max. amount of tries until we abort the search\n (optional, only if we enter huge grids and have time constrains)\n <=0 means there are no constrains and the code might run on any\n large map.\n \"\"\"\n self.time_limit = time_limit\n self.max_runs = max_runs\n self.weighted = weighted\n\n self.diagonal_movement = diagonal_movement\n self.weight = weight\n self.heuristic = heuristic\n\n self.start_time: float = 0.0\n self.runs: int = 0\n\n def apply_heuristic(self, node_a: GridNode, node_b: GridNode, heuristic: Optional[Callable] = None) -> float:\n \"\"\"\n Helper function to apply heuristic\n\n Parameters\n ----------\n node_a : GridNode\n first node\n node_b : GridNode\n second node\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n\n Returns\n -------\n float\n heuristic value\n \"\"\"\n if not heuristic:\n heuristic = self.heuristic\n return heuristic(\n abs(node_a.x - node_b.x),\n abs(node_a.y - node_b.y),\n abs(node_a.z - node_b.z),\n )\n\n def find_neighbors(\n self,\n grid: Grid,\n node: GridNode,\n diagonal_movement: Optional[int] = None,\n ) -> List[GridNode]:\n \"\"\"\n Find neighbor, same for Djikstra, A*, Bi-A*, IDA*\n\n Parameters\n ----------\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n node : GridNode\n node to find neighbors for\n diagonal_movement : int\n if diagonal movement is allowed\n (see enum in diagonal_movement)\n\n Returns\n -------\n List[GridNode]\n list of neighbors\n \"\"\"\n if not diagonal_movement:\n diagonal_movement = self.diagonal_movement\n return grid.neighbors(node, diagonal_movement=diagonal_movement)\n\n def keep_running(self):\n \"\"\"\n Check, if we run into time or iteration constrains.\n\n Raises\n ------\n ExecutionTimeException\n if we run into a time constrain\n ExecutionRunsException\n if we run into a iteration constrain\n \"\"\"\n if self.runs >= self.max_runs:\n raise ExecutionRunsException(\n f\"{self.__class__.__name__} run into barrier of {self.max_runs} iterations without \"\n \"finding the destination\"\n )\n\n if time.time() - self.start_time >= self.time_limit:\n raise ExecutionTimeException(\n f\"{self.__class__.__name__} took longer than {self.time_limit} seconds, aborting!\"\n )\n\n def process_node(\n self,\n grid: Grid,\n node: GridNode,\n parent: GridNode,\n end: GridNode,\n open_list: List,\n open_value: int = 1,\n ):\n \"\"\"\n We check if the given node is part of the path by calculating its\n cost and add or remove it from our path\n\n Parameters\n ----------\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n node : GridNode\n the node we like to test\n (the neighbor in A* or jump-node in JumpPointSearch)\n parent : GridNode\n the parent node (of the current node we like to test)\n end : GridNode\n the end point to calculate the cost of the path\n open_list : List\n the list 
that keeps track of our current path\n open_value : bool\n needed if we like to set the open list to something\n else than True (used for bi-directional algorithms)\n \"\"\"\n\n # calculate cost from current node (parent) to the next node (neighbor)\n ng = parent.g + grid.calc_cost(parent, node, self.weighted)\n\n if not node.opened or ng < node.g:\n old_f = node.f\n node.g = ng\n node.h = node.h or self.apply_heuristic(node, end)\n # f is the estimated total cost from start to goal\n node.f = node.g + node.h\n node.parent = parent\n\n if not node.opened:\n open_list.push_node(node)\n node.opened = open_value\n else:\n # the node can be reached with smaller cost.\n # Since its f value has been updated, we have to\n # update its position in the open list\n open_list.remove_node(node, old_f)\n open_list.push_node(node)\n\n def check_neighbors(\n self,\n start: GridNode,\n end: GridNode,\n grid: Grid,\n open_list: List,\n open_value: int = 1,\n backtrace_by=None,\n ) -> Optional[List[GridNode]]:\n \"\"\"\n find next path segment based on given node\n (or return path if we found the end)\n\n Parameters\n ----------\n start : GridNode\n start node\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n open_list : List\n stores nodes that will be processed next\n\n Returns\n -------\n Optional[List[GridNode]]\n path\n \"\"\"\n raise NotImplementedError(\"Please implement check_neighbors in your finder\")\n\n def find_path(self, start: GridNode, end: GridNode, grid: Grid) -> Tuple[List, int]:\n \"\"\"\n Find a path from start to end node on grid by iterating over\n all neighbors of a node (see check_neighbors)\n\n Parameters\n ----------\n start : GridNode\n start node\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n (can be a list of grids)\n\n Returns\n -------\n Tuple[List, int]\n path, number of iterations\n \"\"\"\n self.start_time = time.time() # execution time limitation\n self.runs = 0 # count number of iterations\n start.opened = True\n\n open_list = SimpleHeap(start, grid)\n\n while len(open_list) > 0:\n self.runs += 1\n self.keep_running()\n\n path = self.check_neighbors(start, end, grid, open_list)\n if path:\n return path, self.runs\n\n # failed to find path\n return [], self.runs\n\n def __repr__(self):\n \"\"\"\n Return a human readable representation\n \"\"\"\n return f\"<{self.__class__.__name__}\" f\"diagonal_movement={self.diagonal_movement} >\""
}
] | import time
from collections import deque, namedtuple
from typing import List, Tuple
from ..core import heuristic
from ..core.grid import Grid
from ..core.heap import SimpleHeap
from ..core.node import GridNode
from ..finder.finder import Finder | 7,642 |
class MinimumSpanningTree(Finder):
"""
Minimum Spanning Tree implementation by Brad Beattie
(see https://github.com/brean/python-pathfinding/issues/18)
    The Wikipedia page has a nice description of minimum spanning trees:
    https://en.wikipedia.org/wiki/Minimum_spanning_tree
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.heuristic = heuristic.null
def tree(self, grid: Grid, start: GridNode) -> List:
"""
Returns a list of nodes that are part of the minimum spanning tree
of the grid.
Parameters
----------
grid : Grid
grid that stores all possible steps/tiles as 3D-list
start : GridNode
start node
        Returns
        -------
        List
            nodes in the order they are added to the spanning tree
        """
return list(self.itertree(grid, start))
def itertree(self, grid: Grid, start: GridNode):
"""
Returns a generator that yields nodes that are part of the minimum
spanning tree of the grid.
Parameters
----------
grid : Grid
grid that stores all possible steps/tiles as 3D-list
start : GridNode
start node
"""
# Finder.process_node requires an end node, which we don't have.
# The following value tricks the call to Finder.apply_heuristic.
# Though maybe we want to generate a limited spanning tree that
# trends in a certain direction? In which case we'd want a more
# nuanced solution.
end = namedtuple("FakeNode", ["x", "y", "z"])(-1, -1, -1)
start.opened = True
|
class MinimumSpanningTree(Finder):
"""
Minimum Spanning Tree implementation by Brad Beattie
(see https://github.com/brean/python-pathfinding/issues/18)
    The Wikipedia page has a nice description of minimum spanning trees:
    https://en.wikipedia.org/wiki/Minimum_spanning_tree
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.heuristic = heuristic.null
def tree(self, grid: Grid, start: GridNode) -> List:
"""
Returns a list of nodes that are part of the minimum spanning tree
of the grid.
Parameters
----------
grid : Grid
grid that stores all possible steps/tiles as 3D-list
start : GridNode
start node
        Returns
        -------
        List
            nodes in the order they are added to the spanning tree
        """
return list(self.itertree(grid, start))
def itertree(self, grid: Grid, start: GridNode):
"""
Returns a generator that yields nodes that are part of the minimum
spanning tree of the grid.
Parameters
----------
grid : Grid
grid that stores all possible steps/tiles as 3D-list
start : GridNode
start node
"""
# Finder.process_node requires an end node, which we don't have.
# The following value tricks the call to Finder.apply_heuristic.
# Though maybe we want to generate a limited spanning tree that
# trends in a certain direction? In which case we'd want a more
# nuanced solution.
end = namedtuple("FakeNode", ["x", "y", "z"])(-1, -1, -1)
start.opened = True
| open_list = SimpleHeap(start, grid) | 2 | 2023-11-21 10:14:12+00:00 | 12k |
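A minimal sketch of how the finder above could be driven end to end, assuming the package layout implied by the import block (pathfinding3d.core.grid and pathfinding3d.finder.msp); the 2x2x2 all-walkable matrix and the start coordinates are made-up illustrative values, not data from the repository.

import numpy as np
from pathfinding3d.core.grid import Grid
from pathfinding3d.finder.msp import MinimumSpanningTree

# 3D occupancy matrix: non-zero cells are walkable (see the Grid snippet above)
matrix = np.ones((2, 2, 2), dtype=np.int8)
grid = Grid(matrix=matrix)
start = grid.node(0, 0, 0)

finder = MinimumSpanningTree()
# tree() wraps itertree() and collects every node reachable from start
spanning_nodes = finder.tree(grid, start)

Because the constructor forces the heuristic to heuristic.null, node priority reduces to accumulated edge cost alone, which is what turns the traversal into a minimum spanning tree rather than a goal-directed search.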
vtarasv/pocket-cfdm | predict.py | [
{
"identifier": "DEVICE",
"path": "params.py",
"snippet": "DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')"
},
{
"identifier": "logger",
"path": "utils/general.py",
"snippet": "def load_pkl(path):\ndef save_pkl(obj, path):\ndef read_strings_from_txt(path):\ndef parallelize(func, data):\ndef chunker(seq, size):\ndef chunker_df(df, size):\ndef set_mol_pose(mol: rdkit.Chem.rdchem.Mol, pos: np.ndarray):\ndef get_symmetry_rmsd(mol, coords1, coords2):\ndef time_limit(seconds):\n def signal_handler(signum, frame):\nclass TimeoutException(Exception):"
},
{
"identifier": "TtoSigma",
"path": "utils/diffusion.py",
"snippet": "class TtoSigma:\n def __init__(self, tr_sigma_min: float, tr_sigma_max: float,\n rot_sigma_min: float, rot_sigma_max: float,\n tor_sigma_min: float, tor_sigma_max: float):\n self.tr_sigma_min = tr_sigma_min\n self.tr_sigma_max = tr_sigma_max\n self.rot_sigma_min = rot_sigma_min\n self.rot_sigma_max = rot_sigma_max\n self.tor_sigma_min = tor_sigma_min\n self.tor_sigma_max = tor_sigma_max\n\n def __call__(self,\n t_tr: Union[float, torch.Tensor],\n t_rot: Union[float, torch.Tensor],\n t_tor: Union[float, torch.Tensor]):\n tr_sigma = self.tr_sigma_min ** (1 - t_tr) * self.tr_sigma_max ** t_tr\n rot_sigma = self.rot_sigma_min ** (1 - t_rot) * self.rot_sigma_max ** t_rot\n tor_sigma = self.tor_sigma_min ** (1 - t_tor) * self.tor_sigma_max ** t_tor\n return tr_sigma, rot_sigma, tor_sigma"
},
{
"identifier": "get_t_schedule",
"path": "utils/diffusion.py",
"snippet": "def get_t_schedule(inference_steps):\n return np.linspace(1, 0, inference_steps + 1)[:-1]"
},
{
"identifier": "ExponentialMovingAverage",
"path": "utils/training.py",
"snippet": "class ExponentialMovingAverage:\n \"\"\" from https://github.com/yang-song/score_sde_pytorch/blob/main/models/ema.py\n Maintains (exponential) moving average of a set of parameters. \"\"\"\n\n def __init__(self, parameters, decay, use_num_updates=True):\n \"\"\"\n Args:\n parameters: Iterable of `torch.nn.Parameter`; usually the result of\n `model.parameters()`.\n decay: The exponential decay.\n use_num_updates: Whether to use number of updates when computing\n averages.\n \"\"\"\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n self.decay = decay\n self.num_updates = 0 if use_num_updates else None\n self.shadow_params = [p.clone().detach()\n for p in parameters if p.requires_grad]\n self.collected_params = []\n\n def update(self, parameters):\n \"\"\"\n Update currently maintained parameters.\n Call this every time the parameters are updated, such as the result of\n the `optimizer.step()` call.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; usually the same set of\n parameters used to initialize this object.\n \"\"\"\n decay = self.decay\n if self.num_updates is not None:\n self.num_updates += 1\n decay = min(decay, (1 + self.num_updates) / (10 + self.num_updates))\n one_minus_decay = 1.0 - decay\n with torch.no_grad():\n parameters = [p for p in parameters if p.requires_grad]\n for s_param, param in zip(self.shadow_params, parameters):\n s_param.sub_(one_minus_decay * (s_param - param))\n\n def copy_to(self, parameters):\n \"\"\"\n Copy current parameters into given collection of parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored moving averages.\n \"\"\"\n parameters = [p for p in parameters if p.requires_grad]\n for s_param, param in zip(self.shadow_params, parameters):\n if param.requires_grad:\n param.data.copy_(s_param.data)\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)\n\n def state_dict(self):\n return dict(decay=self.decay, num_updates=self.num_updates,\n shadow_params=self.shadow_params)\n\n def load_state_dict(self, state_dict, device):\n self.decay = state_dict['decay']\n self.num_updates = state_dict['num_updates']\n self.shadow_params = [tensor.to(device) for tensor in state_dict['shadow_params']]"
},
{
"identifier": "lig_cat_dims",
"path": "features.py",
"snippet": "def safe_index(l_, e):\n def __init__(self, mol: Mol):\n def get_features(self):\n def get_atom_features(mol):\n def get_edges(mol):\n def __init__(self, prot: Protein, radius: float = None, max_neighbors: int = None):\n def get_features(self):\n def get_atom_features(prot):\nclass LigandFeaturizer:\nclass PocketFeaturizer:"
},
{
"identifier": "set_time",
"path": "dataset.py",
"snippet": "def set_time(data: Union[Data, HeteroData], t: float, batch: int):\n for node_type in data.node_types:\n data[node_type].node_t = t * torch.ones(data[node_type].num_nodes)\n data.complex_t = t * torch.ones(batch)\n return data"
},
{
"identifier": "modify_conformer",
"path": "dataset.py",
"snippet": "def modify_conformer(data, tr_update, rot_update, torsion_updates):\n lig_center = torch.mean(data['ligand'].pos, dim=0, keepdim=True)\n rot_mat = axis_angle_to_matrix(rot_update.squeeze())\n rigid_new_pos = (data['ligand'].pos - lig_center) @ rot_mat.T + tr_update + lig_center\n\n if not torsion_updates.size:\n data['ligand'].pos = rigid_new_pos\n return data\n\n torsion_edge_index = data['ligand', 'ligand'].edge_index.T[data['rotation_edge_mask']]\n rotation_node_mask = data['rotation_node_mask']\n if isinstance(rotation_node_mask, list):\n rotation_node_mask = rotation_node_mask[0]\n flexible_new_pos = modify_torsion_angles(rigid_new_pos, torsion_edge_index,\n rotation_node_mask, torsion_updates)\n R, t = rigid_transform_kabsch_3d(flexible_new_pos.T, rigid_new_pos.T)\n aligned_flexible_pos = flexible_new_pos @ R.T + t.T\n data['ligand'].pos = aligned_flexible_pos\n return data"
},
{
"identifier": "randomize_position",
"path": "dataset.py",
"snippet": "def randomize_position(data, tr_sigma_max):\n # randomize torsion angles\n torsion_updates = np.random.uniform(low=-np.pi, high=np.pi, size=data['rotation_edge_mask'].sum())\n torsion_edge_index = data['ligand', 'ligand'].edge_index.T[data['rotation_edge_mask']]\n rotation_node_mask = data['rotation_node_mask']\n if isinstance(rotation_node_mask, list):\n rotation_node_mask = rotation_node_mask[0]\n data['ligand'].pos = \\\n modify_torsion_angles(data['ligand'].pos, torsion_edge_index,\n rotation_node_mask, torsion_updates)\n\n # randomize position\n molecule_center = torch.mean(data['ligand'].pos, dim=0, keepdim=True)\n random_rotation = torch.from_numpy(R.random().as_matrix()).float()\n data['ligand'].pos = (data['ligand'].pos - molecule_center) @ random_rotation.T\n\n # randomize translation\n tr_update = torch.normal(mean=0, std=tr_sigma_max, size=(1, 3))\n data['ligand'].pos += tr_update\n\n return data"
},
{
"identifier": "PredSDFDataLoader",
"path": "dataset.py",
"snippet": "class PredSDFDataLoader:\n def __init__(self, df_path, pf, pocket_cent, device):\n self.df = PandasTools.LoadSDF(df_path, idName=\"ID\", molColName=\"ROMol\")\n self.df.apply(lambda x: x[\"ROMol\"].SetProp(\"_Name\", x[\"ID\"]), axis=1)\n self.pf = pf\n self.pocket_cent = pocket_cent\n self.device = device\n\n self.n_samples = self.df.shape[0]\n\n def __iter__(self):\n self.idx_curr = 0\n return self\n\n def __next__(self):\n if self.idx_curr < self.n_samples:\n mol = self.df.iloc[self.idx_curr]['ROMol']\n self.idx_curr += 1\n graph = get_features_pred(mol, self.pf, self.pocket_cent)\n if graph is not None:\n graphs = [graph.to(self.device) for _ in range(1)]\n return Batch.from_data_list(graphs)\n else:\n raise StopIteration\n\n def __len__(self):\n return self.n_samples"
},
{
"identifier": "FitModel",
"path": "model.py",
"snippet": "class FitModel(torch.nn.Module):\n def __init__(self, *, t_to_sigma, ns, nv, sh_lmax, dropout, num_conv_layers, tp_batch_norm,\n sigma_embed_dim, sigma_embed_scale, distance_embed_dim, cross_distance_embed_dim,\n lig_cat_dims, lig_cont_feats, lig_max_radius, lig_edge_features,\n prot_cat_dims, prot_cont_feats,\n cross_max_radius, center_max_radius, scale_by_sigma):\n super(FitModel, self).__init__()\n self.t_to_sigma = t_to_sigma\n self.ns = ns\n self.lig_max_radius = lig_max_radius\n self.lig_edge_features = lig_edge_features\n self.scale_by_sigma = scale_by_sigma\n\n self.sh_irreps = o3.Irreps.spherical_harmonics(lmax=sh_lmax)\n\n self.timestep_emb_func = SinusoidalEmbedding(embedding_dim=sigma_embed_dim, embedding_scale=sigma_embed_scale)\n\n self.lig_node_embedding = \\\n AtomEncoder(emb_dim=ns, cat_dims=lig_cat_dims, cont_feats=lig_cont_feats, sigma_embed_dim=sigma_embed_dim)\n lig_edge_dim = lig_edge_features + sigma_embed_dim + distance_embed_dim\n self.lig_edge_embedding = \\\n nn.Sequential(nn.Linear(lig_edge_dim, ns), nn.ReLU(), nn.Dropout(dropout), nn.Linear(ns, ns))\n self.lig_distance_expansion = GaussianSmearing(0.0, lig_max_radius, distance_embed_dim)\n\n self.prot_node_embedding = \\\n AtomEncoder(emb_dim=ns, cat_dims=prot_cat_dims, cont_feats=prot_cont_feats, sigma_embed_dim=sigma_embed_dim)\n atom_edge_dim = sigma_embed_dim + distance_embed_dim\n self.prot_edge_embedding = \\\n nn.Sequential(nn.Linear(atom_edge_dim, ns), nn.ReLU(), nn.Dropout(dropout), nn.Linear(ns, ns))\n\n cross_edge_dim = sigma_embed_dim + cross_distance_embed_dim\n self.cross_edge_embedding = \\\n nn.Sequential(nn.Linear(cross_edge_dim, ns), nn.ReLU(), nn.Dropout(dropout), nn.Linear(ns, ns))\n self.cross_distance_expansion = GaussianSmearing(0.0, cross_max_radius, cross_distance_embed_dim)\n\n irrep_seq = [f'{ns}x0e',\n f'{ns}x0e + {nv}x1o',\n f'{ns}x0e + {nv}x1o + {nv}x1e',\n f'{ns}x0e + {nv}x1o + {nv}x1e + {ns}x0o']\n lig_conv_layers, prot_conv_layers, lig_to_prot_conv_layers, prot_to_lig_conv_layers = [], [], [], []\n for i in range(num_conv_layers):\n in_irreps = irrep_seq[min(i, len(irrep_seq) - 1)]\n out_irreps = irrep_seq[min(i + 1, len(irrep_seq) - 1)]\n parameters = {'in_irreps': in_irreps, 'sh_irreps': self.sh_irreps, 'out_irreps': out_irreps,\n 'n_edge_features': 3 * ns, 'hidden_features': 3 * ns,\n 'residual': False, 'batch_norm': tp_batch_norm, 'dropout': dropout}\n lig_layer = TensorProductConvLayer(**parameters)\n lig_conv_layers.append(lig_layer)\n prot_layer = TensorProductConvLayer(**parameters)\n prot_conv_layers.append(prot_layer)\n lig_to_prot_layer = TensorProductConvLayer(**parameters)\n lig_to_prot_conv_layers.append(lig_to_prot_layer)\n prot_to_lig_layer = TensorProductConvLayer(**parameters)\n prot_to_lig_conv_layers.append(prot_to_lig_layer)\n\n self.lig_conv_layers = nn.ModuleList(lig_conv_layers)\n self.prot_conv_layers = nn.ModuleList(prot_conv_layers)\n self.lig_to_prot_conv_layers = nn.ModuleList(lig_to_prot_conv_layers)\n self.prot_to_lig_conv_layers = nn.ModuleList(prot_to_lig_conv_layers)\n\n # translation + rotation\n center_edge_dim = distance_embed_dim + sigma_embed_dim\n self.center_edge_embedding = \\\n nn.Sequential(nn.Linear(center_edge_dim, ns), nn.ReLU(), nn.Dropout(dropout), nn.Linear(ns, ns))\n self.center_distance_expansion = GaussianSmearing(0.0, center_max_radius, distance_embed_dim)\n parameters = {\"in_irreps\": self.lig_conv_layers[-1].out_irreps, \"sh_irreps\": self.sh_irreps,\n \"out_irreps\": f'2x1o + 2x1e', \"n_edge_features\": 2 * 
ns,\n \"residual\": False, \"dropout\": dropout, \"batch_norm\": tp_batch_norm}\n self.final_conv = TensorProductConvLayer(**parameters)\n self.tr_final_layer = \\\n nn.Sequential(nn.Linear(1 + sigma_embed_dim, ns), nn.Dropout(dropout), nn.ReLU(), nn.Linear(ns, 1))\n self.rot_final_layer = \\\n nn.Sequential(nn.Linear(1 + sigma_embed_dim, ns), nn.Dropout(dropout), nn.ReLU(), nn.Linear(ns, 1))\n\n # torsion\n self.final_edge_embedding = \\\n nn.Sequential(nn.Linear(distance_embed_dim, ns), nn.ReLU(), nn.Dropout(dropout), nn.Linear(ns, ns))\n self.final_tp_tor = o3.FullTensorProduct(self.sh_irreps, \"2e\")\n parameters = {\"in_irreps\": self.lig_conv_layers[-1].out_irreps, \"sh_irreps\": self.final_tp_tor.irreps_out,\n \"out_irreps\": f'{ns}x0o + {ns}x0e', \"n_edge_features\": 3 * ns,\n \"residual\": False, \"dropout\": dropout, \"batch_norm\": tp_batch_norm}\n self.tor_bond_conv = TensorProductConvLayer(**parameters)\n self.tor_final_layer = \\\n nn.Sequential(nn.Linear(2 * ns, ns, bias=False), nn.Tanh(), nn.Dropout(dropout), nn.Linear(ns, 1, bias=False))\n\n def forward(self, data):\n data.to(DEVICE)\n tr_sigma, rot_sigma, tor_sigma = self.t_to_sigma(data.complex_t, data.complex_t, data.complex_t)\n\n lig_node_attr_cat, lig_node_attr_sigma_emb, lig_edge_index, lig_edge_attr, lig_edge_sh = \\\n self.build_lig_conv_graph(data)\n lig_src, lig_dst = lig_edge_index\n lig_node_attr = self.lig_node_embedding(x_cat=lig_node_attr_cat, sigma_emb=lig_node_attr_sigma_emb)\n lig_edge_attr = self.lig_edge_embedding(lig_edge_attr)\n\n prot_node_attr_cat, prot_node_attr_sigma_emb, prot_edge_index, prot_edge_attr, prot_edge_sh = \\\n self.build_prot_conv_graph(data)\n prot_src, prot_dst = prot_edge_index\n prot_node_attr = self.prot_node_embedding(x_cat=prot_node_attr_cat, sigma_emb=prot_node_attr_sigma_emb)\n prot_edge_attr = self.prot_edge_embedding(prot_edge_attr)\n\n cross_cutoff = (tr_sigma * 3 + 20).unsqueeze(1)\n cross_edge_index, cross_edge_attr, cross_edge_sh = self.build_cross_conv_graph(data, cross_cutoff)\n cross_lig, cross_prot = cross_edge_index\n cross_edge_attr = self.cross_edge_embedding(cross_edge_attr)\n\n for l_ in range(len(self.lig_conv_layers)):\n # intra graph message passing\n lig_edge_attr_ = \\\n torch.cat([lig_edge_attr, lig_node_attr[lig_src, :self.ns], lig_node_attr[lig_dst, :self.ns]], -1)\n lig_intra_update = \\\n self.lig_conv_layers[l_](lig_node_attr, lig_edge_index, lig_edge_attr_, lig_edge_sh)\n\n # inter graph message passing\n prot_to_lig_edge_attr_ = \\\n torch.cat([cross_edge_attr, lig_node_attr[cross_lig, :self.ns], prot_node_attr[cross_prot, :self.ns]], -1)\n lig_inter_update = \\\n self.prot_to_lig_conv_layers[l_](prot_node_attr, cross_edge_index, prot_to_lig_edge_attr_,\n cross_edge_sh, out_nodes=lig_node_attr.shape[0])\n\n last_layer = (l_ == len(self.lig_conv_layers) - 1)\n prot_intra_update, prot_inter_update = None, None\n if not last_layer:\n prot_edge_attr_ = \\\n torch.cat([prot_edge_attr, prot_node_attr[prot_src, :self.ns], prot_node_attr[prot_dst, :self.ns]], -1)\n prot_intra_update = \\\n self.prot_conv_layers[l_](prot_node_attr, prot_edge_index, prot_edge_attr_, prot_edge_sh)\n\n lig_to_prot_edge_attr_ = \\\n torch.cat([cross_edge_attr, lig_node_attr[cross_lig, :self.ns], prot_node_attr[cross_prot, :self.ns]], -1)\n prot_inter_update = \\\n self.lig_to_prot_conv_layers[l_](lig_node_attr, torch.flip(cross_edge_index, dims=[0]), lig_to_prot_edge_attr_,\n cross_edge_sh, out_nodes=prot_node_attr.shape[0])\n\n # padding original features\n lig_node_attr = 
F.pad(lig_node_attr, (0, lig_intra_update.shape[-1] - lig_node_attr.shape[-1]))\n # update features with residual updates\n lig_node_attr = lig_node_attr + lig_intra_update + lig_inter_update\n\n if not last_layer:\n prot_node_attr = F.pad(prot_node_attr, (0, prot_intra_update.shape[-1] - prot_node_attr.shape[-1]))\n prot_node_attr = prot_node_attr + prot_intra_update + prot_inter_update\n\n # compute translational and rotational score vectors\n center_edge_index, center_edge_attr, center_edge_sh = self.build_center_conv_graph(data)\n center_edge_attr = self.center_edge_embedding(center_edge_attr)\n center_edge_attr = torch.cat([center_edge_attr, lig_node_attr[center_edge_index[1], :self.ns]], -1)\n global_pred = self.final_conv(lig_node_attr, center_edge_index, center_edge_attr, center_edge_sh, out_nodes=data.num_graphs)\n\n tr_pred = global_pred[:, :3] + global_pred[:, 6:9]\n rot_pred = global_pred[:, 3:6] + global_pred[:, 9:]\n data.graph_sigma_emb = self.timestep_emb_func(data.complex_t)\n\n # fix the magnitude of translational and rotational score vectors\n tr_norm = torch.linalg.vector_norm(tr_pred, dim=1).unsqueeze(1)\n tr_pred = tr_pred / tr_norm * self.tr_final_layer(torch.cat([tr_norm, data.graph_sigma_emb], dim=1))\n rot_norm = torch.linalg.vector_norm(rot_pred, dim=1).unsqueeze(1)\n rot_pred = rot_pred / rot_norm * self.rot_final_layer(torch.cat([rot_norm, data.graph_sigma_emb], dim=1))\n\n if self.scale_by_sigma:\n tr_pred = tr_pred / tr_sigma.unsqueeze(1)\n rot_pred = rot_pred * so3.score_norm(rot_sigma.cpu()).unsqueeze(1).to(DEVICE)\n\n if data['rotation_edge_mask'].sum() == 0:\n return tr_pred, rot_pred, torch.empty(0, device=DEVICE)\n\n # torsional components\n tor_bonds, tor_edge_index, tor_edge_attr, tor_edge_sh = self.build_bond_conv_graph(data)\n tor_src, tor_dst = tor_bonds\n tor_bond_vec = data['ligand'].pos[tor_dst] - data['ligand'].pos[tor_src]\n tor_bond_attr = lig_node_attr[tor_src] + lig_node_attr[tor_dst]\n\n tor_bonds_sh = o3.spherical_harmonics(\"2e\", tor_bond_vec, normalize=True, normalization='component')\n tor_edge_sh = self.final_tp_tor(tor_edge_sh, tor_bonds_sh[tor_edge_index[0]])\n\n tor_edge_attr = torch.cat([tor_edge_attr, lig_node_attr[tor_edge_index[1], :self.ns],\n tor_bond_attr[tor_edge_index[0], :self.ns]], -1)\n tor_pred = self.tor_bond_conv(lig_node_attr, tor_edge_index, tor_edge_attr, tor_edge_sh,\n out_nodes=data['rotation_edge_mask'].sum(), reduce='mean')\n tor_pred = self.tor_final_layer(tor_pred).squeeze(1)\n edge_sigma = tor_sigma[data['ligand'].batch][data['ligand', 'ligand'].edge_index[0]][data['rotation_edge_mask']]\n\n if self.scale_by_sigma:\n tor_pred = tor_pred * torch.sqrt(torch.tensor(torus.score_norm(edge_sigma.cpu().numpy())).float().to(DEVICE))\n\n return tr_pred, rot_pred, tor_pred\n\n def build_lig_conv_graph(self, data):\n # builds the ligand graph edges and initial node and edge features\n data['ligand'].node_sigma_emb = self.timestep_emb_func(data['ligand'].node_t)\n\n # compute edges\n radius_edges = radius_graph(data['ligand'].pos, self.lig_max_radius, data['ligand'].batch).long()\n edge_index = torch.cat([data['ligand', 'ligand'].edge_index, radius_edges], 1)\n edge_attr = torch.cat([data['ligand', 'ligand'].edge_attr,\n torch.zeros(radius_edges.shape[-1], self.lig_edge_features, device=DEVICE)], 0)\n\n # compute initial features\n src, dst = edge_index\n edge_sigma_emb = data['ligand'].node_sigma_emb[src]\n edge_attr = torch.cat([edge_attr, edge_sigma_emb], 1)\n node_attr_cat = data['ligand'].x_cat\n # node_attr_cont 
= data['ligand'].x_cont\n node_attr_sigma_emb = data['ligand'].node_sigma_emb\n\n edge_vec = data['ligand'].pos[dst] - data['ligand'].pos[src]\n edge_length_emb = self.lig_distance_expansion(edge_vec.norm(dim=-1))\n\n edge_attr = torch.cat([edge_attr, edge_length_emb], 1)\n edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component')\n\n return node_attr_cat, node_attr_sigma_emb, edge_index, edge_attr, edge_sh\n\n def build_prot_conv_graph(self, data):\n # build the graph between receptor atoms\n data['protein'].node_sigma_emb = self.timestep_emb_func(data['protein'].node_t)\n node_attr_cat = data['protein'].x_cat\n node_attr_sigma_emb = data['protein'].node_sigma_emb\n\n # this assumes the edges were already created in preprocessing since protein's structure is fixed\n edge_index = data['protein', 'protein'].edge_index\n src, dst = edge_index\n edge_vec = data['protein'].pos[dst] - data['protein'].pos[src]\n\n edge_length_emb = self.lig_distance_expansion(edge_vec.norm(dim=-1))\n edge_sigma_emb = data['protein'].node_sigma_emb[src]\n edge_attr = torch.cat([edge_sigma_emb, edge_length_emb], 1)\n edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component')\n\n return node_attr_cat, node_attr_sigma_emb, edge_index, edge_attr, edge_sh\n\n def build_cross_conv_graph(self, data, cross_distance_cutoff):\n # builds the cross edges between ligand and receptor\n if torch.is_tensor(cross_distance_cutoff):\n # different cutoff for every graph (depends on the diffusion time)\n edge_index = radius(data['protein'].pos / cross_distance_cutoff[data['protein'].batch],\n data['ligand'].pos / cross_distance_cutoff[data['ligand'].batch], 1,\n data['protein'].batch, data['ligand'].batch, max_num_neighbors=10000)\n else:\n edge_index = radius(data['protein'].pos, data['ligand'].pos, cross_distance_cutoff,\n data['protein'].batch, data['ligand'].batch, max_num_neighbors=10000)\n\n src, dst = edge_index\n edge_vec = data['protein'].pos[dst] - data['ligand'].pos[src]\n\n edge_length_emb = self.cross_distance_expansion(edge_vec.norm(dim=-1))\n edge_sigma_emb = data['ligand'].node_sigma_emb[src]\n edge_attr = torch.cat([edge_sigma_emb, edge_length_emb], 1)\n edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component')\n\n return edge_index, edge_attr, edge_sh\n\n def build_center_conv_graph(self, data):\n # builds the filter and edges for the convolution generating translational and rotational scores\n edge_index = torch.cat([data['ligand'].batch.unsqueeze(0),\n torch.arange(len(data['ligand'].batch)).to(DEVICE).unsqueeze(0)], dim=0)\n\n center_pos = torch.zeros((data.num_graphs, 3)).to(DEVICE)\n center_pos.index_add_(0, index=data['ligand'].batch, source=data['ligand'].pos)\n center_pos = center_pos / torch.bincount(data['ligand'].batch).unsqueeze(1)\n\n src, dst = edge_index\n edge_vec = data['ligand'].pos[dst] - center_pos[src]\n edge_attr = self.center_distance_expansion(edge_vec.norm(dim=-1))\n edge_sigma_emb = data['ligand'].node_sigma_emb[dst]\n edge_attr = torch.cat([edge_attr, edge_sigma_emb], 1)\n edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component')\n\n return edge_index, edge_attr, edge_sh\n\n def build_bond_conv_graph(self, data):\n # builds the graph for the convolution between the center of the rotatable bonds and the neighbouring nodes\n bonds = data['ligand', 'ligand'].edge_index[:, data['rotation_edge_mask']].long()\n bond_pos = 
(data['ligand'].pos[bonds[0]] + data['ligand'].pos[bonds[1]]) / 2\n bond_batch = data['ligand'].batch[bonds[0]]\n edge_index = radius(data['ligand'].pos, bond_pos, self.lig_max_radius,\n batch_x=data['ligand'].batch, batch_y=bond_batch)\n\n edge_vec = data['ligand'].pos[edge_index[1]] - bond_pos[edge_index[0]]\n edge_attr = self.lig_distance_expansion(edge_vec.norm(dim=-1))\n\n edge_attr = self.final_edge_embedding(edge_attr)\n edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component')\n\n return bonds, edge_index, edge_attr, edge_sh"
}
] | import copy
import time
import os
import yaml
import numpy as np
import pandas as pd
import torch
from argparse import ArgumentParser
from pathlib import Path
from tqdm import tqdm
from scipy.spatial.distance import pdist, cdist
from torch_geometric.loader import DataLoader
from rdkit import Chem
from params import DEVICE
from utils import logger, ExponentialMovingAverage, TtoSigma, get_t_schedule, set_mol_pose
from features import lig_cat_dims, lig_cont_feats, prot_cat_dims, prot_cont_feats, PocketFeaturizer
from dataset import set_time, modify_conformer, randomize_position, PredSDFDataLoader
from model import FitModel
from rai_chem.protein import PDBParser, Protein
from rai_chem.score import get_fit_score | 7,253 |
def main(args):
with open("data/config/train.yml", "r") as f:
config = yaml.load(f, Loader=yaml.FullLoader)
with open("data/config/pred.yml", "r") as f:
pconfig = yaml.load(f, Loader=yaml.FullLoader)
if args.samples is not None:
pconfig["samples"] = args.samples
if args.batch_size is not None:
pconfig["batch_size"] = args.batch_size
tmp_pdb_path = ".tmp_hs.pdb"
os.system(f"reduce -Quiet -Trim {args.pdb} > .tmp.pdb")
os.system(f"reduce -Quiet -NOFLIP .tmp.pdb > {tmp_pdb_path}")
with open(tmp_pdb_path, "r") as f:
pdb_lines = f.readlines()
pocket = PDBParser(args.pdb, pdb_lines, remove_hs=False)
pocket = Protein(args.pdb, pocket.atoms)
pocket_cent = pocket.atoms["Coord"].mean(axis=0)
pf = PocketFeaturizer(pocket, radius=config["prot_radius"], max_neighbors=config["prot_max_neighbors"]).graph_feat
pf["coords"] -= pocket_cent
loader = PredSDFDataLoader(args.sdf, pf, pocket_cent, device=DEVICE)
logger.debug(f"using parameters: {config}")
t_to_sigma = TtoSigma(tr_sigma_min=config["tr_sigma_min"], tr_sigma_max=config["tr_sigma_max"],
rot_sigma_min=config["rot_sigma_min"], rot_sigma_max=config["rot_sigma_max"],
tor_sigma_min=config["tor_sigma_min"], tor_sigma_max=config["tor_sigma_max"])
t_schedule = get_t_schedule(inference_steps=pconfig["inference_steps"])
|
def main(args):
with open("data/config/train.yml", "r") as f:
config = yaml.load(f, Loader=yaml.FullLoader)
with open("data/config/pred.yml", "r") as f:
pconfig = yaml.load(f, Loader=yaml.FullLoader)
if args.samples is not None:
pconfig["samples"] = args.samples
if args.batch_size is not None:
pconfig["batch_size"] = args.batch_size
tmp_pdb_path = ".tmp_hs.pdb"
os.system(f"reduce -Quiet -Trim {args.pdb} > .tmp.pdb")
os.system(f"reduce -Quiet -NOFLIP .tmp.pdb > {tmp_pdb_path}")
with open(tmp_pdb_path, "r") as f:
pdb_lines = f.readlines()
pocket = PDBParser(args.pdb, pdb_lines, remove_hs=False)
pocket = Protein(args.pdb, pocket.atoms)
pocket_cent = pocket.atoms["Coord"].mean(axis=0)
pf = PocketFeaturizer(pocket, radius=config["prot_radius"], max_neighbors=config["prot_max_neighbors"]).graph_feat
pf["coords"] -= pocket_cent
loader = PredSDFDataLoader(args.sdf, pf, pocket_cent, device=DEVICE)
logger.debug(f"using parameters: {config}")
t_to_sigma = TtoSigma(tr_sigma_min=config["tr_sigma_min"], tr_sigma_max=config["tr_sigma_max"],
rot_sigma_min=config["rot_sigma_min"], rot_sigma_max=config["rot_sigma_max"],
tor_sigma_min=config["tor_sigma_min"], tor_sigma_max=config["tor_sigma_max"])
t_schedule = get_t_schedule(inference_steps=pconfig["inference_steps"])
| model = FitModel( | 10 | 2023-11-23 16:09:18+00:00 | 12k |
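A short sketch of how the noise schedule used in main() above behaves, built only from the TtoSigma and get_t_schedule snippets shown in this record; the sigma bounds below are placeholders, since the real values come from data/config/train.yml, which is not included here.

from utils import TtoSigma, get_t_schedule  # mirrors the import block above

# get_t_schedule(5) yields t = 1.0, 0.8, 0.6, 0.4, 0.2 (never exactly 0)
t_to_sigma = TtoSigma(tr_sigma_min=0.1, tr_sigma_max=19.0,
                      rot_sigma_min=0.03, rot_sigma_max=1.55,
                      tor_sigma_min=0.03, tor_sigma_max=3.14)

for t in get_t_schedule(inference_steps=5):
    # geometric interpolation: sigma = sigma_min ** (1 - t) * sigma_max ** t
    tr_sigma, rot_sigma, tor_sigma = t_to_sigma(t, t, t)

At t = 1 each sigma equals its configured maximum and decays toward the corresponding minimum as the reverse diffusion approaches t = 0.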
yuukawahiroshi/ddb-tools | mixins_ddb.py | [
{
"identifier": "DDIModel",
"path": "utils/ddi_utils.py",
"snippet": "class DDIModel:\n def __init__(self, ddi_bytes: bytes) -> None:\n self.ddi_bytes = ddi_bytes\n self.ddi_data = None\n self.phdc_data = {}\n self.tdb_data = {}\n self.sta_data = {}\n self.art_data = {}\n self.vqm_data = {}\n self.offset_map = {}\n\n def read(self, temp_path: Optional[str] = None, cat_only: bool = False):\n if temp_path or cat_only:\n import yaml\n\n if cat_only:\n with open(os.path.join(temp_path, 'sta.yml'), mode='r',\n encoding='utf-8') as sta_f:\n self.sta_data = yaml.load(sta_f)\n with open(os.path.join(temp_path, 'art.yml'), mode='r',\n encoding='utf-8') as art_f:\n self.art_data = yaml.load(art_f)\n vqm_data = None\n if os.path.isfile(os.path.join(temp_path, 'vqm.yml')):\n with open(os.path.join(temp_path, 'vqm.yml'), mode='r',\n encoding='utf-8') as vqm_f:\n self.vqm_data = yaml.load(vqm_f)\n else:\n self.ddi_data = io.BytesIO(self.ddi_bytes)\n # DBSe\n # Tonio.ddi has no DBSe block\n \n # assert int.from_bytes(ddi_data.read(8), byteorder='little') == 0\n # assert ddi_data.read(4).decode() == 'DBSe'\n # assert int.from_bytes(ddi_data.read(4), byteorder='little') == 0\n # assert int.from_bytes(ddi_data.read(8), byteorder='little') == 1\n # assert int.from_bytes(ddi_data.read(4), byteorder='little') == 3\n\n # PHDC\n phdc_offset = self.ddi_bytes.find(b'PHDC')\n if phdc_offset >= 0:\n self.ddi_data.seek(phdc_offset)\n self.phdc_data = self.read_phdc()\n\n self.offset_map['phdc'] = [phdc_offset, self.ddi_data.tell()]\n\n if temp_path:\n with open(os.path.join(temp_path, 'phdc.yml'), mode='w',\n encoding='utf-8') as phdc_f:\n phdc_str = yaml.dump(self.phdc_data, default_flow_style=False,\n sort_keys=False)\n phdc_f.write(phdc_str)\n\n # TDB\n tdb_offset = self.ddi_bytes.find(b'\\xFF'*8+b'TDB ')\n if tdb_offset >= 0:\n self.ddi_data.seek(tdb_offset)\n self.tdb_data = self.read_tdb()\n self.offset_map['tdb'] = [tdb_offset, self.ddi_data.tell()]\n\n if temp_path:\n with open(os.path.join(temp_path, 'tdb.yml'), mode='w',\n encoding='utf-8') as tdb_f:\n tdb_str = yaml.dump(self.tdb_data, default_flow_style=False,\n sort_keys=False)\n tdb_f.write(tdb_str)\n\n # DBV\n dbv_offset = self.ddi_bytes.find(b'\\x00'*8+b'DBV ')\n self.ddi_data.seek(dbv_offset)\n self.read_dbv()\n self.offset_map['dbv'] = [dbv_offset, self.ddi_data.tell()]\n\n # STA\n sta_offset = self.ddi_bytes.find(b'\\x00'*8+b'STA ')\n sta_offset = reverse_search(self.ddi_bytes, b'ARR ', sta_offset) - 8\n self.ddi_data.seek(sta_offset)\n self.sta_data = self.read_sta()\n self.offset_map['sta'] = [sta_offset, self.ddi_data.tell()]\n\n if temp_path:\n with open(os.path.join(temp_path, 'sta.yml'), mode='w',\n encoding='utf-8') as sta_f:\n sta_str = yaml.dump(self.sta_data, default_flow_style=False,\n sort_keys=False)\n sta_f.write(sta_str)\n\n # ART\n art_offset = self.ddi_bytes.find(b'\\x00'*8+b'ART ')\n art_offset = reverse_search(self.ddi_bytes, b'ARR ', art_offset) - 8\n self.ddi_data.seek(art_offset)\n self.art_data = self.read_art()\n self.offset_map['art'] = [art_offset, self.ddi_data.tell()]\n\n if temp_path:\n with open(os.path.join(temp_path, 'art.yml'), mode='w',\n encoding='utf-8') as art_f:\n art_str = yaml.dump(self.art_data, default_flow_style=False,\n sort_keys=False)\n art_f.write(art_str)\n\n # VQM\n vqm_offset = self.ddi_bytes.find(b'\\xFF'*8+b'VQM ')\n self.vqm_data = None\n if vqm_offset != -1:\n self.ddi_data.seek(vqm_offset)\n self.vqm_data = self.read_vqm()\n self.offset_map['vqm'] = [vqm_offset, self.ddi_data.tell()]\n\n if temp_path:\n with open(os.path.join(temp_path, 'vqm.yml'), 
mode='w',\n encoding='utf-8') as vqm_f:\n vqm_str = yaml.dump(self.vqm_data, default_flow_style=False,\n sort_keys=False)\n vqm_f.write(vqm_str)\n \n \n # DDI convert\n self.ddi_data_dict: dict[str, dict[str, list[artp_type]]] = {\n 'sta': {},\n 'art': {},\n }\n\n if self.vqm_data is not None:\n self.ddi_data_dict = {\n 'vqm': {},\n 'sta': {},\n 'art': {},\n }\n vqm_dict = []\n for idx, vqmp in self.vqm_data.items():\n vqm_dict.append({'snd': vqmp['snd'], 'epr': vqmp['epr'], 'pitch': vqmp['pitch1']})\n self.ddi_data_dict['vqm'] = vqm_dict\n\n sta_dict: dict[str, list[artp_type]] = {}\n for stau in self.sta_data.values():\n stau_dict: list[artp_type] = []\n for idx, stap in stau['stap'].items():\n stau_dict.append({'snd': stap['snd'], 'epr': stap['epr'], 'pitch': stap['pitch1']})\n sta_dict[stau['phoneme']] = stau_dict\n self.ddi_data_dict['sta'] = {key: sta_dict[key]\n for key in sorted(sta_dict.keys())}\n\n art_dict: dict[str, list[artp_type]] = {}\n for art in self.art_data.values():\n if 'artu' in art.keys():\n for artu in art['artu'].values():\n key = art['phoneme']+' '+artu['phoneme']\n art_dict[key] = []\n for artp in artu['artp'].values():\n art_dict[key].append({'snd': artp['snd'],\n 'snd_start': artp['snd_start'],\n 'epr': artp['epr'],\n 'pitch': artp['pitch1']})\n if 'art' in art.keys():\n for sub_art in art['art'].values():\n sub_art: art_type\n if 'artu' in sub_art.keys():\n for artu in sub_art['artu'].values():\n key = art['phoneme']+' '+sub_art['phoneme']+' '+artu['phoneme']\n art_dict[key] = []\n for artp in artu['artp'].values():\n art_dict[key].append({'snd': artp['snd'],\n 'snd_start': artp['snd_start'],\n 'epr': artp['epr'],\n 'pitch': artp['pitch1']})\n self.ddi_data_dict['art'] = {key: art_dict[key]\n for key in sorted(art_dict.keys())}\n\n\n def save(self, dst_path: Optional[str] = None):\n import yaml\n\n with open(os.path.join(dst_path, 'ddi.yml'), mode='w', encoding='utf-8') as ddi_f:\n ddi_str = yaml.dump(self.ddi_data_dict, default_flow_style=False,\n sort_keys=False)\n ddi_f.write(ddi_str)\n\n\n def read_phdc(self):\n phdc_data: dict[str, dict[int, list[str]]\n | dict[str, dict[int, str]]\n | dict[str, list[str]]\n | str]\n phdc_data = {}\n # PHDC\n phoneme_data: dict[str, list[str]] = {\"voiced\": [], \"unvoiced\": []}\n assert self.ddi_data.read(4).decode() == 'PHDC'\n phdc_size = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 4\n phoneme_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for i in range(phoneme_num):\n bytes_str = self.ddi_data.read(0x1F)\n assert bytes_str[-1] in [0, 1]\n real_data = bytes_str[:-1].decode().strip('\\x00')\n\n phoneme_type = \"voiced\" if bytes_str[-1] == 0 else \"unvoiced\"\n\n phoneme_data[phoneme_type].append(real_data)\n phdc_data['phoneme'] = phoneme_data\n\n # PHG2\n phg2_data: dict[str, dict[int, str]] = {}\n assert self.ddi_data.read(4).decode() == 'PHG2'\n phg2_size = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n phg2_epr_guide_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for i in range(phg2_epr_guide_num):\n phg2_key = read_str(self.ddi_data)\n phg2_data[phg2_key] = {}\n temp_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for j in range(temp_num):\n idx = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n phg2_data[phg2_key][idx] = read_str(self.ddi_data)\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n phdc_data['phg2'] = phg2_data\n\n 
# epr_guide\n epr_guide_data: dict[str, list[str]] = {}\n epr_guide_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n epr_guide_size = phdc_size-phg2_size-0x10-0x1F*phoneme_num-4\n epr_guide_bytes = self.ddi_data.read(epr_guide_size)\n offset = 0\n for i in range(epr_guide_num):\n key = epr_guide_bytes[offset:offset+0x20].decode().strip('\\x00')\n assert int.from_bytes(epr_guide_bytes[offset+0x20:offset+0x24],\n byteorder='little') == 4\n epr_guide_data[key] = []\n offset += 0x24\n while(offset < len(epr_guide_bytes) and epr_guide_bytes[offset] == 0):\n if epr_guide_bytes[offset+7] == 0x40:\n value = epr_guide_bytes[offset:offset + 7]\n start_idx = 0\n for i in range(7):\n if value[i] != 0:\n start_idx = i\n break\n # TODO: Need to check carefully. \"b'XXX'\" and we only take XXX\n value = bytes_to_str(value[start_idx:])\n epr_guide_data[key].append(value)\n else:\n assert int.from_bytes(epr_guide_bytes[offset:offset + 8],\n byteorder='little') == 0\n epr_guide_data[key].append('')\n offset += 8\n assert offset == len(epr_guide_bytes)\n phdc_data['epr_guide'] = epr_guide_data\n\n # hash string\n # phdc_data['hash'] = self.ddi_data.read(0x20).decode()\n # assert int.from_bytes(self.ddi_data.read(0xE0), byteorder='little') == 0\n # assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n # assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 2\n\n return phdc_data\n\n\n def read_tdb(self) -> dict[int, str]:\n tdb_data: dict[int, str] = {}\n assert self.ddi_data.read(8) == b'\\xFF'*8\n assert self.ddi_data.read(4).decode() == 'TDB '\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi (B9 13 10 00)\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 1\n tmm_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n str_list = ['pitch', 'dynamics', 'opening']\n for i in range(tmm_num):\n assert self.ddi_data.read(8) == b'\\xFF'*8\n assert self.ddi_data.read(4).decode() == 'TMM '\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 1\n idx = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n # print(i, idx)\n str_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert str_num == 3\n for j in range(str_num):\n assert self.ddi_data.read(8) == b'\\xFF'*8\n assert int.from_bytes(read_arr(self.ddi_data), byteorder='little') == 0\n assert read_str(self.ddi_data) == str_list[j]\n phoneme = read_str(self.ddi_data)\n tdb_data[idx] = phoneme\n assert read_str(self.ddi_data) == 'timbre'\n return tdb_data\n\n\n def read_dbv(self) -> None:\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n assert self.ddi_data.read(4).decode() == 'DBV '\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 1\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # 4 for AVANNA, 5 for others?\n\n\n def read_sta(self) -> dict[int, artu_type]:\n sta_data: dict[int, artu_type] = {}\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n assert int.from_bytes(read_arr(self.ddi_data), byteorder='little') == 1\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n\n assert self.ddi_data.read(4).decode() == 'STA '\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert 
int.from_bytes(self.ddi_data.read(8), byteorder='little') == 1\n stau_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for i in range(stau_num):\n stau_data: artu_type = {'phoneme': '', 'stap': {}}\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n assert self.ddi_data.read(4).decode() == 'STAu'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n stau_idx = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert self.ddi_data.read(8) == b'\\xFF'*8\n stap_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for j in range(stap_num):\n stap_data: artp_type = {'snd': '', 'snd_length': '', 'epr': []}\n _pos = self.ddi_data.tell()\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n assert self.ddi_data.read(4).decode() == 'STAp'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n stap_data['unknown1'] = bytes_to_str(self.ddi_data.read(0x0a))\n stap_data['pitch1'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n stap_data['pitch2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n stap_data['unknown2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n stap_data['dynamics'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n stap_data['unknown3'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n \n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 2\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0x3D\n assert self.ddi_data.read(4).decode() == 'EMPT'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert read_str(self.ddi_data) == 'SND'\n stap_data['snd_length'] = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert self.ddi_data.read(4).decode() == 'EMPT'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert read_str(self.ddi_data) == 'EpR'\n self.ddi_data.read(4) # == b'\\xFF'*4 Exception: Tonio.ddi (epr_num)\n epr_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n epr_list: list[str] = []\n for k in range(epr_num):\n epr_offset_pos = self.ddi_data.tell()\n epr_offset = int.from_bytes(self.ddi_data.read(8),\n byteorder='little')\n epr_list.append(f'{epr_offset_pos:0>8x}={epr_offset:0>8x}')\n stap_data['epr'] = epr_list\n stap_data['fs'] = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert self.ddi_data.read(2) == b'\\x01\\x00'\n snd_identifier = int.from_bytes(self.ddi_data.read(4),\n byteorder='little')\n # TODO: why this number?\n snd_offset_pos = self.ddi_data.tell()\n snd_offset = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n stap_data['snd'] = f'{snd_offset_pos:0>8x}={snd_offset:016x}_{snd_identifier:08x}'\n\n _pos = self.ddi_data.tell()\n stap_data['unknown4'] = bytes_to_str(self.ddi_data.read(0x10))\n stap_idx = read_str(self.ddi_data)\n assert stap_idx not in stau_data['stap'].keys()\n stau_data['stap'][stap_idx] = stap_data\n stau_data['stap'] = {k: stau_data['stap'][k]\n for k in sorted(stau_data['stap'].keys())}\n 
stau_data['phoneme'] = read_str(self.ddi_data)\n sta_data[stau_idx] = stau_data\n sta_data = {k: sta_data[k] for k in sorted(sta_data.keys())}\n assert read_str(self.ddi_data) == 'normal'\n assert read_str(self.ddi_data) == 'stationary'\n return sta_data\n\n\n def read_art(self) -> dict[int, art_type]:\n total_art_data: dict[int, art_type] = {}\n int.from_bytes(self.ddi_data.read(8), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(read_arr(self.ddi_data), byteorder='little') != 0\n while(True):\n start = self.ddi_data.read(8)\n if not (start in [b'\\x00'*8, b'\\xFF'*8]):\n offset = self.ddi_data.tell()-8\n self.ddi_data.seek(offset)\n assert read_str(self.ddi_data) == 'articulation'\n break\n assert self.ddi_data.read(4).decode() == 'ART '\n art_idx, art_data = self.read_art_block()\n total_art_data[art_idx] = art_data\n total_art_data = {key: total_art_data[key]\n for key in sorted(total_art_data.keys())}\n return total_art_data\n\n\n def read_art_block(self) -> tuple[int, art_type]:\n art_data: art_type = {'phoneme': '', 'artu': {}, 'art': {}}\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n art_idx = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n artu_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n i = -1\n for i in range(artu_num):\n artu_data: artu_type = {'phoneme': '', 'artp': {}}\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n block_type = self.ddi_data.read(4).decode()\n if block_type == 'ART ':\n sub_art_idx, sub_art_data = self.read_art_block()\n art_data['art'][sub_art_idx] = sub_art_data\n continue\n else:\n assert block_type == 'ARTu'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n artu_idx = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n # TODO: why to be 1?\n assert int.from_bytes(self.ddi_data.read(8),\n byteorder='little') in [0, 1]\n self.ddi_data.read(4)\n assert self.ddi_data.read(4) == b'\\xFF'*4\n artp_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for j in range(artp_num):\n artp_data: artp_type = {'snd': '', 'snd_unknown': '', 'epr': []}\n dev_artp_offset = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n artp_data['dev_artp'] = f'{dev_artp_offset:0>8x}'\n assert self.ddi_data.read(4).decode() == 'ARTp'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n artp_data['unknown1'] = bytes_to_str(self.ddi_data.read(0x0a))\n artp_data['pitch1'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n artp_data['pitch2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n artp_data['unknown2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n artp_data['dynamics'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n artp_data['unknown3'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n # print(f'art {i:4d} {j:4d} {unknown}')\n # if env['unknown'] is None:\n # env['unknown'] = unknown\n # else:\n # assert env['unknown'] == unknown\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 2\n # TODO: This 
doesn't seem to be an index actually\n artp_idx = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n assert self.ddi_data.read(4).decode() == 'EMPT'\n snd_len_empt1 = int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n # artp_data['snd_len_empt1'] = f'{snd_len_empt1:08x}'\n assert read_str(self.ddi_data) == 'SND'\n snd_len_sta = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n artp_data['snd_len_sta'] = f'{snd_len_sta:08x}'\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert self.ddi_data.read(4).decode() == 'EMPT'\n snd_len_empt2 = int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n # artp_data['snd_len_empt2'] = f'{snd_len_empt2:08x}'\n assert read_str(self.ddi_data) == 'EpR'\n loc = self.ddi_data.tell()\n try:\n epr_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n epr_list: list[str] = []\n for k in range(epr_num):\n epr_offset_pos = self.ddi_data.tell()\n epr_offset = int.from_bytes(self.ddi_data.read(8),\n byteorder='little')\n epr_list.append(f'{epr_offset_pos:0>8x}={epr_offset:0>8x}')\n artp_data['epr'] = epr_list\n artp_data['fs'] = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert self.ddi_data.read(2) == b'\\x01\\x00'\n except AssertionError:\n self.ddi_data.seek(loc)\n self.ddi_data.read(4) # == b'\\xFF'*4 Exception: Tonio.ddi (epr_num)\n epr_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n epr_list: list[str] = []\n for k in range(epr_num):\n epr_offset_pos = self.ddi_data.tell()\n epr_offset = int.from_bytes(self.ddi_data.read(8),\n byteorder='little')\n epr_list.append(f'{epr_offset_pos:0>8x}={epr_offset:0>8x}')\n artp_data['epr'] = epr_list\n artp_data['fs'] = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert self.ddi_data.read(2) == b'\\x01\\x00'\n \n snd_identifier = int.from_bytes(self.ddi_data.read(4),\n byteorder='little')\n # TODO: why this number?\n snd_offset_pos = self.ddi_data.tell()\n snd_offset = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n artp_data['snd'] = f'{snd_offset_pos:08x}={snd_offset-0x12:016x}_{snd_identifier:08x}'\n\n snd_offset2_pos = self.ddi_data.tell()\n snd_offset2 = int.from_bytes(self.ddi_data.read(8), byteorder='little') # == snd_offset+0x800 Exception: Tonio.ddi (0)\n artp_data['snd_start'] = f'{snd_offset2_pos:08x}={snd_offset2-0x12:016x}_{snd_identifier:08x}'\n\n ddi_bytes: bytes = self.ddi_bytes[self.ddi_data.tell():self.ddi_data.tell() + 1024]\n align_length = ddi_bytes.find(b'default')-4\n align_bytes = self.ddi_data.read(align_length)\n frame_align = []\n if align_length > 4:\n align_group_num = int.from_bytes(align_bytes[0:4], byteorder='little')\n # In V3 format, each group has int32 * 4 bytes\n align_bytes = align_bytes[4:]\n align_io = io.BytesIO(align_bytes)\n for _ in range(0, align_group_num):\n frame_align_group = {\n \"start\": int.from_bytes(align_io.read(4), byteorder='little'),\n \"end\": int.from_bytes(align_io.read(4), byteorder='little'),\n \"start2\": int.from_bytes(align_io.read(4), byteorder='little'),\n \"end2\": int.from_bytes(align_io.read(4), byteorder='little'),\n }\n frame_align.append(frame_align_group)\n else: # V2 format\n frame_align_group = []\n for i in range(0, len(align_bytes), 4):\n frame_align_group.append(int.from_bytes(align_bytes[i:i+4], byteorder='little'))\n frame_align.append(frame_align_group)\n artp_data['frame_align'] = frame_align\n \n assert read_str(self.ddi_data) == 'default'\n\n assert 
artp_idx not in artu_data['artp'].keys()\n artu_data['artp'][artp_idx] = artp_data\n artu_data['artp'] = {k: artu_data['artp'][k]\n for k in sorted(artu_data['artp'].keys())}\n artu_data['phoneme'] = read_str(self.ddi_data)\n art_data['artu'][artu_idx] = artu_data\n art_data['artu'] = {k: art_data['artu'][k]\n for k in sorted(art_data['artu'].keys())}\n art_data['art'] = {k: art_data['art'][k]\n for k in sorted(art_data['art'].keys())}\n art_data['phoneme'] = read_str(self.ddi_data)\n if len(art_data['art'].keys()) == 0:\n del art_data['art']\n if len(art_data['artu'].keys()) == 0:\n del art_data['artu']\n return art_idx, art_data\n\n\n def read_vqm(self) -> dict[int, artp_type]:\n vqm_data: dict[int, artp_type] = {}\n\n assert self.ddi_data.read(8) == b'\\xFF'*8\n assert self.ddi_data.read(4).decode() == 'VQM '\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n assert self.ddi_data.read(8) == b'\\xFF'*8\n\n assert self.ddi_data.read(4).decode() == 'VQMu'\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n\n vqmp_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == vqmp_num\n for i in range(vqmp_num):\n vqmp_data = {'snd': '', 'epr': []}\n assert self.ddi_data.read(8) == b'\\xFF'*8\n assert self.ddi_data.read(4).decode() == 'VQMp'\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n vqmp_data['unknown1'] = bytes_to_str(self.ddi_data.read(0x0a))\n vqmp_data['pitch1'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n vqmp_data['pitch2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n vqmp_data['unknown2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n vqmp_data['dynamics'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n # TODO: that may not be same as env['unknown']\n vqmp_data['unknown3'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert self.ddi_data.read(4) == b'\\xFF'*4\n epr_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n epr_list: list[str] = []\n for k in range(epr_num):\n ddi_epr_offset = self.ddi_data.tell()\n epr_offset = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n epr_list.append(f'{ddi_epr_offset:0>8x}={epr_offset:0>8x}')\n vqmp_data['epr'] = epr_list\n vqmp_data['fs'] = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert self.ddi_data.read(2) == b'\\x01\\x00'\n snd_identifier = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n ddi_snd_offset = self.ddi_data.tell()\n snd_offset = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n vqmp_data['snd'] = f'{ddi_snd_offset:0>8x}={snd_offset:016x}_{snd_identifier:08x}'\n assert self.ddi_data.read(0x10) == b'\\xFF'*0x10\n vqmp_idx = int(read_str(self.ddi_data))\n vqm_data[vqmp_idx] = vqmp_data\n assert read_str(self.ddi_data) == 'GROWL'\n assert read_str(self.ddi_data) == 'vqm'\n return vqm_data"
},
{
"identifier": "str_to_bytes",
"path": "utils/ddi_utils.py",
"snippet": "def str_to_bytes(data: str) -> bytes:\n return bytes([int(piece, 16) for piece in data.split(' ')])"
},
{
"identifier": "str_to_data",
"path": "utils/ddi_utils.py",
"snippet": "def str_to_data(data: str) -> bytes:\n data = str(data)\n return len(data).to_bytes(4, byteorder='little') + data.encode()"
},
{
"identifier": "stream_reverse_search",
"path": "utils/ddi_utils.py",
"snippet": "def stream_reverse_search(data: io.BufferedReader, search: bytes, offset: int, limit: int = -1) -> int:\n if limit == -1:\n limit = 1024 * 1024 * 10\n offset -= len(search)\n for i in range(offset, 0, -1):\n data.seek(i)\n if data.read(len(search)) == search:\n return i\n if offset - i > limit:\n break\n\n return -1"
}
] | from typing import TypedDict
from utils.ddi_utils import DDIModel, str_to_bytes, str_to_data, stream_reverse_search
import argparse
import io
import re
import os
import os.path
import struct | 10,377 | epr: list[int]
snd_id: int
snd: int
fs: int
unknown1: str
pitch1: float
pitch2: float
unknown2: float
unknown3: float
dynamics: float
def byte_replace(src_bytes: bytes, offset: int, override_len: int, replace_bytes: bytes):
return src_bytes[:offset] + replace_bytes + src_bytes[offset + override_len:]
def parse_args(args=None): # : list[str]
# initialize parser
parser = argparse.ArgumentParser(formatter_class=SmartFormatter)
parser.add_argument('--src_path', required=True,
help='source ddi file path')
parser.add_argument('--mixins_path',
help='the mixins ddi file path. default to be same as src_path')
parser.add_argument('--dst_path',
help='output folder, '
'default to be "./[singer name]/mixins"')
parser.add_argument('--mixins_item',
choices=['vqm', 'sta2vqm'],
default='vqm',
help='R|mixins item, '
'default to be "vqm"\n'
'select from: \n'
' vqm: growl\n'
' sta2vqm: convert stationary entry to growl\n')
parser.add_argument('--sta2vqm_phoneme',
default="Grw",
help='phoneme for sta2vqm, will use this phoneme to generate growl, default to be "Grw"')
# parse args
args = parser.parse_args(args)
src_ddi_path: str = os.path.normpath(args.src_path)
if not os.path.exists(src_ddi_path):
raise Exception("ddi file not exists")
src_path = os.path.dirname(src_ddi_path)
src_singer_name = os.path.splitext(os.path.basename(src_ddi_path))[0]
mixins_ddi_path = args.mixins_path or src_ddi_path
mixins_ddi_path: str = os.path.normpath(mixins_ddi_path)
mixins_path = os.path.dirname(mixins_ddi_path)
mixins_singer_name = os.path.splitext(os.path.basename(mixins_ddi_path))[0]
dst_path: str = args.dst_path
if dst_path is None:
dst_path = os.path.join(src_path, "mixins")
dst_path: str = os.path.normpath(dst_path)
# make dirs
if not os.path.exists(dst_path):
os.makedirs(dst_path)
mixins_item = args.mixins_item
return src_path, src_singer_name, mixins_path, mixins_singer_name, dst_path, mixins_item, args
def _create_vqm_stream(vqm_meta_list: list[VQMMeta]):
# Create VQM struct
vqm_stream = io.BytesIO()
vqm_stream.write(b'\xFF'*8)
vqm_stream.write(b'VQM ')
vqm_stream.write((0).to_bytes(4, byteorder='little'))
vqm_stream.write((1).to_bytes(4, byteorder='little'))
vqm_stream.write((0).to_bytes(4, byteorder='little'))
vqm_stream.write((1).to_bytes(4, byteorder='little'))
vqm_stream.write(b'\xFF'*8)
vqm_stream.write(b'VQMu')
vqm_stream.write((0).to_bytes(4, byteorder='little'))
vqm_stream.write((1).to_bytes(4, byteorder='little'))
vqm_stream.write((0).to_bytes(4, byteorder='little'))
vqm_stream.write(len(vqm_meta_list).to_bytes(4, byteorder='little'))
vqm_stream.write(len(vqm_meta_list).to_bytes(4, byteorder='little'))
for vqm_meta in vqm_meta_list:
vqm_stream.write(b'\xFF'*8)
vqm_stream.write(b"VQMp")
vqm_stream.write((0).to_bytes(4, byteorder='little'))
vqm_stream.write((0).to_bytes(4, byteorder='little'))
vqm_stream.write((1).to_bytes(4, byteorder='little'))
vqm_stream.write(str_to_bytes(vqm_meta["unknown1"]))
vqm_stream.write(struct.pack("<f", 224.0)) # Unknown
vqm_stream.write(struct.pack("<f", vqm_meta["pitch2"]))
vqm_stream.write(struct.pack("<f", vqm_meta["unknown2"]))
vqm_stream.write(struct.pack("<f", vqm_meta["dynamics"]))
vqm_stream.write(struct.pack("<f", vqm_meta["unknown3"]))
vqm_stream.write((0).to_bytes(4, byteorder='little'))
# EpR
vqm_stream.write(b'\xFF'*4)
vqm_stream.write(len(vqm_meta["epr"]).to_bytes(4, byteorder='little'))
for epr_offset in vqm_meta["epr"]:
vqm_stream.write(epr_offset.to_bytes(8, byteorder='little'))
# SND
vqm_stream.write(vqm_meta["fs"].to_bytes(4, byteorder='little'))
vqm_stream.write(b'\x01\x00')
vqm_stream.write(vqm_meta["snd_id"].to_bytes(4, byteorder='little'))
vqm_stream.write(vqm_meta["snd"].to_bytes(8, byteorder='little'))
vqm_stream.write(b'\xFF'*0x10)
vqm_stream.write(str_to_data(vqm_meta["idx"]))
vqm_stream.write(str_to_data("GROWL"))
vqm_stream.write(str_to_data("vqm"))
return vqm_stream
| #!/bin/env python3
# I thought what I'd do was, I'd pretend I was one of those deaf-mutes.
from __future__ import annotations
ddi_footer = b'\x05\x00\x00\x00' + "voice".encode()
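# argparse help formatter that keeps explicit line breaks in help strings prefixed with 'R|'.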
class SmartFormatter(argparse.HelpFormatter):
def _split_lines(self, text, width):
if text.startswith('R|'):
return text[2:].splitlines()
# this is the RawTextHelpFormatter._split_lines
return argparse.HelpFormatter._split_lines(self, text, width)
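# Typed fields describing one VQMp (growl) entry: EpR offsets, SND offset/id, sample rate, pitch and dynamics values.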
class VQMMeta(TypedDict):
idx: str
epr: list[int]
snd_id: int
snd: int
fs: int
unknown1: str
pitch1: float
pitch2: float
unknown2: float
unknown3: float
dynamics: float
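# Return src_bytes with override_len bytes starting at offset replaced by replace_bytes.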
def byte_replace(src_bytes: bytes, offset: int, override_len: int, replace_bytes: bytes):
return src_bytes[:offset] + replace_bytes + src_bytes[offset + override_len:]
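# Parse command-line arguments and resolve the source, mixins, and output paths.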
def parse_args(args=None): # : list[str]
# initialize parser
parser = argparse.ArgumentParser(formatter_class=SmartFormatter)
parser.add_argument('--src_path', required=True,
help='source ddi file path')
parser.add_argument('--mixins_path',
help='the mixins ddi file path. default to be same as src_path')
parser.add_argument('--dst_path',
help='output folder, '
'default to be "./[singer name]/mixins"')
parser.add_argument('--mixins_item',
choices=['vqm', 'sta2vqm'],
default='vqm',
help='R|mixins item, '
'default to be "vqm"\n'
'select from: \n'
' vqm: growl\n'
' sta2vqm: convert stationary entry to growl\n')
parser.add_argument('--sta2vqm_phoneme',
default="Grw",
help='phoneme for sta2vqm, will use this phoneme to generate growl, default to be "Grw"')
# parse args
args = parser.parse_args(args)
src_ddi_path: str = os.path.normpath(args.src_path)
if not os.path.exists(src_ddi_path):
raise Exception("ddi file not exists")
src_path = os.path.dirname(src_ddi_path)
src_singer_name = os.path.splitext(os.path.basename(src_ddi_path))[0]
mixins_ddi_path = args.mixins_path or src_ddi_path
mixins_ddi_path: str = os.path.normpath(mixins_ddi_path)
mixins_path = os.path.dirname(mixins_ddi_path)
mixins_singer_name = os.path.splitext(os.path.basename(mixins_ddi_path))[0]
dst_path: str = args.dst_path
if dst_path is None:
dst_path = os.path.join(src_path, "mixins")
dst_path: str = os.path.normpath(dst_path)
# make dirs
if not os.path.exists(dst_path):
os.makedirs(dst_path)
mixins_item = args.mixins_item
return src_path, src_singer_name, mixins_path, mixins_singer_name, dst_path, mixins_item, args
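# Build the binary VQM block (VQM /VQMu/VQMp records) from the collected VQMMeta entries.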
def _create_vqm_stream(vqm_meta_list: list[VQMMeta]):
# Create VQM struct
vqm_stream = io.BytesIO()
vqm_stream.write(b'\xFF'*8)
vqm_stream.write(b'VQM ')
vqm_stream.write((0).to_bytes(4, byteorder='little'))
vqm_stream.write((1).to_bytes(4, byteorder='little'))
vqm_stream.write((0).to_bytes(4, byteorder='little'))
vqm_stream.write((1).to_bytes(4, byteorder='little'))
vqm_stream.write(b'\xFF'*8)
vqm_stream.write(b'VQMu')
vqm_stream.write((0).to_bytes(4, byteorder='little'))
vqm_stream.write((1).to_bytes(4, byteorder='little'))
vqm_stream.write((0).to_bytes(4, byteorder='little'))
vqm_stream.write(len(vqm_meta_list).to_bytes(4, byteorder='little'))
vqm_stream.write(len(vqm_meta_list).to_bytes(4, byteorder='little'))
for vqm_meta in vqm_meta_list:
vqm_stream.write(b'\xFF'*8)
vqm_stream.write(b"VQMp")
vqm_stream.write((0).to_bytes(4, byteorder='little'))
vqm_stream.write((0).to_bytes(4, byteorder='little'))
vqm_stream.write((1).to_bytes(4, byteorder='little'))
vqm_stream.write(str_to_bytes(vqm_meta["unknown1"]))
vqm_stream.write(struct.pack("<f", 224.0)) # Unknown
vqm_stream.write(struct.pack("<f", vqm_meta["pitch2"]))
vqm_stream.write(struct.pack("<f", vqm_meta["unknown2"]))
vqm_stream.write(struct.pack("<f", vqm_meta["dynamics"]))
vqm_stream.write(struct.pack("<f", vqm_meta["unknown3"]))
vqm_stream.write((0).to_bytes(4, byteorder='little'))
# EpR
vqm_stream.write(b'\xFF'*4)
vqm_stream.write(len(vqm_meta["epr"]).to_bytes(4, byteorder='little'))
for epr_offset in vqm_meta["epr"]:
vqm_stream.write(epr_offset.to_bytes(8, byteorder='little'))
# SND
vqm_stream.write(vqm_meta["fs"].to_bytes(4, byteorder='little'))
vqm_stream.write(b'\x01\x00')
vqm_stream.write(vqm_meta["snd_id"].to_bytes(4, byteorder='little'))
vqm_stream.write(vqm_meta["snd"].to_bytes(8, byteorder='little'))
vqm_stream.write(b'\xFF'*0x10)
vqm_stream.write(str_to_data(vqm_meta["idx"]))
vqm_stream.write(str_to_data("GROWL"))
vqm_stream.write(str_to_data("vqm"))
return vqm_stream
| def mixins_vqm(src_ddi_bytes: bytes, output_stream: io.BufferedWriter, mixins_ddi_model: DDIModel, mixins_ddb_stream: io.BufferedReader): | 0 | 2023-11-20 11:37:46+00:00 | 12k |
shercoo/RGDiffSR | text_super_resolution/interfaces/super_resolution.py | [
{
"identifier": "util",
"path": "utils/util.py",
"snippet": "def str_filt(str_, voc_type):\n def __init__(self, alphabet):\n def encode(self, text):\n def decode(self, t, length, raw=False):\n def __init__(self):\n def add(self, v):\n def reset(self):\n def val(self):\ndef oneHot(v, v_length, nc):\ndef loadData(v, data):\ndef prettyPrint(v):\ndef assureRatio(img):\nclass strLabelConverter(object):\nclass averager(object):"
},
{
"identifier": "ssim_psnr",
"path": "utils/ssim_psnr.py",
"snippet": "def calculate_psnr(img1, img2):\ndef weighted_calculate_psnr(img1, img2, weighted_mask):\ndef gaussian(window_size, sigma):\ndef create_window(window_size, channel):\ndef create_rect_window(window_H, window_W, channel):\ndef _ssim_weighted(img1_, img2_, window, window_size, channel, weighted_mask, size_average=True):\ndef _ssim(img1, img2, window, window_size, channel, size_average=True):\ndef _tri_ssim(img1, img2, img3, window, window_size, channel, size_average=True):\ndef _ssim_rect(img1, img2, window, window_size, channel, size_average=True):\n def __init__(self, size_average=True):\n def forward(self, img1, img2):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2, img3):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2, weighted_mask):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2):\ndef ssim(img1, img2, window_size=11, size_average=True):\ndef ssim_weighted(img1, img2, weighted_mask, window_size=11, size_average=True):\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n H, W = window_size\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\nclass Distorted_SSIM(torch.nn.Module):\nclass SSIM(torch.nn.Module):\nclass TRI_SSIM(torch.nn.Module):\nclass SSIM_WEIGHTED(torch.nn.Module):\nclass SSIM_TSR(torch.nn.Module):"
},
{
"identifier": "base",
"path": "text_super_resolution/interfaces/base.py",
"snippet": "class TextBase(object):\nclass AsterInfo(object):\n def __init__(self, config, args, opt_TPG=None):\n def get_train_data(self):\n def get_val_data(self):\n def get_test_data(self, dir_):\n def generator_init(self, iter=-1, resume_in=None):\n def global_model_init(self, iter=-1):\n def optimizer_init(self, model, recognizer=None, global_model=None):\n def tripple_display(self, image_in, image_out, image_target, pred_str_lr, pred_str_sr, label_strs, index):\n def test_display(self, image_in, image_out, image_target, pred_str_lr, pred_str_sr, label_strs, str_filt):\n def save_checkpoint(self, netG_list, epoch, iters, best_acc_dict, best_model_info, is_best, converge_list, recognizer=None, prefix=\"acc\", global_model=None):\n def MORAN_init(self):\n def parse_moran_data(self, imgs_input):\n def CRNN_init(self, recognizer_path=None, opt=None):\n def CRNNRes18_init(self, recognizer_path=None, opt=None):\n def TPG_init(self, recognizer_path=None, opt=None):\n def parse_crnn_data(self, imgs_input_, ratio_keep=False):\n def parse_OPT_data(self, imgs_input_, ratio_keep=False):\n def Aster_init(self):\n def parse_aster_data(self, imgs_input):\n def VisionLAN_init(self, path=None):\n def parse_visionlan_data(self, imgs_input):\n def __init__(self, voc_type):\n MORAN = moran.MORAN(1, len(alphabet.split(':')), 256, 32, 100, BidirDecoder=True,\n inputDataType='torch.cuda.FloatTensor', CUDA=True)\n MORAN = MORAN.to(self.device)\n MORAN = torch.nn.DataParallel(MORAN, device_ids=range(cfg.ngpu))\n R = imgs_input[:, 0:1, :, :]\n G = imgs_input[:, 1:2, :, :]\n B = imgs_input[:, 2:3, :, :]\n R = imgs_input[:, 0:1, :, :]\n G = imgs_input[:, 1:2, :, :]\n B = imgs_input[:, 2:3, :, :]\n R = imgs_input[:, 0:1, :, :]\n G = imgs_input[:, 1:2, :, :]\n B = imgs_input[:, 2:3, :, :]"
},
{
"identifier": "AverageMeter",
"path": "utils/meters.py",
"snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count"
},
{
"identifier": "get_string_aster",
"path": "utils/metrics.py",
"snippet": "def get_string_aster(output, target, dataset=None):\n # label_seq\n assert output.dim() == 2 and target.dim() == 2\n\n end_label = dataset.char2id[dataset.EOS]\n unknown_label = dataset.char2id[dataset.UNKNOWN]\n num_samples, max_len_labels = output.size()\n num_classes = len(dataset.char2id.keys())\n assert num_samples == target.size(0) and max_len_labels == target.size(1)\n output = to_numpy(output)\n target = to_numpy(target)\n\n # list of char list\n pred_list, targ_list = [], []\n for i in range(num_samples):\n pred_list_i = []\n for j in range(max_len_labels):\n if output[i, j] != end_label:\n if output[i, j] != unknown_label:\n try:\n pred_list_i.append(dataset.id2char[output[i, j]])\n except:\n embed(header='problem')\n else:\n break\n pred_list.append(pred_list_i)\n\n for i in range(num_samples):\n targ_list_i = []\n for j in range(max_len_labels):\n if target[i, j] != end_label:\n if target[i, j] != unknown_label:\n targ_list_i.append(dataset.id2char[target[i, j]])\n else:\n break\n targ_list.append(targ_list_i)\n\n # char list to string\n # if dataset.lowercase:\n if True:\n # pred_list = [''.join(pred).lower() for pred in pred_list]\n # targ_list = [''.join(targ).lower() for targ in targ_list]\n pred_list = [_normalize_text(pred) for pred in pred_list]\n targ_list = [_normalize_text(targ) for targ in targ_list]\n else:\n pred_list = [''.join(pred) for pred in pred_list]\n targ_list = [''.join(targ) for targ in targ_list]\n\n return pred_list, targ_list"
},
{
"identifier": "get_string_crnn",
"path": "utils/metrics.py",
"snippet": "def get_string_crnn(outputs_, use_chinese, alphabet='-0123456789abcdefghijklmnopqrstuvwxyz'):\n outputs = outputs_.permute(1, 0, 2).contiguous()\n predict_result = []\n\n if use_chinese:\n alphabet = open(\"al_chinese.txt\", 'r').readlines()[0].replace(\"\\n\", \"\")\n\n for output in outputs:\n max_index = torch.max(output, 1)[1]\n\n out_str = \"\"\n last = \"\"\n for i in max_index:\n if alphabet[i] != last:\n if i != 0:\n out_str += alphabet[i]\n last = alphabet[i]\n else:\n last = \"\"\n\n predict_result.append(out_str)\n return predict_result"
},
{
"identifier": "Accuracy",
"path": "utils/metrics.py",
"snippet": "def Accuracy(output, target, dataset=None):\n pred_list, targ_list = get_string_aster(output, target, dataset)\n\n acc_list = [(pred == targ) for pred, targ in zip(pred_list, targ_list)]\n accuracy = 1.0 * sum(acc_list) / len(acc_list)\n return accuracy"
},
{
"identifier": "str_filt",
"path": "utils/util.py",
"snippet": "def str_filt(str_, voc_type):\n alpha_dict = {\n 'digit': string.digits,\n 'lower': string.digits + string.ascii_lowercase,\n 'upper': string.digits + string.ascii_letters,\n 'all': string.digits + string.ascii_letters + string.punctuation,\n 'chinese': open(\"al_chinese.txt\", \"r\").readlines()[0].replace(\"\\n\", \"\")\n }\n if voc_type == 'lower':\n str_ = str_.lower()\n\n if voc_type == 'chinese': # Chinese character only\n new_str = \"\"\n for ch in str_:\n if '\\u4e00' <= ch <= '\\u9fa5' or ch in string.digits + string.ascii_letters:\n new_str += ch\n str_ = new_str\n for char in str_:\n if char not in alpha_dict[voc_type]: #voc_type\n str_ = str_.replace(char, '')\n return str_"
},
{
"identifier": "utils_moran",
"path": "utils/utils_moran.py",
"snippet": "class strLabelConverterForAttention(object):\nclass averager(object):\n def __init__(self, alphabet, sep):\n def scan(self, text):\n def encode(self, text, scanned=True):\n def decode(self, t, length):\n def __init__(self):\n def add(self, v):\n def reset(self):\n def val(self):\ndef loadData(v, data):\ndef get_torch_version():"
},
{
"identifier": "gumbel_softmax",
"path": "text_super_resolution/model/gumbel_softmax.py",
"snippet": "def gumbel_softmax(logits, temperature=0.8):\n \"\"\"\n input: [*, n_class]\n return: [*, n_class] an one-hot vector\n \"\"\"\n y = gumbel_softmax_sample(logits, temperature)\n shape = y.size()\n _, ind = y.max(dim=-1)\n y_hard = torch.zeros_like(y).view(-1, shape[-1])\n y_hard.scatter_(1, ind.view(-1, 1), 1)\n y_hard = y_hard.view(*shape)\n return (y_hard - y).detach() + y"
},
{
"identifier": "SemanticLoss",
"path": "text_super_resolution/loss/semantic_loss.py",
"snippet": "class SemanticLoss(nn.Module):\n def __init__(self, margin=0.1):\n super(SemanticLoss, self).__init__()\n self.cos_sim = nn.CosineSimilarity(dim=-1, eps=1e-8)\n self.margin = margin\n\n self.lambda1 = 1.0\n self.lambda2 = 1.0\n\n self.kl_loss = torch.nn.KLDivLoss()\n\n def forward(self, pred_vec, gt_vec):\n # pred_vec: [N, C]\n # gt_vec: [N, C]\n # mean_sim = torch.mean(self.cos_sim(gt_vec, pred_vec))\n # sim_loss = 1 - mean_sim\n \n #noise = Variable(torch.rand(pred_vec.shape)) * 0.1 - 0.05\n\n #normed_pred_vec = pred_vec + noise.to(pred_vec.device)\n # print(\"pred_vec:\", pred_vec.shape)\n norm_vec = torch.abs(gt_vec - pred_vec)\n margin_loss = torch.mean(norm_vec) #\n\n # pr int(\"sem_loss:\", float(margin_loss.data), \"sim_loss:\", float(sim_loss.data))\n ce_loss = self.kl_loss(torch.log(pred_vec + 1e-20), gt_vec + 1e-20)\n # print(\"sem_loss:\", float(margin_loss.data), \"sim_loss:\", float(sim_loss.data))\n\n return self.lambda1 * margin_loss + self.lambda2 * ce_loss# ce_loss #margin_loss # + ce_loss # + sim_loss #margin_loss +\n\n def cross_entropy(self, pred_vec, gt_vec, l=1e-5):\n cal = gt_vec * torch.log(pred_vec+l) + (1 - gt_vec) * torch.log(1 - pred_vec+l)\n #print(\"cal:\", cal)\n return -cal"
},
{
"identifier": "StrokeFocusLoss",
"path": "text_super_resolution/loss/stroke_focus_loss.py",
"snippet": "class StrokeFocusLoss(nn.Module):\n def __init__(self, args):\n super(StrokeFocusLoss, self).__init__()\n self.args = args\n self.mse_loss = nn.MSELoss()\n self.ce_loss = nn.CrossEntropyLoss()\n self.l1_loss = nn.L1Loss()\n self.english_stroke_alphabet = '0123456789'\n self.english_stroke_dict = {}\n for index in range(len(self.english_stroke_alphabet)):\n self.english_stroke_dict[self.english_stroke_alphabet[index]] = index\n\n stroke_decompose_lines = open('./dataset/mydata/english_decomposition.txt',\n 'r').readlines()\n self.dic = {}\n for line in stroke_decompose_lines:\n line = line.strip()\n character, sequence = line.split()\n self.dic[character] = sequence\n\n self.build_up_transformer()\n\n def build_up_transformer(self):\n transformer = Transformer().cuda()\n transformer = nn.DataParallel(transformer)\n transformer.load_state_dict(torch.load('./dataset/mydata/pretrain_transformer_stroke_decomposition.pth'))\n transformer.eval()\n self.transformer = transformer\n\n def label_stroke_encoder(self, label):\n new_label_list = []\n for one_label in label:\n stroke_sequence = ''\n for character in one_label:\n if character not in self.dic:\n continue\n else:\n stroke_sequence += self.dic[character]\n stroke_sequence += '0'\n new_label_list.append(stroke_sequence)\n label = new_label_list\n\n batch = len(label)\n\n length = [len(i) for i in label]\n length_tensor = torch.Tensor(length).long().cuda()\n\n max_length = max(length)\n input_tensor = np.zeros((batch, max_length))\n for i in range(batch):\n for j in range(length[i] - 1):\n input_tensor[i][j + 1] = self.english_stroke_dict[label[i][j]]\n\n text_gt = []\n for i in label:\n for j in i:\n text_gt.append(self.english_stroke_dict[j])\n text_gt = torch.Tensor(text_gt).long().cuda()\n\n input_tensor = torch.from_numpy(input_tensor).long().cuda()\n return length_tensor, input_tensor, text_gt\n\n\n def forward(self,sr_img, hr_img, label):\n length_tensor, input_tensor, text_gt = self.label_stroke_encoder(label)\n # hr_pred, word_attention_map_gt = self.transformer(**dict(image=to_gray_tensor(hr_img), text_length=length_tensor,\n # text_input=input_tensor, test=False))\n sr_pred, word_attention_map_pred= self.transformer(**dict(image=to_gray_tensor(sr_img), text_length=length_tensor,\n text_input=input_tensor, test=False))\n return word_attention_map_pred"
},
{
"identifier": "Transformer",
"path": "text_super_resolution/loss/transformer_english_decomposition.py",
"snippet": "class Transformer(nn.Module):\n\n def __init__(self):\n super(Transformer, self).__init__()\n\n word_n_class = get_alphabet_len()\n self.embedding_word_with_upperword = Embeddings(512, word_n_class)\n self.pe = PositionalEncoding(d_model=512, dropout=0.1, max_len=5000)\n\n self.encoder = Encoder()\n self.decoder = Decoder()\n self.generator_word_with_upperword = Generator(1024, word_n_class)\n\n\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n def forward(self, image, text_length, text_input, test=False, attention_map=None):\n\n if image.shape[1] == 4:\n R = image[:, 0:1, :, :]\n G = image[:, 1:2, :, :]\n B = image[:, 2:3, :, :]\n image = 0.299 * R + 0.587 * G + 0.114 * B\n\n conv_feature = self.encoder(image) # batch, 1024, 8, 32\n text_embedding = self.embedding_word_with_upperword(text_input) # batch, text_max_length, 512\n postion_embedding = self.pe(torch.zeros(text_embedding.shape).cuda()).cuda() # batch, text_max_length, 512\n text_input_with_pe = torch.cat([text_embedding, postion_embedding], 2) # batch, text_max_length, 1024\n batch, seq_len, _ = text_input_with_pe.shape\n\n text_input_with_pe, word_attention_map = self.decoder(text_input_with_pe, conv_feature)\n\n\n word_decoder_result = self.generator_word_with_upperword(text_input_with_pe)\n\n # print('fuck',word_attention_map.shape)\n\n correct_list = []\n\n if not test:\n\n total_length = torch.sum(text_length).data\n probs_res = torch.zeros(total_length, get_alphabet_len()).type_as(word_decoder_result.data)\n start = 0\n for index, length in enumerate(text_length):\n length = length.data\n probs_res[start:start + length, :] = word_decoder_result[index, 0:0 + length, :]\n if (probs_res[start:start + length, :].max(1)[1][:-1] == text_input[index][1:length]).all():\n correct_list.append(True)\n else:\n correct_list.append(False)\n start = start + length\n\n # return probs_res, word_attention_map, correct_list\n return probs_res, word_attention_map # there is a bug\n else:\n return word_decoder_result"
},
{
"identifier": "TPSSpatialTransformer",
"path": "text_super_resolution/model/tps_spatial_transformer.py",
"snippet": "class TPSSpatialTransformer(nn.Module):\n\n def __init__(self, output_image_size=None, num_control_points=None, margins=None):\n super(TPSSpatialTransformer, self).__init__()\n self.output_image_size = output_image_size\n self.num_control_points = num_control_points\n self.margins = margins\n\n self.target_height, self.target_width = output_image_size\n target_control_points = build_output_control_points(num_control_points, margins)\n N = num_control_points\n # N = N - 4\n\n # create padded kernel matrix\n forward_kernel = torch.zeros(N + 3, N + 3)\n target_control_partial_repr = compute_partial_repr(target_control_points, target_control_points)\n forward_kernel[:N, :N].copy_(target_control_partial_repr)\n forward_kernel[:N, -3].fill_(1)\n forward_kernel[-3, :N].fill_(1)\n forward_kernel[:N, -2:].copy_(target_control_points)\n forward_kernel[-2:, :N].copy_(target_control_points.transpose(0, 1))\n # compute inverse matrix\n inverse_kernel = torch.inverse(forward_kernel)\n\n # create target cordinate matrix\n HW = self.target_height * self.target_width\n target_coordinate = list(itertools.product(range(self.target_height), range(self.target_width)))\n target_coordinate = torch.Tensor(target_coordinate) # HW x 2\n Y, X = target_coordinate.split(1, dim = 1)\n Y = Y / (self.target_height - 1)\n X = X / (self.target_width - 1)\n target_coordinate = torch.cat([X, Y], dim = 1) # convert from (y, x) to (x, y)\n target_coordinate_partial_repr = compute_partial_repr(target_coordinate, target_control_points)\n target_coordinate_repr = torch.cat([\n target_coordinate_partial_repr, torch.ones(HW, 1), target_coordinate\n ], dim = 1)\n\n # register precomputed matrices\n self.register_buffer('inverse_kernel', inverse_kernel)\n self.register_buffer('padding_matrix', torch.zeros(3, 2))\n self.register_buffer('target_coordinate_repr', target_coordinate_repr)\n self.register_buffer('target_control_points', target_control_points)\n\n def forward(self, input, source_control_points):\n assert source_control_points.ndimension() == 3\n assert source_control_points.size(1) == self.num_control_points\n assert source_control_points.size(2) == 2\n batch_size = source_control_points.size(0)\n\n Y = torch.cat([source_control_points, self.padding_matrix.expand(batch_size, 3, 2)], 1)\n mapping_matrix = torch.matmul(self.inverse_kernel, Y)\n source_coordinate = torch.matmul(self.target_coordinate_repr, mapping_matrix)\n\n grid = source_coordinate.view(-1, self.target_height, self.target_width, 2)\n grid = torch.clamp(grid, 0, 1) # the source_control_points may be out of [0, 1].\n # the input to grid_sample is normalized [-1, 1], but what we get is [0, 1]\n grid = 2.0 * grid - 1.0\n output_maps = grid_sample(input, grid, canvas=None)\n return output_maps, source_coordinate"
},
{
"identifier": "STNHead",
"path": "text_super_resolution/model/stn_head.py",
"snippet": "class STNHead(nn.Module):\n def __init__(self, in_planes, num_ctrlpoints, activation='none', input_size=(16, 64)):\n super(STNHead, self).__init__()\n\n self.in_planes = in_planes\n self.num_ctrlpoints = num_ctrlpoints\n self.activation = activation\n self.stn_convnet = nn.Sequential(\n # conv3x3_block(in_planes, 32), # 32*128\n # nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(in_planes, 32), # 16*64\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(32, 64), # 8*32\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(64, 128), # 4*16\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(128, 256), # 2*8\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(256, 256), # 1*4,\n nn.MaxPool2d(kernel_size=(1,2), stride=(1,2)),\n conv3x3_block(256, 256)) # 1*2\n\n flatten_width = int(input_size[1] / 32)\n # print(\"flw:\", input_size[1] / 32)\n self.stn_fc1 = nn.Sequential(\n nn.Linear(512, 512), #flatten_width*256\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True))\n self.stn_fc2 = nn.Linear(512, num_ctrlpoints*2)\n\n self.init_weights(self.stn_convnet)\n self.init_weights(self.stn_fc1)\n self.init_stn(self.stn_fc2)\n\n def init_weights(self, module):\n for m in module.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.001)\n m.bias.data.zero_()\n\n def init_stn(self, stn_fc2):\n margin = 0.01\n sampling_num_per_side = int(self.num_ctrlpoints / 2)\n ctrl_pts_x = np.linspace(margin, 1.-margin, sampling_num_per_side)\n ctrl_pts_y_top = np.ones(sampling_num_per_side) * margin\n ctrl_pts_y_bottom = np.ones(sampling_num_per_side) * (1-margin)\n ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)\n ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)\n ctrl_points = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0).astype(np.float32)\n # print(ctrl_points.shape)\n if self.activation is 'none':\n pass\n elif self.activation == 'sigmoid':\n ctrl_points = -np.log(1. / ctrl_points - 1.)\n elif self.activation == 'relu':\n ctrl_points = F.relu(torch.Tensor(ctrl_points))\n stn_fc2.weight.data.zero_()\n stn_fc2.bias.data = torch.Tensor(ctrl_points).view(-1)\n\n def forward(self, x):\n x = self.stn_convnet(x)\n batch_size, _, h, w = x.size()\n x = x.view(batch_size, -1)\n\n # print(\"x:\", x.shape)\n\n img_feat = self.stn_fc1(x)\n x = self.stn_fc2(0.1 * img_feat)\n if self.activation == 'sigmoid':\n x = torch.sigmoid(x)\n if self.activation == 'relu':\n x = F.relu(x)\n x = x.view(-1, self.num_ctrlpoints, 2)\n return img_feat, x"
}
] | import torch
import sys
import time
import os
import math
import pickle
import copy
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import cv2
import random
import math
import numpy as np
import editdistance
import time
import lpips
from time import gmtime, strftime
from datetime import datetime
from tqdm import tqdm
from utils import util, ssim_psnr
from IPython import embed
from torchvision import transforms
from torch.autograd import Variable
from thop import profile
from PIL import Image
from text_super_resolution.interfaces import base
from utils.meters import AverageMeter
from utils.metrics import get_string_aster, get_string_crnn, Accuracy
from utils.util import str_filt
from utils import utils_moran
from text_super_resolution.model import gumbel_softmax
from text_super_resolution.loss.semantic_loss import SemanticLoss
from copy import deepcopy
from tensorboardX import SummaryWriter
from text_super_resolution.loss.stroke_focus_loss import StrokeFocusLoss
from text_super_resolution.loss.transformer_english_decomposition import Transformer
from text_super_resolution.model.tps_spatial_transformer import TPSSpatialTransformer
from text_super_resolution.model.stn_head import STNHead
from ptflops import get_model_complexity_info | 9,225 |
Args:
image (torch.Tensor): RGB Image to be converted to YUV with shape :math:`(*, 3, H, W)`.
Returns:
torch.Tensor: YUV version of the image with shape :math:`(*, 3, H, W)`.
Example:
>>> input = torch.rand(2, 3, 4, 5)
>>> output = rgb_to_yuv(input) # 2x3x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError("Input size must have a shape of (*, 3, H, W). Got {}"
.format(image.shape))
r: torch.Tensor = image[..., 0, :, :]
g: torch.Tensor = image[..., 1, :, :]
b: torch.Tensor = image[..., 2, :, :]
y: torch.Tensor = 0.299 * r + 0.587 * g + 0.114 * b
u: torch.Tensor = -0.147 * r - 0.289 * g + 0.436 * b
v: torch.Tensor = 0.615 * r - 0.515 * g - 0.100 * b
out: torch.Tensor = torch.stack([y, u, v], -3)
return out
def model_inference(self, images_lr, images_hr, model_list, aster, i, stroke_rec, label_strs, data_name):
ret_dict = {}
ret_dict["label_vecs"] = None
ret_dict["duration"] = 0
if self.args.arch == "tsrn":
before = time.time()
images_sr = model_list[0](images_lr)
after = time.time()
ret_dict["duration"] += (after - before)
if vis_feature:
# [N, C, H, W] -> [N, C, W]
block_feature = model_list[-1].block["7"].mean(2)
elif self.args.arch in ["tsrn_tl", "tsrn_tl_wmask"]:
###############################################
aster_dict_hr = self.parse_crnn_data(images_lr[:, :3, :, :])
label_vecs = aster[1](aster_dict_hr)
label_vecs = torch.nn.functional.softmax(label_vecs, -1)
ret_dict["label_vecs"] = label_vecs
'''
##############
# val: [T, B] <- [T, B, C]
label_val, label_indices = torch.max(label_vecs, -1)
label_indices = label_indices.view(label_indices.shape[0], label_indices.shape[1], 1)
new_label_vecs = torch.zeros(label_vecs.shape).float().to(label_vecs.device)
new_label_vecs.scatter_(2, label_indices, 1)
# label_vecs[label_vecs > 0.5] = 1.
noise = (torch.rand(label_vecs.shape) - 0.5) * 0.2
label_vecs = new_label_vecs.to(label_vecs.device) + noise.to(label_vecs.device)
##############
'''
# [T, B, C] -> [B, T, C] -> [B, 1, T, C]
label_vecs = label_vecs.permute(1, 0, 2).unsqueeze(1).permute(0, 3, 1, 2)
###############################################
images_sr = model_list[0](images_lr, label_vecs)
elif self.args.arch in ABLATION_SET:
cascade_images = images_lr
images_sr = []
if vis:
aster_dict_hr = self.parse_crnn_data(
images_lr[:, :3, :, :] if not self.args.y_domain else images_lrraw[:, :3, :, :])
# print("aster_dict_hr:", aster_dict_hr.shape)
label_vecs_lr = aster[0]['model'](aster_dict_hr)
label_vecs_lr = torch.nn.functional.softmax(label_vecs_lr, -1)
aster_dict_hr = self.parse_crnn_data(
images_hr[:, :3, :, :] if not self.args.y_domain else images_hrraw[:, :3, :, :])
label_vecs_hr = aster[0]['model'](aster_dict_hr)
label_vecs_hr = torch.nn.functional.softmax(label_vecs_hr, -1)
label_vecs_final_hr = label_vecs_hr.permute(1, 0, 2).unsqueeze(1).permute(0, 3, 1, 2)
ret_dict["label_vecs_hr"] = label_vecs_hr
for m_iter in range(self.args.stu_iter):
if self.args.tpg_share:
tpg_pick = 0
else:
tpg_pick = m_iter
stu_model = aster[1][tpg_pick]
aster_dict_lr = self.parse_crnn_data(
cascade_images[:, :3, :, :] if not self.args.y_domain else images_lrraw[:, :3, :,
:]) # cascade_images
before = time.time()
label_vecs_logits = stu_model(aster_dict_lr)
label_vecs = torch.nn.functional.softmax(label_vecs_logits, -1)
label_vecs_final = label_vecs.permute(1, 0, 2).unsqueeze(1).permute(0, 3, 1, 2)
ret_dict["label_vecs"] = label_vecs
# if data_name=='medium':
# print('images_lr.shape: {}\nimages_hr.shape: {}\nlabel_strs.shape: {}\n'.format(images_lr,images_hr,label_strs))
sr = nn.functional.interpolate(images_lr,(self.config.TRAIN.height,self.config.TRAIN.width),mode='bicubic')
|
lpips_vgg = lpips.LPIPS(net="vgg")
vis = False
vis_feature = False
# torch.backends.cudnn.enabled = False
TEST_MODEL = "MORAN"
sem_loss = SemanticLoss()
ctc_loss = torch.nn.CTCLoss(blank=0, reduction='none')
ssim = ssim_psnr.SSIM()
distorted_ssim = ssim_psnr.Distorted_SSIM()
tri_ssim = ssim_psnr.TRI_SSIM()
ABLATION_SET = ["tsrn_tl_cascade_sft", "tsrn_tl_cascade", "srcnn_tl",
"srresnet_tl", "rdn_tl", "vdsr_tl", "tranSR_v4",
"esrgan_tl", "scgan_tl", "tbsrn_tl", "tatt", "pcan_tl"]
_DEBUG = False
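# Super-resolution interface extending base.TextBase with rotation, color-space, and inference helpers.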
class TextSR(base.TextBase):
def SR_confence(self, image, angle):
pass
def rotate_img(self, image, angle):
# convert to cv2 image
if not angle == 0.0:
(h, w) = image.shape[:2]
scale = 1.0
# set the rotation center
center = (w / 2, h / 2)
# anti-clockwise angle in the function
M = cv2.getRotationMatrix2D(center, angle, scale)
image = cv2.warpAffine(image, M, (w, h))
# back to PIL image
return image
def loss_stablizing(self, loss_set, keep_proportion=0.7):
        # ascending: sort per-sample losses from smallest to largest
sorted_val, sorted_ind = torch.sort(loss_set)
batch_size = loss_set.shape[0]
# print("batch_size:", loss_set, batch_size)
loss_set[sorted_ind[int(keep_proportion * batch_size)]:] = 0.0
return loss_set
def cal_all_models(self, model_list, recognizer_list):
macs = 0.
params = 0.
for model in model_list:
mac, param = get_model_complexity_info(model, (4, 16, 64), as_strings=True,
print_per_layer_stat=False, verbose=True)
print('model {:<30} {:<8}'.format('Computational complexity: ', mac))
print('model {:<30} {:<8}'.format('Number of parameters: ', param))
# macs += mac
# params += param
for recognizer in recognizer_list:
mac, param = get_model_complexity_info(recognizer, (1, 32, 100), as_strings=True,
print_per_layer_stat=False, verbose=True)
print('recognizer {:<30} {:<8}'.format('Computational complexity: ', mac))
print('recognizer {:<30} {:<8}'.format('Number of parameters: ', param))
# macs += mac
# params += param
print('{:<30} {:<8}'.format('Total computational complexity: ', macs))
print('{:<30} {:<8}'.format('Total number of parameters: ', params))
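    # Rotate each image in the batch by its angle in arc_batches via an affine grid; rand_offs jitters the aspect ratio within off_range.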
def torch_rotate_img(self, torch_image_batches, arc_batches, rand_offs, off_range=0.2):
# ratios: H / W
device = torch_image_batches.device
N, C, H, W = torch_image_batches.shape
ratios = H / float(W)
# rand_offs = random.random() * (1 - ratios)
ratios_mul = ratios + (rand_offs.unsqueeze(1) * off_range * 2) - off_range
a11, a12, a21, a22 = torch.cos(arc_batches), \
torch.sin(arc_batches), \
-torch.sin(arc_batches), \
torch.cos(arc_batches)
# print("rand_offs:", rand_offs.shape, a12.shape)
x_shift = torch.zeros_like(arc_batches)
y_shift = torch.zeros_like(arc_batches)
# print("device:", device)
affine_matrix = torch.cat([a11.unsqueeze(1), a12.unsqueeze(1) * ratios_mul, x_shift.unsqueeze(1),
a21.unsqueeze(1) / ratios_mul, a22.unsqueeze(1), y_shift.unsqueeze(1)], dim=1)
affine_matrix = affine_matrix.reshape(N, 2, 3).to(device)
affine_grid = F.affine_grid(affine_matrix, torch_image_batches.shape)
distorted_batches = F.grid_sample(torch_image_batches, affine_grid)
return distorted_batches
def yuv_to_rgb(self, image: torch.Tensor) -> torch.Tensor:
r"""Convert an YUV image to RGB.
The image data is assumed to be in the range of (0, 1).
Args:
image (torch.Tensor): YUV Image to be converted to RGB with shape :math:`(*, 3, H, W)`.
Returns:
torch.Tensor: RGB version of the image with shape :math:`(*, 3, H, W)`.
Example:
>>> input = torch.rand(2, 3, 4, 5)
>>> output = yuv_to_rgb(input) # 2x3x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError("Input size must have a shape of (*, 3, H, W). Got {}"
.format(image.shape))
y: torch.Tensor = image[..., 0, :, :]
u: torch.Tensor = image[..., 1, :, :]
v: torch.Tensor = image[..., 2, :, :]
        r: torch.Tensor = y + 1.14 * v  # coefficient for u is 0
        g: torch.Tensor = y + -0.396 * u - 0.581 * v
        b: torch.Tensor = y + 2.029 * u  # coefficient for v is 0
out: torch.Tensor = torch.stack([r, g, b], -3)
return out
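    # YUV -> RGB conversion via OpenCV on a CPU numpy copy; the result is moved back to the input tensor's device.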
def yuv_to_rgb_cv(self, image: torch.Tensor) -> torch.Tensor:
im_device = image.device
image_np = image.data.cpu().numpy()
image_np = cv2.cvtColor(image_np, cv2.COLOR_YUV2RGB)
return torch.tensor(image_np).to(im_device)
def rgb_to_yuv(self, image: torch.Tensor) -> torch.Tensor:
r"""Convert an RGB image to YUV.
The image data is assumed to be in the range of (0, 1).
Args:
image (torch.Tensor): RGB Image to be converted to YUV with shape :math:`(*, 3, H, W)`.
Returns:
torch.Tensor: YUV version of the image with shape :math:`(*, 3, H, W)`.
Example:
>>> input = torch.rand(2, 3, 4, 5)
>>> output = rgb_to_yuv(input) # 2x3x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError("Input size must have a shape of (*, 3, H, W). Got {}"
.format(image.shape))
r: torch.Tensor = image[..., 0, :, :]
g: torch.Tensor = image[..., 1, :, :]
b: torch.Tensor = image[..., 2, :, :]
y: torch.Tensor = 0.299 * r + 0.587 * g + 0.114 * b
u: torch.Tensor = -0.147 * r - 0.289 * g + 0.436 * b
v: torch.Tensor = 0.615 * r - 0.515 * g - 0.100 * b
out: torch.Tensor = torch.stack([y, u, v], -3)
return out
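    # rgb_to_yuv and yuv_to_rgb above use slightly rounded BT.601 coefficients, so a round
    # trip is only approximately the identity. A quick, hypothetical sanity check:
    #
    #     >>> rgb = torch.rand(2, 3, 4, 5)
    #     >>> back = self.yuv_to_rgb(self.rgb_to_yuv(rgb))
    #     >>> torch.allclose(rgb, back, atol=1e-2)           # loose tolerance for the rounding
    #     True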
def model_inference(self, images_lr, images_hr, model_list, aster, i, stroke_rec, label_strs, data_name):
ret_dict = {}
ret_dict["label_vecs"] = None
ret_dict["duration"] = 0
if self.args.arch == "tsrn":
before = time.time()
images_sr = model_list[0](images_lr)
after = time.time()
ret_dict["duration"] += (after - before)
if vis_feature:
# [N, C, H, W] -> [N, C, W]
block_feature = model_list[-1].block["7"].mean(2)
elif self.args.arch in ["tsrn_tl", "tsrn_tl_wmask"]:
###############################################
aster_dict_hr = self.parse_crnn_data(images_lr[:, :3, :, :])
label_vecs = aster[1](aster_dict_hr)
label_vecs = torch.nn.functional.softmax(label_vecs, -1)
ret_dict["label_vecs"] = label_vecs
'''
##############
# val: [T, B] <- [T, B, C]
label_val, label_indices = torch.max(label_vecs, -1)
label_indices = label_indices.view(label_indices.shape[0], label_indices.shape[1], 1)
new_label_vecs = torch.zeros(label_vecs.shape).float().to(label_vecs.device)
new_label_vecs.scatter_(2, label_indices, 1)
# label_vecs[label_vecs > 0.5] = 1.
noise = (torch.rand(label_vecs.shape) - 0.5) * 0.2
label_vecs = new_label_vecs.to(label_vecs.device) + noise.to(label_vecs.device)
##############
'''
            # [T, B, C] -> [B, T, C] -> [B, 1, T, C] -> [B, C, 1, T]
label_vecs = label_vecs.permute(1, 0, 2).unsqueeze(1).permute(0, 3, 1, 2)
###############################################
images_sr = model_list[0](images_lr, label_vecs)
elif self.args.arch in ABLATION_SET:
cascade_images = images_lr
images_sr = []
if vis:
aster_dict_hr = self.parse_crnn_data(
images_lr[:, :3, :, :] if not self.args.y_domain else images_lrraw[:, :3, :, :])
# print("aster_dict_hr:", aster_dict_hr.shape)
label_vecs_lr = aster[0]['model'](aster_dict_hr)
label_vecs_lr = torch.nn.functional.softmax(label_vecs_lr, -1)
aster_dict_hr = self.parse_crnn_data(
images_hr[:, :3, :, :] if not self.args.y_domain else images_hrraw[:, :3, :, :])
label_vecs_hr = aster[0]['model'](aster_dict_hr)
label_vecs_hr = torch.nn.functional.softmax(label_vecs_hr, -1)
label_vecs_final_hr = label_vecs_hr.permute(1, 0, 2).unsqueeze(1).permute(0, 3, 1, 2)
ret_dict["label_vecs_hr"] = label_vecs_hr
for m_iter in range(self.args.stu_iter):
if self.args.tpg_share:
tpg_pick = 0
else:
tpg_pick = m_iter
stu_model = aster[1][tpg_pick]
aster_dict_lr = self.parse_crnn_data(
cascade_images[:, :3, :, :] if not self.args.y_domain else images_lrraw[:, :3, :,
:]) # cascade_images
before = time.time()
label_vecs_logits = stu_model(aster_dict_lr)
label_vecs = torch.nn.functional.softmax(label_vecs_logits, -1)
label_vecs_final = label_vecs.permute(1, 0, 2).unsqueeze(1).permute(0, 3, 1, 2)
ret_dict["label_vecs"] = label_vecs
# if data_name=='medium':
# print('images_lr.shape: {}\nimages_hr.shape: {}\nlabel_strs.shape: {}\n'.format(images_lr,images_hr,label_strs))
sr = nn.functional.interpolate(images_lr,(self.config.TRAIN.height,self.config.TRAIN.width),mode='bicubic')
| pred_label = get_string_crnn(label_vecs, use_chinese=False) | 5 | 2023-11-20 06:34:21+00:00 | 12k |
mjavadpur/mj_ONNX_SadTalker | src/facerender/animate_onnx.py | [
{
"identifier": "HEEstimator",
"path": "src/facerender/modules/keypoint_detector.py",
"snippet": "class HEEstimator(nn.Module):\n \"\"\"\n Estimating head pose and expression.\n \"\"\"\n\n def __init__(self, block_expansion, feature_channel, num_kp, image_channel, max_features, num_bins=66, estimate_jacobian=True):\n super(HEEstimator, self).__init__()\n\n self.conv1 = nn.Conv2d(in_channels=image_channel, out_channels=block_expansion, kernel_size=7, padding=3, stride=2)\n self.norm1 = BatchNorm2d(block_expansion, affine=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n self.conv2 = nn.Conv2d(in_channels=block_expansion, out_channels=256, kernel_size=1)\n self.norm2 = BatchNorm2d(256, affine=True)\n\n self.block1 = nn.Sequential()\n for i in range(3):\n self.block1.add_module('b1_'+ str(i), ResBottleneck(in_features=256, stride=1))\n\n self.conv3 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=1)\n self.norm3 = BatchNorm2d(512, affine=True)\n self.block2 = ResBottleneck(in_features=512, stride=2)\n\n self.block3 = nn.Sequential()\n for i in range(3):\n self.block3.add_module('b3_'+ str(i), ResBottleneck(in_features=512, stride=1))\n\n self.conv4 = nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=1)\n self.norm4 = BatchNorm2d(1024, affine=True)\n self.block4 = ResBottleneck(in_features=1024, stride=2)\n\n self.block5 = nn.Sequential()\n for i in range(5):\n self.block5.add_module('b5_'+ str(i), ResBottleneck(in_features=1024, stride=1))\n\n self.conv5 = nn.Conv2d(in_channels=1024, out_channels=2048, kernel_size=1)\n self.norm5 = BatchNorm2d(2048, affine=True)\n self.block6 = ResBottleneck(in_features=2048, stride=2)\n\n self.block7 = nn.Sequential()\n for i in range(2):\n self.block7.add_module('b7_'+ str(i), ResBottleneck(in_features=2048, stride=1))\n\n self.fc_roll = nn.Linear(2048, num_bins)\n self.fc_pitch = nn.Linear(2048, num_bins)\n self.fc_yaw = nn.Linear(2048, num_bins)\n\n self.fc_t = nn.Linear(2048, 3)\n\n self.fc_exp = nn.Linear(2048, 3*num_kp)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.norm1(out)\n out = F.relu(out)\n out = self.maxpool(out)\n\n out = self.conv2(out)\n out = self.norm2(out)\n out = F.relu(out)\n\n out = self.block1(out)\n\n out = self.conv3(out)\n out = self.norm3(out)\n out = F.relu(out)\n out = self.block2(out)\n\n out = self.block3(out)\n\n out = self.conv4(out)\n out = self.norm4(out)\n out = F.relu(out)\n out = self.block4(out)\n\n out = self.block5(out)\n\n out = self.conv5(out)\n out = self.norm5(out)\n out = F.relu(out)\n out = self.block6(out)\n\n out = self.block7(out)\n\n out = F.adaptive_avg_pool2d(out, 1)\n out = out.view(out.shape[0], -1)\n\n yaw = self.fc_roll(out)\n pitch = self.fc_pitch(out)\n roll = self.fc_yaw(out)\n t = self.fc_t(out)\n exp = self.fc_exp(out)\n\n return {'yaw': yaw, 'pitch': pitch, 'roll': roll, 't': t, 'exp': exp}"
},
{
"identifier": "KPDetector",
"path": "src/facerender/modules/keypoint_detector.py",
"snippet": "class KPDetector(nn.Module):\n \"\"\"\n Detecting canonical keypoints. Return keypoint position and jacobian near each keypoint.\n \"\"\"\n\n def __init__(self, block_expansion, feature_channel, num_kp, image_channel, max_features, reshape_channel, reshape_depth,\n num_blocks, temperature, estimate_jacobian=False, scale_factor=1, single_jacobian_map=False):\n super(KPDetector, self).__init__()\n\n self.predictor = KPHourglass(block_expansion, in_features=image_channel,\n max_features=max_features, reshape_features=reshape_channel, reshape_depth=reshape_depth, num_blocks=num_blocks)\n\n # self.kp = nn.Conv3d(in_channels=self.predictor.out_filters, out_channels=num_kp, kernel_size=7, padding=3)\n self.kp = nn.Conv3d(in_channels=self.predictor.out_filters, out_channels=num_kp, kernel_size=3, padding=1)\n\n if estimate_jacobian:\n self.num_jacobian_maps = 1 if single_jacobian_map else num_kp\n # self.jacobian = nn.Conv3d(in_channels=self.predictor.out_filters, out_channels=9 * self.num_jacobian_maps, kernel_size=7, padding=3)\n self.jacobian = nn.Conv3d(in_channels=self.predictor.out_filters, out_channels=9 * self.num_jacobian_maps, kernel_size=3, padding=1)\n '''\n initial as:\n [[1 0 0]\n [0 1 0]\n [0 0 1]]\n '''\n self.jacobian.weight.data.zero_()\n self.jacobian.bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0, 0, 0, 1] * self.num_jacobian_maps, dtype=torch.float))\n else:\n self.jacobian = None\n\n self.temperature = temperature\n self.scale_factor = scale_factor\n if self.scale_factor != 1:\n self.down = AntiAliasInterpolation2d(image_channel, self.scale_factor)\n\n def gaussian2kp(self, heatmap):\n \"\"\"\n Extract the mean from a heatmap\n \"\"\"\n shape = heatmap.shape\n heatmap = heatmap.unsqueeze(-1)\n grid = make_coordinate_grid(shape[2:], heatmap.type()).unsqueeze_(0).unsqueeze_(0)\n value = (heatmap * grid).sum(dim=(2, 3, 4))\n kp = {'value': value}\n\n return kp\n\n def forward(self, x):\n if self.scale_factor != 1:\n x = self.down(x)\n\n feature_map = self.predictor(x)\n prediction = self.kp(feature_map)\n\n final_shape = prediction.shape\n heatmap = prediction.view(final_shape[0], final_shape[1], -1)\n heatmap = F.softmax(heatmap / self.temperature, dim=2)\n heatmap = heatmap.view(*final_shape)\n\n out = self.gaussian2kp(heatmap)\n\n if self.jacobian is not None:\n jacobian_map = self.jacobian(feature_map)\n jacobian_map = jacobian_map.reshape(final_shape[0], self.num_jacobian_maps, 9, final_shape[2],\n final_shape[3], final_shape[4])\n heatmap = heatmap.unsqueeze(2)\n\n jacobian = heatmap * jacobian_map\n jacobian = jacobian.view(final_shape[0], final_shape[1], 9, -1)\n jacobian = jacobian.sum(dim=-1)\n jacobian = jacobian.view(jacobian.shape[0], jacobian.shape[1], 3, 3)\n out['jacobian'] = jacobian\n\n return out"
},
{
"identifier": "MappingNet",
"path": "src/facerender/modules/mapping.py",
"snippet": "class MappingNet(nn.Module):\n def __init__(self, coeff_nc, descriptor_nc, layer, num_kp, num_bins):\n super( MappingNet, self).__init__()\n\n self.layer = layer\n nonlinearity = nn.LeakyReLU(0.1)\n\n self.first = nn.Sequential(\n torch.nn.Conv1d(coeff_nc, descriptor_nc, kernel_size=7, padding=0, bias=True))\n\n for i in range(layer):\n net = nn.Sequential(nonlinearity,\n torch.nn.Conv1d(descriptor_nc, descriptor_nc, kernel_size=3, padding=0, dilation=3))\n setattr(self, 'encoder' + str(i), net) \n\n self.pooling = nn.AdaptiveAvgPool1d(1)\n self.output_nc = descriptor_nc\n\n self.fc_roll = nn.Linear(descriptor_nc, num_bins)\n self.fc_pitch = nn.Linear(descriptor_nc, num_bins)\n self.fc_yaw = nn.Linear(descriptor_nc, num_bins)\n self.fc_t = nn.Linear(descriptor_nc, 3)\n self.fc_exp = nn.Linear(descriptor_nc, 3*num_kp)\n\n def forward(self, input_3dmm):\n out = self.first(input_3dmm)\n for i in range(self.layer):\n model = getattr(self, 'encoder' + str(i))\n out = model(out) + out[:,:,3:-3]\n out = self.pooling(out)\n out = out.view(out.shape[0], -1)\n #print('out:', out.shape)\n\n yaw = self.fc_yaw(out)\n pitch = self.fc_pitch(out)\n roll = self.fc_roll(out)\n t = self.fc_t(out)\n exp = self.fc_exp(out)\n\n return {'yaw': yaw, 'pitch': pitch, 'roll': roll, 't': t, 'exp': exp} "
},
{
"identifier": "OcclusionAwareGenerator",
"path": "src/facerender/modules/generator.py",
"snippet": "class OcclusionAwareGenerator(nn.Module):\n \"\"\"\n Generator follows NVIDIA architecture.\n \"\"\"\n\n def __init__(self, image_channel, feature_channel, num_kp, block_expansion, max_features, num_down_blocks, reshape_channel, reshape_depth,\n num_resblocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False):\n super(OcclusionAwareGenerator, self).__init__()\n\n if dense_motion_params is not None:\n self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, feature_channel=feature_channel,\n estimate_occlusion_map=estimate_occlusion_map,\n **dense_motion_params)\n else:\n self.dense_motion_network = None\n\n self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(7, 7), padding=(3, 3))\n\n down_blocks = []\n for i in range(num_down_blocks):\n in_features = min(max_features, block_expansion * (2 ** i))\n out_features = min(max_features, block_expansion * (2 ** (i + 1)))\n down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))\n self.down_blocks = nn.ModuleList(down_blocks)\n\n self.second = nn.Conv2d(in_channels=out_features, out_channels=max_features, kernel_size=1, stride=1)\n\n self.reshape_channel = reshape_channel\n self.reshape_depth = reshape_depth\n\n self.resblocks_3d = torch.nn.Sequential()\n for i in range(num_resblocks):\n self.resblocks_3d.add_module('3dr' + str(i), ResBlock3d(reshape_channel, kernel_size=3, padding=1))\n\n out_features = block_expansion * (2 ** (num_down_blocks))\n self.third = SameBlock2d(max_features, out_features, kernel_size=(3, 3), padding=(1, 1), lrelu=True)\n self.fourth = nn.Conv2d(in_channels=out_features, out_channels=out_features, kernel_size=1, stride=1)\n\n self.resblocks_2d = torch.nn.Sequential()\n for i in range(num_resblocks):\n self.resblocks_2d.add_module('2dr' + str(i), ResBlock2d(out_features, kernel_size=3, padding=1))\n\n up_blocks = []\n for i in range(num_down_blocks):\n in_features = max(block_expansion, block_expansion * (2 ** (num_down_blocks - i)))\n out_features = max(block_expansion, block_expansion * (2 ** (num_down_blocks - i - 1)))\n up_blocks.append(UpBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))\n self.up_blocks = nn.ModuleList(up_blocks)\n\n self.final = nn.Conv2d(block_expansion, image_channel, kernel_size=(7, 7), padding=(3, 3))\n self.estimate_occlusion_map = estimate_occlusion_map\n self.image_channel = image_channel\n\n def deform_input(self, inp, deformation):\n _, d_old, h_old, w_old, _ = deformation.shape\n _, _, d, h, w = inp.shape\n if d_old != d or h_old != h or w_old != w:\n deformation = deformation.permute(0, 4, 1, 2, 3)\n deformation = F.interpolate(deformation, size=(d, h, w), mode='trilinear')\n deformation = deformation.permute(0, 2, 3, 4, 1)\n return F.grid_sample(inp, deformation)\n\n def forward(self, source_image, kp_driving, kp_source):\n # Encoding (downsampling) part\n out = self.first(source_image)\n for i in range(len(self.down_blocks)):\n out = self.down_blocks[i](out)\n out = self.second(out)\n bs, c, h, w = out.shape\n # print(out.shape)\n feature_3d = out.view(bs, self.reshape_channel, self.reshape_depth, h ,w) \n feature_3d = self.resblocks_3d(feature_3d)\n\n # Transforming feature representation according to deformation and occlusion\n output_dict = {}\n if self.dense_motion_network is not None:\n dense_motion = self.dense_motion_network(feature=feature_3d, kp_driving=kp_driving,\n kp_source=kp_source)\n output_dict['mask'] = dense_motion['mask']\n\n if 
'occlusion_map' in dense_motion:\n occlusion_map = dense_motion['occlusion_map']\n output_dict['occlusion_map'] = occlusion_map\n else:\n occlusion_map = None\n deformation = dense_motion['deformation']\n out = self.deform_input(feature_3d, deformation)\n\n bs, c, d, h, w = out.shape\n out = out.view(bs, c*d, h, w)\n out = self.third(out)\n out = self.fourth(out)\n\n if occlusion_map is not None:\n if out.shape[2] != occlusion_map.shape[2] or out.shape[3] != occlusion_map.shape[3]:\n occlusion_map = F.interpolate(occlusion_map, size=out.shape[2:], mode='bilinear')\n out = out * occlusion_map\n\n # output_dict[\"deformed\"] = self.deform_input(source_image, deformation) # 3d deformation cannot deform 2d image\n\n # Decoding part\n out = self.resblocks_2d(out)\n for i in range(len(self.up_blocks)):\n out = self.up_blocks[i](out)\n out = self.final(out)\n out = F.sigmoid(out)\n\n output_dict[\"prediction\"] = out\n\n return output_dict"
},
{
"identifier": "OcclusionAwareSPADEGenerator",
"path": "src/facerender/modules/generator.py",
"snippet": "class OcclusionAwareSPADEGenerator(nn.Module):\n\n def __init__(self, image_channel, feature_channel, num_kp, block_expansion, max_features, num_down_blocks, reshape_channel, reshape_depth,\n num_resblocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False):\n super(OcclusionAwareSPADEGenerator, self).__init__()\n\n if dense_motion_params is not None:\n self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, feature_channel=feature_channel,\n estimate_occlusion_map=estimate_occlusion_map,\n **dense_motion_params)\n else:\n self.dense_motion_network = None\n\n self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(3, 3), padding=(1, 1))\n\n down_blocks = []\n for i in range(num_down_blocks):\n in_features = min(max_features, block_expansion * (2 ** i))\n out_features = min(max_features, block_expansion * (2 ** (i + 1)))\n down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))\n self.down_blocks = nn.ModuleList(down_blocks)\n\n self.second = nn.Conv2d(in_channels=out_features, out_channels=max_features, kernel_size=1, stride=1)\n\n self.reshape_channel = reshape_channel\n self.reshape_depth = reshape_depth\n\n self.resblocks_3d = torch.nn.Sequential()\n for i in range(num_resblocks):\n self.resblocks_3d.add_module('3dr' + str(i), ResBlock3d(reshape_channel, kernel_size=3, padding=1))\n\n out_features = block_expansion * (2 ** (num_down_blocks))\n self.third = SameBlock2d(max_features, out_features, kernel_size=(3, 3), padding=(1, 1), lrelu=True)\n self.fourth = nn.Conv2d(in_channels=out_features, out_channels=out_features, kernel_size=1, stride=1)\n\n self.estimate_occlusion_map = estimate_occlusion_map\n self.image_channel = image_channel\n\n self.decoder = SPADEDecoder()\n\n def deform_input(self, inp, deformation):\n _, d_old, h_old, w_old, _ = deformation.shape\n _, _, d, h, w = inp.shape\n if d_old != d or h_old != h or w_old != w:\n deformation = deformation.permute(0, 4, 1, 2, 3)\n deformation = F.interpolate(deformation, size=(d, h, w), mode='trilinear')\n deformation = deformation.permute(0, 2, 3, 4, 1)\n return F.grid_sample(inp, deformation)\n\n def forward(self, source_image, kp_driving, kp_source):\n # Encoding (downsampling) part\n out = self.first(source_image)\n for i in range(len(self.down_blocks)):\n out = self.down_blocks[i](out)\n out = self.second(out)\n bs, c, h, w = out.shape\n # print(out.shape)\n feature_3d = out.view(bs, self.reshape_channel, self.reshape_depth, h ,w) \n feature_3d = self.resblocks_3d(feature_3d)\n\n # Transforming feature representation according to deformation and occlusion\n output_dict = {}\n if self.dense_motion_network is not None:\n dense_motion = self.dense_motion_network(feature=feature_3d, kp_driving=kp_driving,\n kp_source=kp_source)\n output_dict['mask'] = dense_motion['mask']\n\n # import pdb; pdb.set_trace()\n\n if 'occlusion_map' in dense_motion:\n occlusion_map = dense_motion['occlusion_map']\n output_dict['occlusion_map'] = occlusion_map\n else:\n occlusion_map = None\n deformation = dense_motion['deformation']\n out = self.deform_input(feature_3d, deformation)\n\n bs, c, d, h, w = out.shape\n out = out.view(bs, c*d, h, w)\n out = self.third(out)\n out = self.fourth(out)\n\n # occlusion_map = torch.where(occlusion_map < 0.95, 0, occlusion_map)\n \n if occlusion_map is not None:\n if out.shape[2] != occlusion_map.shape[2] or out.shape[3] != occlusion_map.shape[3]:\n occlusion_map = F.interpolate(occlusion_map, size=out.shape[2:], 
mode='bilinear')\n out = out * occlusion_map\n\n # Decoding part\n out = self.decoder(out)\n\n output_dict[\"prediction\"] = out\n \n return output_dict"
},
{
"identifier": "make_animation",
"path": "src/facerender/modules/make_animation.py",
"snippet": "def make_animation(source_image, source_semantics, target_semantics,\n generator, kp_detector, he_estimator, mapping, \n yaw_c_seq=None, pitch_c_seq=None, roll_c_seq=None,\n use_exp=True, use_half=False):\n with torch.no_grad():\n predictions = []\n\n kp_canonical = kp_detector(source_image)\n he_source = mapping(source_semantics)\n kp_source = keypoint_transformation(kp_canonical, he_source)\n \n for frame_idx in tqdm(range(target_semantics.shape[1]), 'Face Renderer:'):\n # still check the dimension\n # print(target_semantics.shape, source_semantics.shape)\n target_semantics_frame = target_semantics[:, frame_idx]\n he_driving = mapping(target_semantics_frame)\n if yaw_c_seq is not None:\n he_driving['yaw_in'] = yaw_c_seq[:, frame_idx]\n if pitch_c_seq is not None:\n he_driving['pitch_in'] = pitch_c_seq[:, frame_idx] \n if roll_c_seq is not None:\n he_driving['roll_in'] = roll_c_seq[:, frame_idx] \n \n kp_driving = keypoint_transformation(kp_canonical, he_driving)\n \n kp_norm = kp_driving\n out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm)\n '''\n source_image_new = out['prediction'].squeeze(1)\n kp_canonical_new = kp_detector(source_image_new)\n he_source_new = he_estimator(source_image_new) \n kp_source_new = keypoint_transformation(kp_canonical_new, he_source_new, wo_exp=True)\n kp_driving_new = keypoint_transformation(kp_canonical_new, he_driving, wo_exp=True)\n out = generator(source_image_new, kp_source=kp_source_new, kp_driving=kp_driving_new)\n '''\n predictions.append(out['prediction'])\n predictions_ts = torch.stack(predictions, dim=1)\n return predictions_ts"
},
{
"identifier": "enhancer_generator_with_len",
"path": "src/utils/face_enhancer_deploy.py",
"snippet": "def enhancer_generator_with_len(images, method='gfpgan', bg_upsampler='realesrgan'):\n \"\"\" Provide a generator with a __len__ method so that it can passed to functions that\n call len()\"\"\"\n\n if os.path.isfile(images): # handle video to images\n images = load_video_to_cv2(images)\n\n gen = enhancer_generator_no_len(images, method=method, bg_upsampler=bg_upsampler)\n gen_with_len = GeneratorWithLen(gen, len(images))\n return gen_with_len"
},
{
"identifier": "enhancer_list",
"path": "src/utils/face_enhancer_deploy.py",
"snippet": "def enhancer_list(images, method='gfpgan', bg_upsampler='realesrgan'):\n gen = enhancer_generator_no_len(images, method=method, bg_upsampler=bg_upsampler)\n return list(gen)"
},
{
"identifier": "paste_pic",
"path": "src/utils/paste_pic.py",
"snippet": "def paste_pic(video_path, pic_path, crop_info, new_audio_path, full_video_path, extended_crop=False):\n\n if not os.path.isfile(pic_path):\n raise ValueError('pic_path must be a valid path to video/image file')\n elif pic_path.split('.')[-1] in ['jpg', 'png', 'jpeg']:\n # loader for first frame\n full_img = cv2.imread(pic_path)\n else:\n # loader for videos\n video_stream = cv2.VideoCapture(pic_path)\n fps = video_stream.get(cv2.CAP_PROP_FPS)\n full_frames = [] \n while 1:\n still_reading, frame = video_stream.read()\n if not still_reading:\n video_stream.release()\n break \n break \n full_img = frame\n frame_h = full_img.shape[0]\n frame_w = full_img.shape[1]\n\n video_stream = cv2.VideoCapture(video_path)\n fps = video_stream.get(cv2.CAP_PROP_FPS)\n crop_frames = []\n while 1:\n still_reading, frame = video_stream.read()\n if not still_reading:\n video_stream.release()\n break\n crop_frames.append(frame)\n \n if len(crop_info) != 3:\n print(\"you didn't crop the image\")\n return\n else:\n r_w, r_h = crop_info[0]\n clx, cly, crx, cry = crop_info[1]\n lx, ly, rx, ry = crop_info[2]\n lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry)\n # oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx\n # oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx\n\n if extended_crop:\n oy1, oy2, ox1, ox2 = cly, cry, clx, crx\n else:\n oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx\n\n tmp_path = str(uuid.uuid4())+'.mp4'\n out_tmp = cv2.VideoWriter(tmp_path, cv2.VideoWriter_fourcc(*'MP4V'), fps, (frame_w, frame_h))\n for crop_frame in tqdm(crop_frames, 'seamlessClone:'):\n p = cv2.resize(crop_frame.astype(np.uint8), (ox2-ox1, oy2 - oy1)) \n\n mask = 255*np.ones(p.shape, p.dtype)\n location = ((ox1+ox2) // 2, (oy1+oy2) // 2)\n gen_img = cv2.seamlessClone(p, full_img, mask, location, cv2.NORMAL_CLONE)\n out_tmp.write(gen_img)\n\n out_tmp.release()\n\n save_video_with_watermark(tmp_path, new_audio_path, full_video_path, watermark=False)\n os.remove(tmp_path)"
},
{
"identifier": "save_video_with_watermark",
"path": "src/utils/videoio.py",
"snippet": "def save_video_with_watermark(video, audio, save_path, watermark=False):\n temp_file = str(uuid.uuid4())+'.mp4'\n cmd = r'ffmpeg -y -hide_banner -loglevel error -i \"%s\" -i \"%s\" -vcodec copy \"%s\"' % (video, audio, temp_file)\n os.system(cmd)\n\n with open(temp_file, \"rb\") as file:\n video_data = base64.b64encode(file.read()).decode(\"utf-8\")\n print(f\" len of generated vidoe({save_path}):\" + str(len(video_data)))\n \n if watermark is False:\n shutil.move(temp_file, save_path)\n else:\n # watermark\n try:\n ##### check if stable-diffusion-webui\n import webui\n from modules import paths\n watarmark_path = paths.script_path+\"/extensions/SadTalker/docs/sadtalker_logo.png\"\n except:\n # get the root path of sadtalker.\n dir_path = os.path.dirname(os.path.realpath(__file__))\n watarmark_path = dir_path+\"/../../docs/sadtalker_logo.png\"\n\n cmd = r'ffmpeg -y -hide_banner -loglevel error -i \"%s\" -i \"%s\" -filter_complex \"[1]scale=100:-1[wm];[0][wm]overlay=(main_w-overlay_w)-10:10\" \"%s\"' % (temp_file, watarmark_path, save_path)\n os.system(cmd)\n os.remove(temp_file)"
}
] | import os
import cv2
import yaml
import numpy as np
import warnings
import safetensors
import safetensors.torch
import imageio
import torch
import torchvision
import webui # in webui
from skimage import img_as_ubyte
from src.facerender.modules.keypoint_detector import HEEstimator, KPDetector
from src.facerender.modules.mapping import MappingNet
from src.facerender.modules.generator import OcclusionAwareGenerator, OcclusionAwareSPADEGenerator
from src.facerender.modules.make_animation import make_animation
from pydub import AudioSegment
from src.utils.face_enhancer_deploy import enhancer_generator_with_len, enhancer_list
from src.utils.paste_pic import paste_pic
from src.utils.videoio import save_video_with_watermark | 8,630 | x_generator[k.replace('kp_extractor.', '')] = v
kp_detector.load_state_dict(x_generator)
if he_estimator is not None:
x_generator = {}
for k,v in checkpoint.items():
if 'he_estimator' in k:
x_generator[k.replace('he_estimator.', '')] = v
he_estimator.load_state_dict(x_generator)
return None
def load_cpk_facevid2vid(self, checkpoint_path, generator=None, discriminator=None,
kp_detector=None, he_estimator=None, optimizer_generator=None,
optimizer_discriminator=None, optimizer_kp_detector=None,
optimizer_he_estimator=None, device="cpu"):
checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
if generator is not None:
generator.load_state_dict(checkpoint['generator'])
if kp_detector is not None:
kp_detector.load_state_dict(checkpoint['kp_detector'])
if he_estimator is not None:
he_estimator.load_state_dict(checkpoint['he_estimator'])
if discriminator is not None:
try:
discriminator.load_state_dict(checkpoint['discriminator'])
except:
                print('No discriminator in the state-dict. Discriminator will be randomly initialized')
if optimizer_generator is not None:
optimizer_generator.load_state_dict(checkpoint['optimizer_generator'])
if optimizer_discriminator is not None:
try:
optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
except RuntimeError as e:
                print('No discriminator optimizer in the state-dict. Optimizer will not be initialized')
if optimizer_kp_detector is not None:
optimizer_kp_detector.load_state_dict(checkpoint['optimizer_kp_detector'])
if optimizer_he_estimator is not None:
optimizer_he_estimator.load_state_dict(checkpoint['optimizer_he_estimator'])
return checkpoint['epoch']
def load_cpk_mapping(self, checkpoint_path, mapping=None, discriminator=None,
optimizer_mapping=None, optimizer_discriminator=None, device='cpu'):
checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
if mapping is not None:
mapping.load_state_dict(checkpoint['mapping'])
if discriminator is not None:
discriminator.load_state_dict(checkpoint['discriminator'])
if optimizer_mapping is not None:
optimizer_mapping.load_state_dict(checkpoint['optimizer_mapping'])
if optimizer_discriminator is not None:
optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
return checkpoint['epoch']
def generate(self, x, video_save_dir, pic_path, crop_info, enhancer=None, background_enhancer=None, preprocess='crop', img_size=256):
source_image=x['source_image'].type(torch.FloatTensor)
source_semantics=x['source_semantics'].type(torch.FloatTensor)
target_semantics=x['target_semantics_list'].type(torch.FloatTensor)
source_image=source_image.to(self.device)
source_semantics=source_semantics.to(self.device)
target_semantics=target_semantics.to(self.device)
if 'yaw_c_seq' in x:
            yaw_c_seq = x['yaw_c_seq'].type(torch.FloatTensor).to(self.device)
        else:
            yaw_c_seq = None
        if 'pitch_c_seq' in x:
            pitch_c_seq = x['pitch_c_seq'].type(torch.FloatTensor).to(self.device)
        else:
            pitch_c_seq = None
        if 'roll_c_seq' in x:
            roll_c_seq = x['roll_c_seq'].type(torch.FloatTensor).to(self.device)
else:
roll_c_seq = None
frame_num = x['frame_num']
predictions_video = make_animation(source_image, source_semantics, target_semantics,
self.generator, self.kp_extractor, self.he_estimator, self.mapping,
yaw_c_seq, pitch_c_seq, roll_c_seq, use_exp = True)
predictions_video = predictions_video.reshape((-1,)+predictions_video.shape[2:])
predictions_video = predictions_video[:frame_num]
video = []
for idx in range(predictions_video.shape[0]):
image = predictions_video[idx]
image = np.transpose(image.data.cpu().numpy(), [1, 2, 0]).astype(np.float32)
video.append(image)
result = img_as_ubyte(video)
        ### the generated video is 256x256, so we keep the aspect ratio of the original image
original_size = crop_info[0]
if original_size:
result = [ cv2.resize(result_i,(img_size, int(img_size * original_size[1]/original_size[0]) )) for result_i in result ]
video_name = x['video_name'] + '.mp4'
path = os.path.join(video_save_dir, 'temp_'+video_name)
imageio.mimsave(path, result, fps=float(25))
av_path = os.path.join(video_save_dir, video_name)
return_path = av_path
audio_path = x['audio_path']
audio_name = os.path.splitext(os.path.split(audio_path)[-1])[0]
new_audio_path = os.path.join(video_save_dir, audio_name+'.wav')
start_time = 0
# cog will not keep the .mp3 filename
sound = AudioSegment.from_file(audio_path)
frames = frame_num
end_time = start_time + frames*1/25*1000
word1=sound.set_frame_rate(16000)
word = word1[start_time:end_time]
word.export(new_audio_path, format="wav")
| warnings.filterwarnings('ignore')
try:  # in the source file, the optional `import webui` (listed in the import block above) is guarded by this try
in_webui = True
except:
in_webui = False
class AnimateFromCoeff():
def __init__(self, sadtalker_path, device):
with open(sadtalker_path['facerender_yaml']) as f:
config = yaml.safe_load(f)
generator = OcclusionAwareSPADEGenerator(**config['model_params']['generator_params'],
**config['model_params']['common_params'])
kp_extractor = KPDetector(**config['model_params']['kp_detector_params'],
**config['model_params']['common_params'])
he_estimator = HEEstimator(**config['model_params']['he_estimator_params'],
**config['model_params']['common_params'])
mapping = MappingNet(**config['model_params']['mapping_params'])
generator.to(device)
kp_extractor.to(device)
he_estimator.to(device)
mapping.to(device)
for param in generator.parameters():
param.requires_grad = False
for param in kp_extractor.parameters():
param.requires_grad = False
for param in he_estimator.parameters():
param.requires_grad = False
for param in mapping.parameters():
param.requires_grad = False
if sadtalker_path is not None:
if 'checkpoint' in sadtalker_path: # use safe tensor
self.load_cpk_facevid2vid_safetensor(sadtalker_path['checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=None)
else:
self.load_cpk_facevid2vid(sadtalker_path['free_view_checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=he_estimator)
else:
raise AttributeError("Checkpoint should be specified for video head pose estimator.")
if sadtalker_path['mappingnet_checkpoint'] is not None:
self.load_cpk_mapping(sadtalker_path['mappingnet_checkpoint'], mapping=mapping)
else:
raise AttributeError("Checkpoint should be specified for video head pose estimator.")
self.kp_extractor = kp_extractor
self.generator = generator
self.he_estimator = he_estimator
self.mapping = mapping
self.kp_extractor.eval()
self.generator.eval()
self.he_estimator.eval()
self.mapping.eval()
self.device = device
def load_cpk_facevid2vid_safetensor(self, checkpoint_path, generator=None,
kp_detector=None, he_estimator=None,
device="cpu"):
checkpoint = safetensors.torch.load_file(checkpoint_path)
if generator is not None:
x_generator = {}
for k,v in checkpoint.items():
if 'generator' in k:
x_generator[k.replace('generator.', '')] = v
generator.load_state_dict(x_generator)
if kp_detector is not None:
x_generator = {}
for k,v in checkpoint.items():
if 'kp_extractor' in k:
x_generator[k.replace('kp_extractor.', '')] = v
kp_detector.load_state_dict(x_generator)
if he_estimator is not None:
x_generator = {}
for k,v in checkpoint.items():
if 'he_estimator' in k:
x_generator[k.replace('he_estimator.', '')] = v
he_estimator.load_state_dict(x_generator)
return None
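    # The loaders above split one flat facevid2vid checkpoint into per-module state dicts by
    # filtering on a key prefix and stripping it. The same pattern in isolation, with
    # hypothetical keys:
    #
    #     >>> checkpoint = {"generator.conv.weight": 1, "kp_extractor.fc.bias": 2}
    #     >>> {k.replace("generator.", ""): v for k, v in checkpoint.items() if "generator" in k}
    #     {'conv.weight': 1}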
def load_cpk_facevid2vid(self, checkpoint_path, generator=None, discriminator=None,
kp_detector=None, he_estimator=None, optimizer_generator=None,
optimizer_discriminator=None, optimizer_kp_detector=None,
optimizer_he_estimator=None, device="cpu"):
checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
if generator is not None:
generator.load_state_dict(checkpoint['generator'])
if kp_detector is not None:
kp_detector.load_state_dict(checkpoint['kp_detector'])
if he_estimator is not None:
he_estimator.load_state_dict(checkpoint['he_estimator'])
if discriminator is not None:
try:
discriminator.load_state_dict(checkpoint['discriminator'])
except:
                print('No discriminator in the state-dict. Discriminator will be randomly initialized')
if optimizer_generator is not None:
optimizer_generator.load_state_dict(checkpoint['optimizer_generator'])
if optimizer_discriminator is not None:
try:
optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
except RuntimeError as e:
                print('No discriminator optimizer in the state-dict. Optimizer will not be initialized')
if optimizer_kp_detector is not None:
optimizer_kp_detector.load_state_dict(checkpoint['optimizer_kp_detector'])
if optimizer_he_estimator is not None:
optimizer_he_estimator.load_state_dict(checkpoint['optimizer_he_estimator'])
return checkpoint['epoch']
def load_cpk_mapping(self, checkpoint_path, mapping=None, discriminator=None,
optimizer_mapping=None, optimizer_discriminator=None, device='cpu'):
checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
if mapping is not None:
mapping.load_state_dict(checkpoint['mapping'])
if discriminator is not None:
discriminator.load_state_dict(checkpoint['discriminator'])
if optimizer_mapping is not None:
optimizer_mapping.load_state_dict(checkpoint['optimizer_mapping'])
if optimizer_discriminator is not None:
optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
return checkpoint['epoch']
def generate(self, x, video_save_dir, pic_path, crop_info, enhancer=None, background_enhancer=None, preprocess='crop', img_size=256):
source_image=x['source_image'].type(torch.FloatTensor)
source_semantics=x['source_semantics'].type(torch.FloatTensor)
target_semantics=x['target_semantics_list'].type(torch.FloatTensor)
source_image=source_image.to(self.device)
source_semantics=source_semantics.to(self.device)
target_semantics=target_semantics.to(self.device)
if 'yaw_c_seq' in x:
            yaw_c_seq = x['yaw_c_seq'].type(torch.FloatTensor).to(self.device)
        else:
            yaw_c_seq = None
        if 'pitch_c_seq' in x:
            pitch_c_seq = x['pitch_c_seq'].type(torch.FloatTensor).to(self.device)
        else:
            pitch_c_seq = None
        if 'roll_c_seq' in x:
            roll_c_seq = x['roll_c_seq'].type(torch.FloatTensor).to(self.device)
else:
roll_c_seq = None
frame_num = x['frame_num']
predictions_video = make_animation(source_image, source_semantics, target_semantics,
self.generator, self.kp_extractor, self.he_estimator, self.mapping,
yaw_c_seq, pitch_c_seq, roll_c_seq, use_exp = True)
predictions_video = predictions_video.reshape((-1,)+predictions_video.shape[2:])
predictions_video = predictions_video[:frame_num]
video = []
for idx in range(predictions_video.shape[0]):
image = predictions_video[idx]
image = np.transpose(image.data.cpu().numpy(), [1, 2, 0]).astype(np.float32)
video.append(image)
result = img_as_ubyte(video)
        ### the generated video is 256x256, so we keep the aspect ratio of the original image
original_size = crop_info[0]
if original_size:
result = [ cv2.resize(result_i,(img_size, int(img_size * original_size[1]/original_size[0]) )) for result_i in result ]
video_name = x['video_name'] + '.mp4'
path = os.path.join(video_save_dir, 'temp_'+video_name)
imageio.mimsave(path, result, fps=float(25))
av_path = os.path.join(video_save_dir, video_name)
return_path = av_path
audio_path = x['audio_path']
audio_name = os.path.splitext(os.path.split(audio_path)[-1])[0]
new_audio_path = os.path.join(video_save_dir, audio_name+'.wav')
start_time = 0
# cog will not keep the .mp3 filename
sound = AudioSegment.from_file(audio_path)
frames = frame_num
end_time = start_time + frames*1/25*1000
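        # pydub slices are indexed in milliseconds: at 25 fps, frame_num frames span
        # frame_num / 25 * 1000 ms (e.g. 200 frames -> 8000 ms).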
word1=sound.set_frame_rate(16000)
word = word1[start_time:end_time]
word.export(new_audio_path, format="wav")
| save_video_with_watermark(path, new_audio_path, av_path, watermark= False) | 9 | 2023-11-25 06:53:12+00:00 | 12k |
microsoft/Project-BayesDAG | src/causica/models/model.py | [
{
"identifier": "Variables",
"path": "src/causica/datasets/variables.py",
"snippet": "class Variables:\n \"\"\"\n This class represents any variables present in a model.\n \"\"\"\n\n def __init__(\n self,\n variables: List[Variable],\n auxiliary_variables: Optional[List[Variable]] = None,\n used_cols: Optional[List[int]] = None,\n ) -> None:\n \"\"\"\n Args:\n variables: A list Variable objects.\n auxiliary_variables: A list of Variable objects only used for input into VAE,\n not produced in output.\n These are assumed to be appended onto the end of the variables in the data.\n Defaults to None - no aux variables present.\n used_cols: A list of column ids that were used when processing the original data.\n \"\"\"\n if not auxiliary_variables:\n auxiliary_variables = []\n self.auxiliary_variables = auxiliary_variables\n self._variables = variables\n\n self._deduplicate_names()\n\n # Dictionary mapping from variable name to variable index.\n self.name_to_idx = {var.name: idx for idx, var in enumerate(self._variables)}\n\n # Lists containing query and target variable indices\n self.target_var_idxs = []\n self.not_target_var_idxs = []\n self.query_var_idxs = []\n self.not_query_var_idxs = []\n for idx, var in enumerate(self._variables):\n if var.query:\n self.query_var_idxs.append(idx)\n else:\n self.not_query_var_idxs.append(idx)\n if var.target:\n self.target_var_idxs.append(idx)\n else:\n self.not_target_var_idxs.append(idx)\n\n if len(self.target_var_idxs) > 0 and all(idx in self.query_var_idxs for idx in self.target_var_idxs):\n warnings.warn(\n \"All target variables are marked as queriable, it is likely that active learning will always \"\n \"select these variables first.\"\n )\n\n # Lists containing continuous (including text) and binary/categorical variable indices\n self.var_idxs_by_type: DefaultDict[str, List[int]] = defaultdict(list)\n for idx, var in enumerate(self._variables + self.auxiliary_variables):\n self.var_idxs_by_type[var.type_].append(idx)\n\n # List of lists, where self.unprocessed_cols[i] gives the columns occupied by the ith variable in the unprocessed\n # data.\n self.unprocessed_cols = []\n start_col = 0\n for var in self._all_variables:\n end_col = start_col + var.unprocessed_dim\n self.unprocessed_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of lists, where self.unprocessed_non_aux_cols[i] gives the columns occupied by the ith variable in the unprocessed\n # data (non-auxiliary).\n self.unprocessed_non_aux_cols = []\n start_col = 0\n for var in self._variables:\n end_col = start_col + var.unprocessed_dim\n self.unprocessed_non_aux_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of lists, where self.processed_cols[i] gives the columns occupied by the ith variable in the processed\n # data.\n self.processed_cols = []\n start_col = 0\n for var in self._all_variables:\n end_col = start_col + var.processed_dim\n self.processed_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of lists, where self.processed_non_aux_cols[i] gives the columns occupied by the ith variable in the processed\n # data (non-auxiliary).\n self.processed_non_aux_cols = []\n start_col = 0\n for var in self._variables:\n end_col = start_col + var.processed_dim\n self.processed_non_aux_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # Set of all query group names, maintaining order in which they are first encountered when iterating through\n # the variables list. 
This is the simplest way to do this since dictionaries are guaranteed to be\n # insertion-ordered since Python 3.7\n self.group_names = list(dict.fromkeys([var.group_name for var in self._variables]))\n\n # List containing indices for each query group, where the query group names are assumed to be in the same order\n # as self.group_names\n self.group_idxs = [\n [idx for idx, var in enumerate(self._variables) if var.group_name == group_name]\n for group_name in self.group_names\n ]\n\n # Remove groups containing no queriable variables from self.group_names and self.group_idxs, as\n # we can guarantee that we will never query these groups.\n is_group_queriable = [any(self._variables[idx].query for idx in idxs) for idxs in self.group_idxs]\n\n self.group_names = [name for group_idx, name in enumerate(self.group_names) if is_group_queriable[group_idx]]\n self.group_idxs = [idxs for group_idx, idxs in enumerate(self.group_idxs) if is_group_queriable[group_idx]]\n\n # Save the list of observed column ids\n default_used_cols = list(range(len(self._variables) + len(auxiliary_variables))) # All columns observed\n self.used_cols = used_cols if used_cols is not None else default_used_cols\n assert len(self.used_cols) == len(self._variables) + len(self.auxiliary_variables)\n\n self.col_id_to_var_index = {old: new for new, old in enumerate(self.used_cols)}\n\n def __repr__(self):\n return str(self._variables)\n\n def __iter__(self) -> Iterator[Variable]:\n \"\"\"\n Iterate through the variables within the container.\n Note - Now it iterate through all the variables within the container\n (including auxiliary variables, if they are present)\n \"\"\"\n for var in self._all_variables:\n yield var\n\n def __getitem__(self, idx):\n return (self._all_variables)[idx]\n\n def __len__(self) -> int:\n return len(self._variables) + len(self.auxiliary_variables)\n\n @classmethod\n def create_from_json(cls, path: str) -> Variables:\n return cls.create_from_dict(read_json_as(path, dict))\n\n @classmethod\n def create_from_dict(cls, variables_dict: Dict[str, List[Any]]) -> Variables:\n \"\"\"\n Create variables object from a dictionary\n \"\"\"\n variables = variables_dict[\"variables\"]\n for var in variables:\n # remove deprecated \"id\" key if present\n var.pop(\"id\", None)\n var_obj_list = [Variable(**var) for var in variables]\n\n auxiliary_vars = variables_dict.get(\"auxiliary_variables\", [])\n if len(auxiliary_vars) == 0:\n auxiliary_vars_obj = None\n else:\n for var in auxiliary_vars:\n # remove deprecated \"id\" key if present\n var.pop(\"id\", None)\n\n auxiliary_vars_obj = [Variable(**var) for var in auxiliary_vars]\n\n used_cols = variables_dict.get(\"used_cols\", None)\n\n return cls(var_obj_list, auxiliary_vars_obj, used_cols)\n\n @classmethod\n def create_from_data_and_dict(\n cls, data: np.ndarray, mask: np.ndarray, variables_dict: Optional[Dict[str, Any]] = None\n ) -> Variables:\n \"\"\"\n Create variables object from an input dictionary, inferring missing fields using `data` and `mask`.\n \"\"\"\n # Infer missing fields in variables_dict\n variables_dict = cls.infer_from_data(data, mask, variables_dict, True)\n variables = cls.create_from_dict(variables_dict)\n return variables\n\n @staticmethod\n def _metadata_from_dict(\n data, mask, variables_dict, variables_type=\"variables\"\n ) -> Tuple[List[Any], Union[List[Any], None]]:\n \"\"\"\n Infer variables_metadata from input data\n\n Args:\n data: NumPy array containing data\n mask: NumPy array containing 1 for observed data values, 0 for 
unobserved data values.\n variables_dict: Dictionary containing metadata for each variable (column) in the input data. Missing variables,\n or missing fields for a particular variable, will attempt to be inferred from the input data.\n variables_type: is it aux variables, or normal variables\n Returns:\n varaibles_metadata: inferred metadata from input data\n A list of column ids that were used when processing the original data.\n \"\"\"\n\n variables_metadata = []\n # Use None rather than {} as default since mutable default args are dangerous in Python.\n used_cols = variables_dict.get(\"used_cols\", None)\n if used_cols:\n used_cols = cast(List[int], used_cols)\n assert len(used_cols) == data.shape[1]\n\n for idx, variable_metadata in enumerate(variables_dict[variables_type]):\n if not all(\n k in variable_metadata for k in [\"name\", \"type\", \"lower\", \"upper\", \"query\", \"target\", \"always_observed\"]\n ):\n # If variable metadata fully specified, do not try to infer, as doing column indexing can be expensive\n # for CSR sparse matrices.\n var_data = data[:, idx]\n var_mask = mask[:, idx]\n if issparse(var_data):\n var_data = var_data.toarray()\n var_mask = var_mask.toarray()\n\n if \"name\" not in variable_metadata:\n if used_cols:\n variable_metadata[\"name\"] = str(used_cols[idx])\n else:\n variable_metadata[\"name\"] = f\"Column {idx}\"\n\n # If data type/min max/num categories specified explicitly, overwrite variables file\n if \"type\" not in variable_metadata:\n # Test if all unmasked elements are integers\n\n if np.all((var_data * var_mask) // 1 == var_data * var_mask):\n if (var_data * var_mask).max() <= 1:\n print(\n f'Type of variable {variable_metadata[\"name\"]} inferred as binary. This can be '\n \"changed manually in the dataset's variables.json file\"\n )\n variable_metadata[\"type\"] = \"binary\"\n else:\n # Note that we always infer integer values with a max value > 1 as categorical. This may want to be\n # reconsidered if support for ordinal variables is introduced at a later date.\n print(\n f'Type of variable {variable_metadata[\"name\"]} inferred as categorical. This can be'\n \" changed manually in the dataset's variables.json file\"\n )\n variable_metadata[\"type\"] = \"categorical\"\n else:\n variable_metadata[\"type\"] = \"continuous\"\n\n if \"lower\" not in variable_metadata:\n if variable_metadata[\"type\"] == \"binary\":\n inferred_lower = 0\n else:\n inferred_lower = min(var_data[np.where(var_mask == 1)]).item()\n variable_metadata[\"lower\"] = inferred_lower\n print(\n f'Minimum value of variable {variable_metadata[\"name\"]} inferred as {inferred_lower}. This'\n \" can be changed manually in the dataset's variables.json file\"\n )\n\n if \"upper\" not in variable_metadata:\n if variable_metadata[\"type\"] == \"binary\":\n inferred_upper = 1\n else:\n inferred_upper = max(var_data[np.where(var_mask == 1)]).item()\n variable_metadata[\"upper\"] = inferred_upper\n print(\n f'Max value of variable {variable_metadata[\"name\"]} inferred as {inferred_upper}. This can '\n \"be changed manually in the dataset's variables.json file\"\n )\n\n if \"query\" not in variable_metadata:\n # By default, assume all variables can be queried unless specified otherwise.\n if variables_type == \"auxiliary_variables\":\n variable_metadata[\"query\"] = False\n print(\n f'Variable {variable_metadata[\"name\"]} inferred to be a non-queriable variable. 
'\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"query\" field.'\n )\n else:\n variable_metadata[\"query\"] = True\n print(\n f'Variable {variable_metadata[\"name\"]} inferred to be a queriable variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"query\" field.'\n )\n\n if \"target\" not in variable_metadata:\n # By default, assume variable is a target if and only if it is not queriable.\n variable_metadata[\"target\"] = not variable_metadata[\"query\"]\n fill_string = \"not \" if not variable_metadata[\"target\"] else \"\"\n print(\n f'Variable {variable_metadata[\"name\"]} inferred as {fill_string}an active learning target variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"target\" field.'\n )\n\n if \"always_observed\" not in variable_metadata:\n # By default, assume variable is always observed if there is no missing in the mask.\n if np.sum((var_mask - 1) ** 2) == 0:\n variable_metadata[\"always_observed\"] = True\n else:\n variable_metadata[\"always_observed\"] = False\n fill_string = \"not \" if not variable_metadata[\"always_observed\"] else \"\"\n print(\n f'Variable {variable_metadata[\"name\"]} inferred as {fill_string}an always observed target variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"always_observed\" field.'\n )\n\n variables_metadata.append(variable_metadata)\n\n return variables_metadata, used_cols\n\n @staticmethod\n def infer_from_data(data, mask, variables_dict=None, infer_aux_variables=False) -> Dict[str, List[Any]]:\n \"\"\"\n Infer missing values in an input variables dictionary, using the input data.\n\n Args:\n data: NumPy array containing data\n mask: NumPy array containing 1 for observed data values, 0 for unobserved data values.\n variables_dict: Dictionary containing metadata for each variable (column) in the input data. 
Missing variables,\n or missing fields for a particular variable, will attempt to be inferred from the input data.\n infer_aux_variables: infer auxiliary variables for GINA or not.\n Returns:\n variables_dict: Updated version of the input variables_dict, with missing variables and fields inferred from the\n data.\n \"\"\"\n\n if variables_dict is None:\n variables_dict = {}\n\n # NOTE this assumes all variables have only one column in unprocessed data, which should always be the case when\n # inferring from a dataset.\n if \"auxiliary_variables\" not in variables_dict:\n variables_dict[\"auxiliary_variables\"] = []\n\n if \"variables\" not in variables_dict or variables_dict[\"variables\"] == []:\n num_var_cols = data.shape[1] - len(variables_dict[\"auxiliary_variables\"])\n variables_dict[\"variables\"] = [{} for _ in range(num_var_cols)]\n\n variables_metadata, used_cols = Variables._metadata_from_dict(\n data, mask, variables_dict, variables_type=\"variables\"\n )\n variables_dict = {\n \"variables\": variables_metadata,\n \"auxiliary_variables\": variables_dict[\"auxiliary_variables\"],\n \"used_cols\": used_cols,\n }\n if infer_aux_variables:\n aux_variables_metadata, used_cols = Variables._metadata_from_dict(\n data, mask, variables_dict, variables_type=\"auxiliary_variables\"\n )\n variables_dict = {\n \"variables\": variables_metadata,\n \"auxiliary_variables\": aux_variables_metadata,\n \"used_cols\": used_cols,\n }\n\n return variables_dict\n\n @property\n def _all_variables(self):\n return self._variables + self.auxiliary_variables\n\n @property\n def has_auxiliary(self) -> bool:\n \"\"\"\n True if there are aux variables present.\n \"\"\"\n return len(self.auxiliary_variables) > 0\n\n @property\n def binary_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all binary variables.\n \"\"\"\n return self.var_idxs_by_type[\"binary\"]\n\n @property\n def categorical_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all categorical variables.\n \"\"\"\n return self.var_idxs_by_type[\"categorical\"]\n\n @property\n def discrete_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all discrete (i.e. binary or categorical) variables. We sort to ensure that the\n combined list is in ascending order.\n \"\"\"\n return sorted(self.var_idxs_by_type[\"categorical\"] + self.var_idxs_by_type[\"binary\"])\n\n @property\n def continuous_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all continuous variables.\n \"\"\"\n return self.var_idxs_by_type[\"continuous\"]\n\n @property\n def text_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all text variables.\n \"\"\"\n return self.var_idxs_by_type[\"text\"]\n\n @property\n def non_text_idxs(self) -> List[bool]:\n \"\"\"Helper method. Returns list of booleans, where an element\n at index i indicates whether a variable at index i is non-text or not\n e.g. 
For Variables object of [...\"continous\"..., ...\"text\"..., \"continuous\"],\n the result would be [True, False, True]\n \"\"\"\n unproc_cols_by_type = self.unprocessed_cols_by_type\n if \"text\" not in unproc_cols_by_type:\n return [True for _ in range(len(self))]\n return (~np.in1d(range(len(self)), unproc_cols_by_type[\"text\"])).tolist()\n\n @property\n def num_unprocessed_cols(self) -> int:\n \"\"\"\n Return number of columns in the unprocessed data represented by all variables\n \"\"\"\n return sum(len(idxs) for idxs in self.unprocessed_cols)\n\n @property\n def num_unprocessed_non_aux_cols(self) -> int:\n \"\"\"\n Return number of columns in the unprocessed data represented by non auxiliary variables\n \"\"\"\n return sum(len(idxs) for idxs in self.unprocessed_non_aux_cols)\n\n @property\n def num_processed_cols(self) -> int:\n \"\"\"\n Return number of columns in the processed data represented by all variables\n \"\"\"\n return sum(len(idxs) for idxs in self.processed_cols)\n\n @property\n def num_processed_non_aux_cols(self) -> int:\n \"\"\"\n Return number of columns in the processed data represented by non auxiliary variables\n \"\"\"\n return sum(len(idxs) for idxs in self.processed_non_aux_cols)\n\n @property\n def num_groups(self) -> int:\n \"\"\"\n Return the number of unique query groups in the variables object.\n \"\"\"\n return len(self.group_names)\n\n @property\n def group_mask(self) -> np.ndarray:\n \"\"\"\n Return a mask of shape (num_groups, num_processed_cols) indicating which column\n corresponds to which group.\n \"\"\"\n mask = np.zeros((self.num_groups, self.num_processed_cols), dtype=bool)\n for group_idx, group in enumerate(self.group_idxs):\n for var in group:\n for proc_col in self.processed_cols[var]:\n mask[group_idx, proc_col] = 1\n return mask\n\n @property\n def proc_always_observed_list(self) -> List[Optional[bool]]:\n \"\"\"\n The mask that indicates if the variable is always observed (for processed data)\n \"\"\"\n return sum(([var.always_observed] * var.processed_dim for var in self._all_variables), [])\n\n @property\n def processed_cols_by_type(self) -> Dict[str, List[List[int]]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list of lists, where each\n sublist represents indices in the processed (i.e. one-hot) data associated with each variable of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [[0,1,2], [3,4,5]], 'binary': [[6]]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[List[int]]] = defaultdict(list)\n for var, cols in zip(self._all_variables, self.processed_cols):\n grouped_vars[var.type_].append(cols)\n return grouped_vars\n\n @property\n def processed_non_aux_cols_by_type(self) -> Dict[str, List[List[int]]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list of lists, where each\n sublist represents indices in the processed (i.e. one-hot) data (w/o aux variables) associated with each\n variable of that type.\n E.g. 
for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [[0,1,2], [3,4,5]], 'binary': [[6]]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[List[int]]] = defaultdict(list)\n for var, cols in zip(self._variables, self.processed_cols):\n grouped_vars[var.type_].append(cols)\n return grouped_vars\n\n @property\n def unprocessed_cols_by_type(self) -> DefaultDict[str, List[int]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list containing the column\n indices in the unprocessed data for all variables of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [0, 1], 'binary': [2]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[int]] = defaultdict(list)\n i = 0\n for var, cols in zip(self._all_variables, self.unprocessed_cols):\n grouped_vars[var.type_] += cols\n i += var.unprocessed_dim\n return grouped_vars\n\n @property\n def unprocessed_non_aux_cols_by_type(self) -> DefaultDict[str, List[int]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list containing the column\n indices in the unprocessed data for all variables of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [0, 1], 'binary': [2]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[int]] = defaultdict(list)\n i = 0\n for var, cols in zip(self._variables, self.unprocessed_cols):\n grouped_vars[var.type_] += cols\n i += var.unprocessed_dim\n return grouped_vars\n\n def subset(self, idxs: List[int], auxiliary_idxs: Optional[List[int]] = None) -> Variables:\n \"\"\"\n Returns a new Variables object containing only the Variable objects whose indices are given in `idxs`.\n Note that this currently ignores metadata variables.\n \"\"\"\n if auxiliary_idxs is None:\n auxiliary_idxs = []\n\n variables_list = [self._variables[idx] for idx in idxs]\n auxiliary_variables_list = [self.auxiliary_variables[idx] for idx in auxiliary_idxs]\n return Variables(variables_list, auxiliary_variables_list)\n\n def to_dict(self) -> Dict[str, Any]:\n variables_list = [var.to_json() for var in self._variables]\n if self.auxiliary_variables is None:\n auxiliary_vars_list = []\n else:\n auxiliary_vars_list = [var.to_json() for var in self.auxiliary_variables]\n\n variables_json = {\n \"variables\": variables_list,\n \"auxiliary_variables\": auxiliary_vars_list,\n \"used_cols\": [int(col) for col in self.used_cols],\n }\n return variables_json\n\n def save(self, path: str) -> None:\n variables_json = self.to_dict()\n save_json(variables_json, path)\n\n def as_list(self) -> List[Variable]:\n return self._variables\n\n def get_idxs_from_name_list(self, variable_names: List[Union[str, int]]) -> np.ndarray:\n \"\"\"\n Get a binary array of shape (variable_count,), where for each index the array value is 1 if the corresponding\n variable is named in `variable_names`, and 0 otherwise.\n \"\"\"\n variables_to_query = np.zeros((len(self._variables),))\n # Look up indices of specified variables and mark as queriable.\n for variable_name in variable_names:\n # Cast name to string in case numeric names (e.g. 
question ids) have been input as integers.\n variable_name = str(variable_name)\n variable_idx = self.name_to_idx[variable_name]\n variables_to_query[variable_idx] = 1\n\n return variables_to_query\n\n def get_observable_groups(self, data_mask_row: np.ndarray, obs_mask_row: np.ndarray) -> List[int]:\n \"\"\"\n Get list of indices for groups that are still observable in the current row\n Args:\n data_mask_row: 1D numpy array containing 1 for observed variables and 0 for unobserved in the underlying data\n obs_mask_row: 1D numpy array containing 1 for variables observed during active learning and 0 for ones unobserved\n\n Returns:\n list of indices of groups that can be observed, where the indices correspond to the corresponding group\n names in `self.group_names`.\n \"\"\"\n observable_variables_idxs = self.get_observable_variable_idxs(data_mask_row, obs_mask_row)\n observable_groups_idxs: List[int] = []\n for group_idx, idxs in enumerate(self.group_idxs):\n if any(i in observable_variables_idxs for i in idxs):\n observable_groups_idxs.append(group_idx)\n return observable_groups_idxs\n\n def get_observable_variable_idxs(self, data_mask_row: np.ndarray, obs_mask_row: np.ndarray) -> List[int]:\n \"\"\"\n Get list of variable idxs for variables that are still observable in the current row.\n Args:\n data_mask_row: 1D numpy array containing 1 for observed variables and 0 for unobserved in the underlying data\n obs_mask_row: 1D numpy array containing 1 for variables observed during active learning and 0 for ones unobserved\n\n Returns:\n observable_vars: List of indices of variables that can be observed.\n \"\"\"\n if data_mask_row.ndim != 1:\n raise ValueError(f\"Test mask should be 1D, had {data_mask_row.ndim} dims and shape {data_mask_row.shape}.\")\n if obs_mask_row.ndim != 1:\n raise ValueError(\n f\"Observation mask should be 1D, had {obs_mask_row.ndim} dims and shape {obs_mask_row.shape}.\"\n )\n if len(obs_mask_row) != len(data_mask_row) or len(data_mask_row) != len(self._variables):\n # One likely cause is accidentally passing 'processed' masks, which may be longer\n # if some variables are categorical.\n raise ValueError(\n f\"Lengths of obs_mask_row {len(obs_mask_row)}, data_mask_row {len(data_mask_row)}, \"\n f\"and variables list {len(self._variables)} should all be the same.\"\n )\n # Get ids where there is an underlying data value (test_mask == 1) and that we haven't yet queried (obs_mask == 0)\n unobserved_idxs = np.where((data_mask_row == 1) & (obs_mask_row == 0))[0]\n\n # Intersection of these and query_var_idxs.\n observable_idx_set = set(unobserved_idxs).intersection(set(self.query_var_idxs))\n return list(observable_idx_set)\n\n def get_var_cols_from_data(self, var_idx, data):\n \"\"\"\n Get data from an array for a single variable only.\n\n Args:\n var_idx: Index of variable we want data for.\n data (shape (batch_size, variable_count)): Array to get variable info from.\n\n Returns:\n var_data (shape (observed_count, processed_dim)): Values only for\n the corresponding variable.\n \"\"\"\n return data[:, self.processed_cols[var_idx]]\n\n def get_variables_to_observe(self, data_mask: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Return a boolean tensor of length num_variables, where each element indicates whether the corresponding variable\n can be queried during active learning (i.e. 
the variable is queriable and has at least one observed value in\n the data).\n Args:\n data_mask (shape (batch_size, num_processed_cols)): Processed mask\n\n Returns:\n torch.Tensor (shape (variable_count,)): True where it's a query-able variable and we have at least one\n observed value\n \"\"\"\n cols_with_data = data_mask.sum(dim=0).to(torch.bool)\n\n # data_mask may have multiple columns for a single variable, if it's a categorical variable. Pick first entry per variable\n ii = torch.tensor([cols[0] for cols in self.processed_cols], dtype=torch.long, device=cols_with_data.device)\n cols_with_data = torch.index_select(cols_with_data, 0, ii)\n is_query_id = torch.zeros(len(self), dtype=torch.bool, device=cols_with_data.device)\n is_query_id[\n tuple(self.query_var_idxs),\n ] = True\n return is_query_id * cols_with_data\n\n def _deduplicate_names(self):\n # Produce warning if var name is reused and add an increasing integer to the end until it is unique.\n var_names = set()\n for var in self._all_variables:\n i = 2\n original_name = var.name\n while var.name in var_names:\n new_name = f\"{original_name}_{i}\"\n var.name = new_name\n i += 1\n if var.name != original_name:\n # Do the warning in a separate block to the while loop so that we only raise one warning if we have to\n # try appending several different integers to the name.\n warnings.warn(\n f\"Name {original_name} has already been used, renaming to {var.name}\",\n UserWarning,\n )\n var_names.add(var.name)\n\n # TODO: Maybe create Variables.Utils for methods like the below one\n @staticmethod\n def create_empty_data(variables: Variables) -> np.ndarray:\n var_count = len(variables)\n empty_data = np.zeros((1, var_count), dtype=object)\n for i in range(var_count):\n if variables[i].type_ == \"text\":\n empty_data[:, i] = \"empty str\"\n return empty_data"
},
{
"identifier": "write_git_info",
"path": "src/causica/utils/helper_functions.py",
"snippet": "def write_git_info(directory: str, exist_ok: bool = False):\n \"\"\"\n Write sys.argv, git hash, git diff to <directory>/git_info.txt\n\n directory: where to write git_info.txt. This directory must already exist\n exist_ok: if set to True, may silently overwrite old git info\n \"\"\"\n assert os.path.exists(directory)\n try:\n repo = git.Repo(search_parent_directories=True)\n\n except git.InvalidGitRepositoryError as exc:\n # Likely to happen if we are in an AzureML run.\n raise ValueError(\"Not running inside a Git repo.\") from exc\n commit = repo.head.commit\n diff = repo.git.diff(None)\n mode = \"w\" if exist_ok else \"x\"\n with open(os.path.join(directory, \"git_info.txt\"), mode, encoding=\"utf-8\") as f:\n f.write(f\"sys.argv: {sys.argv}\\n\")\n f.write(\"Git commit: \" + str(commit) + \"\\n\")\n try:\n f.write(\"Active branch: \" + str(repo.active_branch) + \"\\n\")\n except TypeError:\n # Happens in PR build, detached head state\n pass\n f.write(\"Git diff:\\n\" + str(diff))"
},
{
"identifier": "IModel",
"path": "src/causica/models/imodel.py",
"snippet": "class IModel(ABC):\n \"\"\"\n Interface for model:\n create: Create an instance of the concrete class.\n load: Load an instance of the concrete class from a given directory.\n save: Save any data needed to load the model.\n name: Name of objective, to use when finding model to use from string.\n run_train: Train the model.\n impute: Impute missing values:\n \"\"\"\n\n def __init__(self, model_id: str, variables: Variables, save_dir: str) -> None:\n \"\"\"\n Args:\n model_id: Unique model ID for referencing this model instance.\n variables: Information about variables/features used by this model.\n save_dir: Location to save any information about this model, including training data.\n It will be created if it doesn't exist.\n \"\"\"\n self.model_id = model_id\n self.save_dir = save_dir\n self.variables = variables\n self.data_processor = DataProcessor(variables)\n\n @classmethod\n @abstractmethod\n def create(\n cls,\n model_id: str,\n save_dir: str,\n variables: Variables,\n model_config_dict: Dict[str, Any],\n device: Union[str, int],\n ) -> IModel:\n \"\"\"\n Create a new instance of a model with given type.\n\n Args:\n model_id (str): Unique model ID for referencing this model instance.\n save_dir (str): Location to save all model information to.\n device (str or int): Name of device to load the model on. Valid options are 'cpu', 'gpu', or a device ID\n (e.g. 0 or 1 on a two-GPU machine).\n variables (Variables): Information about variables/features used\n by this model.\n model_config_dict (dictionary): Any other parameters needed by a specific concrete class. Of\n the form {arg_name: arg_value}. e.g. {\"embedding_dim\": 10, \"latent_dim\": 20}\n\n Returns:\n model: Instance of concrete implementation of `Model` class.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def load(cls, model_id: str, save_dir: str, device: Union[str, int]) -> IModel:\n \"\"\"\n Load an instance of a model.\n\n Args:\n model_id (str): Unique model ID for referencing this model instance.\n save_dir (str): Save directory for this model.\n device (str or int): Name of device to load the model on. Valid options are 'cpu', 'gpu', or a device ID\n (e.g. 0 or 1 on a two-GPU machine).\n variables (Variables): Information about variables/features used\n by this model.\n\n Returns:\n Instance of concrete implementation of `Model` class.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def name(cls) -> str:\n \"\"\"\n Name of the model implemented in abstract class.\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def save(self) -> None:\n \"\"\"\n Save the model.\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def run_train(\n self,\n dataset: Dataset,\n train_config_dict: Optional[Dict[str, Any]] = None,\n report_progress_callback: Optional[Callable[[str, int, int], None]] = None,\n ) -> None:\n \"\"\"\n Train the model.\n Training results will be saved.\n\n Args:\n dataset: Dataset object with data and masks in unprocessed form.\n train_config_dict (dictionary): Any other parameters needed by a specific concrete class. Of\n the form {arg_name: arg_value}. e.g. {\"learning_rate\": 1e-3, \"epochs\": 100}\n report_progress_callback: Function to report model progress for API.\n \"\"\"\n raise NotImplementedError()"
}
] | import os
import numpy as np
from typing import Tuple
from ..datasets.variables import Variables
from ..utils.helper_functions import write_git_info
from .imodel import IModel | 8,707 | # This is required in python 3 to allow return types of the same class.
from __future__ import annotations
class Model(IModel):
"""
Abstract base model class.
"""
def __init__(self, model_id: str, variables: Variables, save_dir: str) -> None:
"""
Args:
model_id: Unique model ID for referencing this model instance.
variables: Information about variables/features used by this model.
save_dir: Location to save any information about this model, including training data.
It will be created if it doesn't exist.
"""
super().__init__(model_id, variables, save_dir)
os.makedirs(self.save_dir, exist_ok=True)
try:
| # This is required in python 3 to allow return types of the same class.
from __future__ import annotations
class Model(IModel):
"""
Abstract base model class.
"""
def __init__(self, model_id: str, variables: Variables, save_dir: str) -> None:
"""
Args:
model_id: Unique model ID for referencing this model instance.
variables: Information about variables/features used by this model.
save_dir: Location to save any information about this model, including training data.
It will be created if it doesn't exist.
"""
super().__init__(model_id, variables, save_dir)
os.makedirs(self.save_dir, exist_ok=True)
try: | write_git_info(self.save_dir) | 1 | 2023-11-21 12:55:08+00:00 | 12k |
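For this row, the cropped_code and all_code columns stop at the opening try: of Model.__init__, and the next_line column records the gold completion, write_git_info(self.save_dir). The sketch below shows how that tail plausibly reads once completed; it is an illustration rather than the repository's verbatim code, and the except ValueError handler is an assumption inferred from the write_git_info snippet in this row's context, which raises ValueError when no git repository is found.
import os
# Assumption: the repository's package is importable as `causica`; the row itself
# uses the relative import `from ..utils.helper_functions import write_git_info`.
from causica.utils.helper_functions import write_git_info

def init_tail_sketch(save_dir: str) -> None:
    # Sketch of how the end of Model.__init__ shown above plausibly continues.
    os.makedirs(save_dir, exist_ok=True)
    try:
        write_git_info(save_dir)  # the gold next_line of this row
    except ValueError:
        # Assumption: ignore the ValueError that write_git_info raises when the
        # code is not running inside a git repository (e.g. an AzureML run).
        pass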
camenduru/Video-LLaVA-hf | llava/model/multimodal_encoder/languagebind/depth/modeling_depth.py | [
{
"identifier": "LanguageBindDepthConfig",
"path": "llava/model/multimodal_encoder/languagebind/depth/configuration_depth.py",
"snippet": "class LanguageBindDepthConfig(PretrainedConfig):\n r\"\"\"\n [`CLIPConfig`] is the configuration class to store the configuration of a [`CLIPModel`]. It is used to instantiate\n a CLIP model according to the specified arguments, defining the text model and vision model configs. Instantiating\n a configuration with the defaults will yield a similar configuration to that of the CLIP\n [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n Args:\n text_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`CLIPTextConfig`].\n vision_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`CLIPVisionConfig`].\n projection_dim (`int`, *optional*, defaults to 512):\n Dimentionality of text and vision projection layers.\n logit_scale_init_value (`float`, *optional*, defaults to 2.6592):\n The inital value of the *logit_scale* paramter. Default is used as per the original CLIP implementation.\n kwargs (*optional*):\n Dictionary of keyword arguments.\n\n Example:\n\n ```python\n >>> from transformers import CLIPConfig, CLIPModel\n\n >>> # Initializing a CLIPConfig with openai/clip-vit-base-patch32 style configuration\n >>> configuration = CLIPConfig()\n\n >>> # Initializing a CLIPModel (with random weights) from the openai/clip-vit-base-patch32 style configuration\n >>> model = CLIPModel(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n\n >>> # We can also initialize a CLIPConfig from a CLIPTextConfig and a CLIPVisionConfig\n >>> from transformers import CLIPTextConfig, CLIPVisionConfig\n\n >>> # Initializing a CLIPText and CLIPVision configuration\n >>> config_text = CLIPTextConfig()\n >>> config_vision = CLIPVisionConfig()\n\n >>> config = CLIPConfig.from_text_vision_configs(config_text, config_vision)\n ```\"\"\"\n\n model_type = \"LanguageBindDepth\"\n is_composition = True\n\n def __init__(\n self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs\n ):\n # If `_config_dict` exist, we use them for the backward compatibility.\n # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot\n # of confusion!).\n text_config_dict = kwargs.pop(\"text_config_dict\", None)\n vision_config_dict = kwargs.pop(\"vision_config_dict\", None)\n\n super().__init__(**kwargs)\n\n # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in\n # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. 
The values should be same in most\n # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.\n if text_config_dict is not None:\n if text_config is None:\n text_config = {}\n\n # This is the complete result when using `text_config_dict`.\n _text_config_dict = CLIPTextConfig(**text_config_dict).to_dict()\n\n # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.\n for key, value in _text_config_dict.items():\n if key in text_config and value != text_config[key] and key not in [\"transformers_version\"]:\n # If specified in `text_config_dict`\n if key in text_config_dict:\n message = (\n f\"`{key}` is found in both `text_config_dict` and `text_config` but with different values. \"\n f'The value `text_config_dict[\"{key}\"]` will be used instead.'\n )\n # If inferred from default argument values (just to be super careful)\n else:\n message = (\n f\"`text_config_dict` is provided which will be used to initialize `CLIPTextConfig`. The \"\n f'value `text_config[\"{key}\"]` will be overriden.'\n )\n logger.warning(message)\n\n # Update all values in `text_config` with the ones in `_text_config_dict`.\n text_config.update(_text_config_dict)\n\n if vision_config_dict is not None:\n if vision_config is None:\n vision_config = {}\n\n # This is the complete result when using `vision_config_dict`.\n _vision_config_dict = CLIPVisionConfig(**vision_config_dict).to_dict()\n # convert keys to string instead of integer\n if \"id2label\" in _vision_config_dict:\n _vision_config_dict[\"id2label\"] = {\n str(key): value for key, value in _vision_config_dict[\"id2label\"].items()\n }\n\n # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.\n for key, value in _vision_config_dict.items():\n if key in vision_config and value != vision_config[key] and key not in [\"transformers_version\"]:\n # If specified in `vision_config_dict`\n if key in vision_config_dict:\n message = (\n f\"`{key}` is found in both `vision_config_dict` and `vision_config` but with different \"\n f'values. The value `vision_config_dict[\"{key}\"]` will be used instead.'\n )\n # If inferred from default argument values (just to be super careful)\n else:\n message = (\n f\"`vision_config_dict` is provided which will be used to initialize `CLIPVisionConfig`. \"\n f'The value `vision_config[\"{key}\"]` will be overriden.'\n )\n logger.warning(message)\n\n # Update all values in `vision_config` with the ones in `_vision_config_dict`.\n vision_config.update(_vision_config_dict)\n\n if text_config is None:\n text_config = {}\n logger.info(\"`text_config` is `None`. Initializing the `CLIPTextConfig` with default values.\")\n\n if vision_config is None:\n vision_config = {}\n logger.info(\"`vision_config` is `None`. 
initializing the `CLIPVisionConfig` with default values.\")\n\n self.text_config = CLIPTextConfig(**text_config)\n self.vision_config = CLIPVisionConfig(**vision_config)\n\n self.projection_dim = projection_dim\n self.logit_scale_init_value = logit_scale_init_value\n self.initializer_factor = 1.0\n\n @classmethod\n def from_text_vision_configs(cls, text_config: CLIPTextConfig, vision_config: CLIPVisionConfig, **kwargs):\n r\"\"\"\n Instantiate a [`CLIPConfig`] (or a derived class) from clip text model configuration and clip vision model\n configuration.\n\n Returns:\n [`CLIPConfig`]: An instance of a configuration object\n \"\"\"\n\n return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)\n\n def to_dict(self):\n \"\"\"\n Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].\n\n Returns:\n `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n output[\"text_config\"] = self.text_config.to_dict()\n output[\"vision_config\"] = self.vision_config.to_dict()\n output[\"model_type\"] = self.__class__.model_type\n return output"
},
{
"identifier": "CLIPVisionConfig",
"path": "llava/model/multimodal_encoder/languagebind/depth/configuration_depth.py",
"snippet": "class CLIPVisionConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`CLIPVisionModel`]. It is used to instantiate a\n CLIP vision encoder according to the specified arguments, defining the model architecture. Instantiating a\n configuration with the defaults will yield a similar configuration to that of the vision encoder of the CLIP\n [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n Args:\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n intermediate_size (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n image_size (`int`, *optional*, defaults to 224):\n The size (resolution) of each image.\n patch_size (`int`, *optional*, defaults to 32):\n The size (resolution) of each patch.\n hidden_act (`str` or `function`, *optional*, defaults to `\"quick_gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` ``\"quick_gelu\"` are supported.\n layer_norm_eps (`float`, *optional*, defaults to 1e-5):\n The epsilon used by the layer normalization layers.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n initializer_factor (`float`, *optional*, defaults to 1):\n A factor for initializing all weight matrices (should be kept to 1, used internally for initialization\n testing).\n\n Example:\n\n ```python\n >>> from transformers import CLIPVisionConfig, CLIPVisionModel\n\n >>> # Initializing a CLIPVisionConfig with openai/clip-vit-base-patch32 style configuration\n >>> configuration = CLIPVisionConfig()\n\n >>> # Initializing a CLIPVisionModel (with random weights) from the openai/clip-vit-base-patch32 style configuration\n >>> model = CLIPVisionModel(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n ```\"\"\"\n\n model_type = \"clip_vision_model\"\n\n def __init__(\n self,\n hidden_size=768,\n intermediate_size=3072,\n projection_dim=512,\n num_hidden_layers=12,\n num_attention_heads=12,\n num_channels=3,\n image_size=224,\n patch_size=32,\n hidden_act=\"quick_gelu\",\n layer_norm_eps=1e-5,\n attention_dropout=0.0,\n initializer_range=0.02,\n initializer_factor=1.0,\n\n add_time_attn=False, ################################\n num_frames=1, ################################\n force_patch_dropout=0.0, ################################\n lora_r=2, ################################\n lora_alpha=16, ################################\n lora_dropout=0.0, ################################\n num_mel_bins=0.0, ################################\n target_length=0.0, ################################\n max_depth=10,\n video_decode_backend='decord', 
#########################\n **kwargs,\n ):\n super().__init__(**kwargs)\n\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.projection_dim = projection_dim\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.num_channels = num_channels\n self.patch_size = patch_size\n self.image_size = image_size\n self.initializer_range = initializer_range\n self.initializer_factor = initializer_factor\n self.attention_dropout = attention_dropout\n self.layer_norm_eps = layer_norm_eps\n self.hidden_act = hidden_act\n\n self.add_time_attn = add_time_attn ################\n self.num_frames = num_frames ################\n self.force_patch_dropout = force_patch_dropout ################\n self.lora_r = lora_r ################\n self.lora_alpha = lora_alpha ################\n self.lora_dropout = lora_dropout ################\n self.num_mel_bins = num_mel_bins ################\n self.target_length = target_length ################\n self.max_depth = max_depth ################\n self.video_decode_backend = video_decode_backend ################\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> \"PretrainedConfig\":\n cls._set_token_in_kwargs(kwargs)\n\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n\n # get the vision config dict if we are loading from CLIPConfig\n if config_dict.get(\"model_type\") == \"clip\":\n config_dict = config_dict[\"vision_config\"]\n\n if \"model_type\" in config_dict and hasattr(cls, \"model_type\") and config_dict[\"model_type\"] != cls.model_type:\n logger.warning(\n f\"You are using a model of type {config_dict['model_type']} to instantiate a model of type \"\n f\"{cls.model_type}. This is not supported for all configurations of models and can yield errors.\"\n )\n\n return cls.from_dict(config_dict, **kwargs)"
},
{
"identifier": "CLIPTextConfig",
"path": "llava/model/multimodal_encoder/languagebind/depth/configuration_depth.py",
"snippet": "class CLIPTextConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`CLIPTextModel`]. It is used to instantiate a CLIP\n text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration\n with the defaults will yield a similar configuration to that of the text encoder of the CLIP\n [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n Args:\n vocab_size (`int`, *optional*, defaults to 49408):\n Vocabulary size of the CLIP text model. Defines the number of different tokens that can be represented by\n the `inputs_ids` passed when calling [`CLIPModel`].\n hidden_size (`int`, *optional*, defaults to 512):\n Dimensionality of the encoder layers and the pooler layer.\n intermediate_size (`int`, *optional*, defaults to 2048):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 8):\n Number of attention heads for each attention layer in the Transformer encoder.\n max_position_embeddings (`int`, *optional*, defaults to 77):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n hidden_act (`str` or `function`, *optional*, defaults to `\"quick_gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. 
If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` `\"quick_gelu\"` are supported.\n layer_norm_eps (`float`, *optional*, defaults to 1e-5):\n The epsilon used by the layer normalization layers.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n initializer_factor (`float`, *optional*, defaults to 1):\n A factor for initializing all weight matrices (should be kept to 1, used internally for initialization\n testing).\n\n Example:\n\n ```python\n >>> from transformers import CLIPTextConfig, CLIPTextModel\n\n >>> # Initializing a CLIPTextConfig with openai/clip-vit-base-patch32 style configuration\n >>> configuration = CLIPTextConfig()\n\n >>> # Initializing a CLIPTextModel (with random weights) from the openai/clip-vit-base-patch32 style configuration\n >>> model = CLIPTextModel(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n ```\"\"\"\n model_type = \"clip_text_model\"\n\n def __init__(\n self,\n vocab_size=49408,\n hidden_size=512,\n intermediate_size=2048,\n projection_dim=512,\n num_hidden_layers=12,\n num_attention_heads=8,\n max_position_embeddings=77,\n hidden_act=\"quick_gelu\",\n layer_norm_eps=1e-5,\n attention_dropout=0.0,\n initializer_range=0.02,\n initializer_factor=1.0,\n # This differs from `CLIPTokenizer`'s default and from openai/clip\n # See https://github.com/huggingface/transformers/pull/24773#issuecomment-1632287538\n pad_token_id=1,\n bos_token_id=49406,\n eos_token_id=49407,\n **kwargs,\n ):\n super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)\n\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.projection_dim = projection_dim\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.max_position_embeddings = max_position_embeddings\n self.layer_norm_eps = layer_norm_eps\n self.hidden_act = hidden_act\n self.initializer_range = initializer_range\n self.initializer_factor = initializer_factor\n self.attention_dropout = attention_dropout\n self.add_time_attn = False ######################################\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> \"PretrainedConfig\":\n cls._set_token_in_kwargs(kwargs)\n\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n\n # get the text config dict if we are loading from CLIPConfig\n if config_dict.get(\"model_type\") == \"clip\":\n config_dict = config_dict[\"text_config\"]\n\n if \"model_type\" in config_dict and hasattr(cls, \"model_type\") and config_dict[\"model_type\"] != cls.model_type:\n logger.warning(\n f\"You are using a model of type {config_dict['model_type']} to instantiate a model of type \"\n f\"{cls.model_type}. This is not supported for all configurations of models and can yield errors.\"\n )\n\n return cls.from_dict(config_dict, **kwargs)"
}
] | import math
import torch
from typing import Optional, Tuple, Union
from einops import rearrange
from peft import LoraConfig, get_peft_model
from torch import nn
from torch.nn import functional as F
from transformers import PreTrainedModel, add_start_docstrings
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from transformers.models.clip.modeling_clip import CLIPMLP, CLIPAttention, CLIPTextEmbeddings, CLIPVisionEmbeddings, \
CLIPVisionModelWithProjection, CLIPTextModelWithProjection, _expand_mask, CLIPOutput, clip_loss
from transformers.utils import add_start_docstrings_to_model_forward, replace_return_docstrings
from .configuration_depth import LanguageBindDepthConfig, CLIPVisionConfig, CLIPTextConfig | 7,874 | Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`CLIPEncoderLayer`].
Args:
config: CLIPConfig
"""
def __init__(self, config: LanguageBindDepthConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
inputs_embeds,
attention_mask: Optional[torch.Tensor] = None,
causal_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutput]:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
attention_mask,
causal_attention_mask,
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
class CLIPTextTransformer(nn.Module):
|
class PatchDropout(nn.Module):
"""
https://arxiv.org/abs/2212.00794
"""
def __init__(self, prob, exclude_first_token=True):
super().__init__()
assert 0 <= prob < 1.
self.prob = prob
self.exclude_first_token = exclude_first_token # exclude CLS token
def forward(self, x, B, T):
if not self.training or self.prob == 0.:
return x
if self.exclude_first_token:
cls_tokens, x = x[:, :1], x[:, 1:]
else:
cls_tokens = torch.jit.annotate(torch.Tensor, x[:, :1])
batch = x.size()[0]
num_tokens = x.size()[1]
batch_indices = torch.arange(batch)
batch_indices = batch_indices[..., None]
keep_prob = 1 - self.prob
num_patches_keep = max(1, int(num_tokens * keep_prob))
if T == 1:
rand = torch.randn(batch, num_tokens)
patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices
else:
rand = torch.randn(B, num_tokens)
patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices
patch_indices_keep = patch_indices_keep.unsqueeze(1).repeat(1, T, 1)
patch_indices_keep = rearrange(patch_indices_keep, 'b t n -> (b t) n')
x = x[batch_indices, patch_indices_keep]
if self.exclude_first_token:
x = torch.cat((cls_tokens, x), dim=1)
return x
class CLIPEncoderLayer(nn.Module):
def __init__(self, config: LanguageBindDepthConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = CLIPAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = CLIPMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.add_time_attn = config.add_time_attn
if self.add_time_attn:
self.t = config.num_frames
self.temporal_embedding = nn.Parameter(torch.zeros(1, config.num_frames, config.hidden_size))
nn.init.normal_(self.temporal_embedding, std=config.hidden_size ** -0.5)
self.embed_dim = config.hidden_size
self.temporal_attn = CLIPAttention(config)
self.temporal_layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.temporal_mlp = CLIPMLP(config)
self.temporal_layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
causal_attention_mask: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
`(config.encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
if self.add_time_attn:
bt, n, d = hidden_states.shape
t = self.t
# time embed
if t != 1:
n = hidden_states.shape[1]
hidden_states = rearrange(hidden_states, '(b t) n d -> (b n) t d', t=t)
hidden_states = hidden_states + self.temporal_embedding[:, :t, :]
hidden_states = rearrange(hidden_states, '(b n) t d -> (b t) n d', n=n)
# time attn
residual = hidden_states
hidden_states = rearrange(hidden_states, '(b t) n d -> (b n) t d', t=t)
# hidden_states = self.layer_norm1(hidden_states) # share layernorm
hidden_states = self.temporal_layer_norm1(hidden_states)
hidden_states, attn_weights = self.temporal_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
causal_attention_mask=causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = residual + rearrange(hidden_states, '(b n) t d -> (b t) n d', n=n)
residual = hidden_states
hidden_states = rearrange(hidden_states, '(b t) n d -> (b n) t d', t=t)
# hidden_states = self.layer_norm2(hidden_states) # share layernorm
hidden_states = self.temporal_layer_norm2(hidden_states)
hidden_states = self.temporal_mlp(hidden_states)
hidden_states = residual + rearrange(hidden_states, '(b n) t d -> (b t) n d', n=n)
# spatial attn
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
causal_attention_mask=causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
class CLIPPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LanguageBindDepthConfig
base_model_prefix = "clip"
supports_gradient_checkpointing = True
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor
if isinstance(module, CLIPTextEmbeddings):
module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
elif isinstance(module, CLIPVisionEmbeddings):
factor = self.config.initializer_factor
nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
elif isinstance(module, CLIPAttention):
factor = self.config.initializer_factor
in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
out_proj_std = (module.embed_dim**-0.5) * factor
nn.init.normal_(module.q_proj.weight, std=in_proj_std)
nn.init.normal_(module.k_proj.weight, std=in_proj_std)
nn.init.normal_(module.v_proj.weight, std=in_proj_std)
nn.init.normal_(module.out_proj.weight, std=out_proj_std)
elif isinstance(module, CLIPMLP):
factor = self.config.initializer_factor
in_proj_std = (
(module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
)
fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
nn.init.normal_(module.fc1.weight, std=fc_std)
nn.init.normal_(module.fc2.weight, std=in_proj_std)
elif isinstance(module, LanguageBindDepth):
nn.init.normal_(
module.text_projection.weight,
std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
)
nn.init.normal_(
module.visual_projection.weight,
std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
)
elif isinstance(module, CLIPVisionModelWithProjection):
nn.init.normal_(
module.visual_projection.weight,
std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
)
elif isinstance(module, CLIPTextModelWithProjection):
nn.init.normal_(
module.text_projection.weight,
std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
)
if isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, CLIPEncoder):
module.gradient_checkpointing = value
CLIP_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
and behavior.
Parameters:
config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
CLIP_TEXT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
CLIP_VISION_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
CLIP_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class CLIPEncoder(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`CLIPEncoderLayer`].
Args:
config: CLIPConfig
"""
def __init__(self, config: LanguageBindDepthConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
inputs_embeds,
attention_mask: Optional[torch.Tensor] = None,
causal_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutput]:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
attention_mask,
causal_attention_mask,
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
class CLIPTextTransformer(nn.Module): | def __init__(self, config: CLIPTextConfig): | 2 | 2023-11-21 14:33:54+00:00 | 12k |
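The all_code column of this row contains a complete PatchDropout module (the patch-dropout idea from https://arxiv.org/abs/2212.00794): during training it always keeps the CLS token, drops a fixed fraction of the remaining patch tokens, and for multi-frame input drops the same patch indices in every frame of a clip. The usage sketch below illustrates the expected tensor shapes; the import path is inferred from this row's file_path field (assuming the repository root is on PYTHONPATH), and the concrete sizes are made-up illustration values, not taken from the row.
import torch
# Import path inferred from this row's file_path; adjust if the package layout differs.
from llava.model.multimodal_encoder.languagebind.depth.modeling_depth import PatchDropout

B, T, N, D = 2, 8, 257, 1024  # illustrative: batch, frames, tokens (CLS + 256 patches), width

patch_dropout = PatchDropout(prob=0.5, exclude_first_token=True)
patch_dropout.train()  # the module is a no-op in eval mode or when prob == 0

x = torch.randn(B * T, N, D)  # frame tokens flattened as (b t) n d, as in CLIPEncoderLayer
out = patch_dropout(x, B, T)

# The CLS token is always kept and int(256 * 0.5) = 128 patch tokens survive per frame,
# with the same patch indices dropped across all T frames of a clip.
print(out.shape)  # torch.Size([16, 129, 1024])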
Sysys242/DMDPY | modules/features.py | [
{
"identifier": "scrapping_task",
"path": "logics/member_scrapper.py",
"snippet": "def scrapping_task():\n token = logger.delay_input('Please enter the token to scrap on: ')\n gId = logger.delay_input('Please enter the guild id to scrap on: ')\n cId = logger.delay_input('Please enter the channel id to scrap on: ')\n\n ws = WebSocket()\n ws.connect('wss://gateway.discord.gg/?v=9&encoding=json')\n\n ws.send(\n dumps(\n {\"op\":2,\"d\":{\"token\": token,\"capabilities\":8189,\"properties\":{\"os\":\"Windows\",\"browser\":\"Chrome\",\"device\":\"\",\"system_locale\":\"fr-FR\",\"browser_user_agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36\",\"browser_version\":\"113.0.0.0\",\"os_version\":\"10\",\"referrer\":\"\",\"referring_domain\":\"\",\"referrer_current\":\"\",\"referring_domain_current\":\"\",\"release_channel\":\"stable\",\"client_build_number\":201332,\"client_event_source\":None},\"presence\":{\"status\":\"online\",\"since\":0,\"activities\":[],\"afk\":False},\"compress\":False,\"client_state\":{\"guild_versions\":{},\"highest_last_message_id\":\"0\",\"read_state_version\":0,\"user_guild_settings_version\":-1,\"user_settings_version\":-1,\"private_channels_version\":\"0\",\"api_code_version\":0}}}\n )\n )\n ws.recv()\n\n ws.send('{\"op\":4,\"d\":{\"guild_id\":null,\"channel_id\":null,\"self_mute\":true,\"self_deaf\":false,\"self_video\":false}}')\n ws.recv()\n\n finished = False\n index = 0\n scrapped = []\n ids = []\n\n ws.send(dumps({\n \"op\": 14,\n \"d\": {\n \"guild_id\": gId,\n \"typing\": True,\n \"threads\": True,\n \"activities\": True,\n }\n }))\n try:\n while not finished:\n x = []\n if index == 0:\n x = [[0, 99]]\n elif index == 1:\n x = [[0, 99], [100, 199]]\n elif index == 2:\n x = [[0, 99], [100, 199], [200, 299]]\n else:\n x = [[0, 99], [100, 199], [index * 100, (index * 100) + 99]]\n\n ws.send(dumps({\n \"op\":14,\n \"d\":{\n \"guild_id\": gId,\n \"channels\":{\n cId:x,\n }\n }\n }))\n while True:\n resp = loads(ws.recv())\n if resp['t'] == \"GUILD_MEMBER_LIST_UPDATE\":\n break\n if resp['t'] == \"GUILD_MEMBER_LIST_UPDATE\":\n for op in resp['d']['ops']:\n try:\n if len(op['items']) == 0 and op['op'] == \"SYNC\":\n finished = True\n except:\n pass\n\n for op in resp['d']['ops']:\n if op['op'] == \"SYNC\":\n for item in op['items']:\n if not \"group\" in item:\n try:\n if item['member']['user']['discriminator'] == '0':\n scrapped.append(item[\"member\"]['user']['username'] + \"#null\")\n else:\n scrapped.append(item[\"member\"]['user']['username'] + \"#\" + item['member']['user']['discriminator'])\n ids.append(item[\"member\"]['user']['id'])\n except Exception as e:\n print(e)\n print(item)\n\n logger.success(f'Scrapped Member | Index: {index} | Scrapped Amount: {len(scrapped)}')\n Utils.set_title({\n 'Module': 'Member Scrapper',\n 'Index': str(index),\n 'Scrapped': str(len(scrapped))\n }, time())\n \n index += 1\n sleep(Utils.get_config(True)['scrapper']['dellay']/1000)\n except Exception as e:\n logger.error(str(e))\n ws.close()\n logger.success(f\"Finished Scrapping | Scrapped: {len(scrapped)}\")\n logger.debug(f\"Writting id to the users.txt file...\")\n with open('input/users.txt', 'w', encoding=\"latin-1\", errors=\"ignore\") as f:\n for id in scrapped:\n f.write(f'\\n{id}')\n f.close()\n with open('input/ids.txt', 'w', encoding=\"latin-1\", errors=\"ignore\") as f:\n for id in ids:\n f.write(f'\\n{id}')\n f.close()\n logger.success('Finished scrapping jobs, exported ids & users, press enter to get back to main menu !')\n input('')\n Utils.set_title({\n 
'Module': 'Main Menu'\n }, time())"
},
{
"identifier": "display_task",
"path": "logics/display_changer.py",
"snippet": "def display_task():\n logger.clear()\n logger.print_banner('Starting display changing job!')\n global sucesss, failed\n global finished, tokens\n\n sucesss, failed = 0, 0\n finished = False\n\n tokens = tokens_file.get_lines(True)\n proxies = proxies_file.get_lines(True)\n names = names_file.get_lines(True)\n\n def title_thread():\n global sucesss, failed\n global finished, tokens\n tokens_len = len(tokens)\n timestamp = time()\n while not finished:\n Utils.set_title({\n 'Module': 'Display Name Changer',\n 'Accepted': sucesss,\n 'Failed': failed,\n 'Total': f'{tokens_len-len(tokens)}/{tokens_len}',\n 'Token Left': len(tokens)\n }, timestamp)\n\n def display_thread(unformatted_token:str, name:str, proxy:str=None):\n global sucesss, failed\n token = Utils.get_token_from_str(unformatted_token)\n while True:\n try:\n discord = Discord(token, proxy)\n break\n except Exception as e:\n logger.error(f'{token[:-10]}********** {e}')\n discord.connect_to_ws()\n accepted = discord.change_at_me({'global_name': name})\n while accepted not in [True, 'locked', 'captcha']:\n logger.error(f'{token[:-10]}********** {accepted}')\n accepted = discord.change_at_me({'global_name': name})\n match accepted:\n case True:\n logger.success(f'{token[:-10]}********** Changed display name')\n case \"captcha\":\n logger.error(f'{token[:-10]}********** Captcha Detected')\n case \"locked\":\n logger.error(f'{token[:-10]}********** Token Locked')\n case _:\n logger.error(f'{token[:-10]}********** {accepted}')\n\n\n \n thread_list = []\n Thread(target=title_thread).start()\n while len(tokens) > 0:\n while len(thread_list) >= config['thread']:\n sleep(0.1)\n for thread in thread_list:\n if not thread.is_alive():\n thread_list.remove(thread)\n \n token = tokens.pop(0)\n thread = Thread(target=display_thread, args=[token, choice(names), choice(proxies)])\n thread.start()\n thread_list.append(thread)\n for thread in thread_list:\n thread.join()\n finished = True\n logger.success('Finished display changing job, press enter to get back to main menu !')\n input('')\n Utils.set_title({\n 'Module': 'Main Menu'\n }, time())"
},
{
"identifier": "avatar_task",
"path": "logics/avatar_changer.py",
"snippet": "def avatar_task():\n logger.clear()\n logger.print_banner('Starting avatar changing job!')\n global sucesss, failed\n global finished, tokens\n\n sucesss, failed = 0, 0\n finished = False\n\n tokens = tokens_file.get_lines(True)\n proxies = proxies_file.get_lines(True)\n\n def title_thread():\n global sucesss, failed\n global finished, tokens\n tokens_len = len(tokens)\n timestamp = time()\n while not finished:\n Utils.set_title({\n 'Module': 'Avatar Changer',\n 'Accepted': sucesss,\n 'Failed': failed,\n 'Total': f'{tokens_len-len(tokens)}/{tokens_len}',\n 'Token Left': len(tokens)\n }, timestamp)\n\n def avatar_thread(unformatted_token:str, avatar:str, proxy:str=None):\n global sucesss, failed\n token = Utils.get_token_from_str(unformatted_token)\n while True:\n try:\n discord = Discord(token, proxy)\n break\n except Exception as e:\n logger.error(f'{token[:-10]}********** {e}')\n discord.connect_to_ws()\n accepted = discord.change_at_me({'avatar': avatar})\n while accepted not in [True, 'locked', 'captcha']:\n logger.error(f'{token[:-10]}********** {accepted}')\n accepted = discord.change_at_me({'avatar': avatar})\n match accepted:\n case True:\n logger.success(f'{token[:-10]}********** Changed avatar')\n case \"captcha\":\n logger.error(f'{token[:-10]}********** Captcha Detected')\n case \"locked\":\n logger.error(f'{token[:-10]}********** Token Locked')\n case _:\n logger.error(f'{token[:-10]}********** {accepted}')\n\n\n \n thread_list = []\n Thread(target=title_thread).start()\n while len(tokens) > 0:\n while len(thread_list) >= config['thread']:\n sleep(0.1)\n for thread in thread_list:\n if not thread.is_alive():\n thread_list.remove(thread)\n \n token = tokens.pop(0)\n thread = Thread(target=avatar_thread, args=[token, choice(avatar_b64), choice(proxies)])\n thread.start()\n thread_list.append(thread)\n for thread in thread_list:\n thread.join()\n finished = True\n logger.success('Finished avatar changing job, press enter to get back to main menu !')\n input('')\n Utils.set_title({\n 'Module': 'Main Menu'\n }, time())"
},
{
"identifier": "checking_task",
"path": "logics/token_checker.py",
"snippet": "def checking_task():\n global tokens, proxies, session, finished\n global valid, unverified, invalid, _valid, _unverified, _invalid\n\n valid, unverified, invalid = [], [], []\n _valid, _unverified, _invalid = 0, 0, 0\n finished = False\n\n config = Utils.get_config(True)\n\n tokens = tokens_file.get_lines(True)\n proxies = proxies_file.get_lines(True)\n\n session = Utils.get_tls_client('')\n\n def title_thread():\n global sucesss, failed\n global finished, tokens, _valid, _unverified, _invalid\n tokens_len = len(tokens)\n timestamp = time()\n while not finished:\n Utils.set_title({\n 'Module': 'Token Checker',\n 'Valid': _valid,\n 'Unverified': _unverified,\n 'Invalid': _invalid,\n 'Total': f'{tokens_len-len(tokens)}/{tokens_len}',\n 'Token Left': len(tokens)\n }, timestamp)\n sleep(0.5)\n\n def writting_thread():\n global valid, unverified, invalid, finished\n while not finished:\n with open('input/checker/invalids.txt', 'a') as f:\n while len(invalid) > 0:\n token = invalid.pop(0)\n f.write(f'{token}\\n')\n \n with open('input/checker/unverified.txt', 'a') as f:\n while len(unverified) > 0:\n token = unverified.pop(0)\n f.write(f'{token}\\n')\n \n with open('input/checker/valids.txt', 'a') as f:\n while len(valid) > 0:\n token = valid.pop(0)\n f.write(f'{token}\\n')\n sleep(0.5)\n \n with open('input/checker/invalids.txt', 'a') as f:\n while len(invalid) > 0:\n token = invalid.pop(0)\n f.write(f'{token}\\n')\n \n with open('input/checker/unverified.txt', 'a') as f:\n while len(unverified) > 0:\n token = unverified.pop(0)\n f.write(f'{token}\\n')\n \n with open('input/checker/valids.txt', 'a') as f:\n while len(valid) > 0:\n token = valid.pop(0)\n f.write(f'{token}\\n')\n\n def checking_thread():\n global tokens, proxies, session\n global valid, unverified, invalid, _valid, _unverified, _invalid\n\n while len(tokens) > 0:\n token = tokens.pop(0)\n _token = Utils.get_token_from_str(token)\n\n try:\n response = session.get(\n 'https://discord.com/api/v9/users/@me/burst-credits',\n headers={\n 'Authorization': _token\n },\n proxy={\n \"http\": f\"http://{choice(proxies)}\",\n \"https\": f\"http://{choice(proxies)}\"\n }\n )\n match response.status_code:\n case 200:\n valid.append(_token); _valid += 1\n logger.success(f'{_token} Valid')\n case 401:\n invalid.append(token); _invalid += 1\n logger.error(f'{_token} Invalid')\n case 403:\n unverified.append(token); _unverified += 1\n logger.error(f'{_token} Locked')\n case 429:\n with open('input/checker/errors.txt', 'a') as f:\n f.write(f'{token}\\n')\n logger.error(f'{_token} Rate limited')\n case _:\n with open('input/checker/errors.txt', 'a') as f:\n f.write(f'{token}\\n')\n logger.error(f'{_token} {response.text} | {response.status_code}')\n except Exception as e:\n logger.error(f'{_token} {e}')\n with open('input/checker/errors.txt', 'a') as f:\n f.write(f'{token}\\n')\n \n thread_list = []\n Thread(target=title_thread).start()\n Thread(target=writting_thread).start()\n while len(tokens) > 0:\n while len(thread_list) >= config['token-checker']['thread']:\n sleep(0.1)\n for thread in thread_list:\n if not thread.is_alive():\n thread_list.remove(thread)\n \n thread = Thread(target=checking_thread)\n thread.start()\n thread_list.append(thread)\n for thread in thread_list:\n thread.join()\n finished = True\n logger.success('Finished checking job, look at input/checker, press enter to get back to main menu !')\n input('')\n Utils.set_title({\n 'Module': 'Main Menu'\n }, time())"
},
{
"identifier": "fast_friending_task",
"path": "logics/fast_friender.py",
"snippet": "def fast_friending_task():\n logger.clear()\n logger.print_banner('Starting friending job!')\n global sucesss, failed, captcha\n global users, finished, tokens\n\n sucesss, failed, captcha = 0, 0, 0\n finished = False\n\n users = users_file.get_lines(True)\n tokens = tokens_file.get_lines(True)\n proxies = proxies_file.get_lines(True)\n\n def title_thread():\n global sucesss, failed, captcha\n global users, finished, tokens\n user_len = len(users)\n timestamp = time()\n while not finished:\n Utils.set_title({\n 'Module': 'Fast Friender',\n 'Sent': sucesss,\n 'Captcha': captcha,\n 'Failed': failed,\n 'Total': f'{user_len-len(users)}/{user_len}',\n 'Token Left': len(tokens)\n }, timestamp)\n\n def friending_thread(unformatted_token:str, proxy:str=None):\n global sucesss, failed, captcha\n global users, added\n added = 0\n\n token = Utils.get_token_from_str(unformatted_token)\n while True:\n try:\n discord = Discord(token, proxy)\n break\n except Exception as e:\n logger.error(f'{token[:-10]}********** {e}')\n discord.connect_to_ws()\n \n _thread_list = []\n for _ in range(config['friender']['number']):\n user = users.pop(0)\n while \"#\" not in user:\n user = users.pop(0)\n\n username, discrim = user.split('#')\n if discrim == \"null\":\n discrim = None\n\n def send(username, discrim):\n global sucesss, failed, captcha\n global users, added\n res = discord.add_relationship(username, discrim)\n match res:\n case True:\n added += 1\n sucesss += 1\n logger.success(f'{token[:-10]}********** Added {user} | {added}')\n case 'captcha':\n captcha += 1\n logger.error(f'{token[:-10]}********** Failed to add {user} | {res} | {added}')\n case _:\n failed += 1\n logger.error(f'{token[:-10]}********** Failed to add {user} | {res} | {added}')\n\n _thread = Thread(target=send, args=[username, discrim])\n _thread.start()\n _thread_list.append(_thread)\n for _thread in _thread_list:\n _thread.join()\n \n logger.info(f'{token[:-10]}********** Sent {added} friends requests | {added}')\n \n thread_list = []\n Thread(target=title_thread).start()\n while len(tokens) > 0 and len(users) > 0:\n while len(thread_list) >= config['thread']:\n sleep(0.1)\n for thread in thread_list:\n if not thread.is_alive():\n thread_list.remove(thread)\n \n token = tokens.pop(0)\n thread = Thread(target=friending_thread, args=[token, choice(proxies)])\n thread.start()\n thread_list.append(thread)\n for thread in thread_list:\n thread.join()\n finished = True\n logger.success('Finished fast friending job, press enter to get back to main menu !')\n input('')\n Utils.set_title({\n 'Module': 'Main Menu'\n }, time())"
},
{
"identifier": "joining_task",
"path": "logics/server_joiner.py",
"snippet": "def joining_task():\n invite = logger.delay_input('Please enter your invite code [discord.gg/**code**]: ')\n\n logger.clear()\n logger.print_banner('Starting joining job!')\n global sucesss, failed, captcha\n global finished, tokens\n\n sucesss, failed, captcha = 0, 0, 0\n finished = False\n\n tokens = tokens_file.get_lines(True)\n proxies = proxies_file.get_lines(True)\n\n def title_thread():\n global sucesss, failed, captcha\n global finished, tokens\n tokens_len = len(tokens)\n timestamp = time()\n while not finished:\n Utils.set_title({\n 'Module': 'Joiner',\n 'Sent': sucesss,\n 'Captcha': captcha,\n 'Failed': failed,\n 'Total': f'{tokens_len-len(tokens)}/{tokens_len}',\n 'Token Left': len(tokens)\n }, timestamp)\n\n def joining_thread(unformatted_token:str, info:list, proxy:str):\n global sucesss, failed, captcha\n global added\n added = 0\n\n token = Utils.get_token_from_str(unformatted_token)\n while True:\n try:\n discord = Discord(token, proxy)\n break\n except Exception as e:\n logger.error(f'{token[:-10]}********** {e}')\n discord.connect_to_ws()\n\n finished = False\n tried = 0\n rqtoken = None\n captcha_key = None\n while not finished:\n response = discord.join_server(\n invite=info[0],\n context=info[1],\n rqtoken=rqtoken,\n captcha_key=captcha_key,\n tries=tried\n )\n match response:\n case True:\n finished = True\n logger.success(f'{token[:-10]}********** Joined {info[0]}') \n case _:\n if 'rate_limited' in response:\n time_to_sleep = int(response.split('rate_limited_')[1])\n logger.info(f'{token[:-10]}********** Sleeping {time_to_sleep} and retrying...')\n sleep(time_to_sleep)\n elif 'captcha_solve' in response:\n _captcha_key = solver.solve(proxy, 'a9b5fb07-92ff-493f-86fe-352a2803b3df', response.split('_')[3])\n if _captcha_key != 'not_solving':\n if _captcha_key == \"error\":\n logger.error(f'{token[:-10]}********** Failed to solve captcha!')\n else:\n rqtoken = response.split('_')[2]\n captcha_key = _captcha_key \n else:\n finished = True\n logger.error(f'{token[:-10]}********** Failed to join {info[0]}: captcha_failed')\n tried += 1\n else:\n finished = True\n logger.error(f'{token[:-10]}********** Failed to join {info[0]}: {response}')\n \n thread_list = []\n Thread(target=title_thread).start()\n\n context = get(f'https://discord.com/api/v9/invites/{invite}?with_counts=true&with_expiration=true')\n if context.status_code == 200:\n guildId = context.json()['guild']['id']\n channelId = context.json()['channel']['id']\n context = b64encode(dumps({\"location\":\"Join Guild\",\"location_guild_id\":guildId,\"location_channel_id\":channelId,\"location_channel_type\":0}, separators=(',', ':')).encode()).decode()\n elif context.status_code == 429:\n logger.error('Rate limited while checking invite, please try using a vpn !')\n \n finished = True\n logger.success('Finished joining job, press enter to get back to main menu !')\n input('')\n Utils.set_title({\n 'Module': 'Main Menu'\n }, time())\n return\n elif context.status_code == 404:\n logger.error(f'Unknown invite')\n\n finished = True\n logger.success('Finished joining job, press enter to get back to main menu !')\n input('')\n Utils.set_title({\n 'Module': 'Main Menu'\n }, time())\n return\n else:\n logger.error(f'Error while checking invite | {context.text}')\n\n finished = True\n logger.success('Finished joining job, press enter to get back to main menu !')\n input('')\n Utils.set_title({\n 'Module': 'Main Menu'\n }, time())\n return\n\n while len(tokens) > 0:\n while len(thread_list) >= config['thread']:\n 
sleep(0.1)\n for thread in thread_list:\n if not thread.is_alive():\n thread_list.remove(thread)\n \n token = tokens.pop(0)\n thread = Thread(target=joining_thread, args=[token, [invite, context], choice(proxies)])\n thread.start()\n thread_list.append(thread)\n for thread in thread_list:\n thread.join()\n \n finished = True\n logger.success('Finished joining job, press enter to get back to main menu !')\n input('')\n Utils.set_title({\n 'Module': 'Main Menu'\n }, time())"
},
{
"identifier": "tos_task",
"path": "logics/tos_accepter.py",
"snippet": "def tos_task():\n logger.clear()\n logger.print_banner('Starting tos accepting job!')\n global sucesss, failed\n global finished, tokens\n\n sucesss, failed = 0, 0\n finished = False\n\n tokens = tokens_file.get_lines(True)\n proxies = proxies_file.get_lines(True)\n\n def title_thread():\n global sucesss, failed\n global finished, tokens\n tokens_len = len(tokens)\n timestamp = time()\n while not finished:\n Utils.set_title({\n 'Module': 'Tos Accepter',\n 'Accepted': sucesss,\n 'Failed': failed,\n 'Total': f'{tokens_len-len(tokens)}/{tokens_len}',\n 'Token Left': len(tokens)\n }, timestamp)\n\n def tos_thread(unformatted_token:str, proxy:str=None):\n global sucesss, failed\n token = Utils.get_token_from_str(unformatted_token)\n while True:\n try:\n discord = Discord(token, proxy)\n break\n except Exception as e:\n logger.error(f'{token[:-10]}********** {e}')\n discord.connect_to_ws()\n accepted = discord.unflag()\n while accepted != True:\n logger.error(f'{token[:-10]}********** {accepted}')\n accepted = discord.unflag()\n logger.success(f'{token[:-10]}********** Accepted tos')\n \n thread_list = []\n Thread(target=title_thread).start()\n while len(tokens) > 0:\n while len(thread_list) >= config['thread']:\n sleep(0.1)\n for thread in thread_list:\n if not thread.is_alive():\n thread_list.remove(thread)\n \n token = tokens.pop(0)\n thread = Thread(target=tos_thread, args=[token, choice(proxies)])\n thread.start()\n thread_list.append(thread)\n for thread in thread_list:\n thread.join()\n finished = True\n logger.success('Finished tos job, press enter to get back to main menu !')\n input('')\n Utils.set_title({\n 'Module': 'Main Menu'\n }, time())"
},
{
"identifier": "bio_task",
"path": "logics/bio_changer.py",
"snippet": "def bio_task():\n logger.clear()\n logger.print_banner('Starting bio job!')\n global sucesss, failed\n global finished, tokens\n\n sucesss, failed = 0, 0\n finished = False\n\n tokens = tokens_file.get_lines(True)\n proxies = proxies_file.get_lines(True)\n bios = bios_file.get_lines(True)\n\n def title_thread():\n global sucesss, failed\n global finished, tokens\n tokens_len = len(tokens)\n timestamp = time()\n while not finished:\n Utils.set_title({\n 'Module': 'Bio Changer',\n 'Accepted': sucesss,\n 'Failed': failed,\n 'Total': f'{tokens_len-len(tokens)}/{tokens_len}',\n 'Token Left': len(tokens)\n }, timestamp)\n\n def bio_thread(unformatted_token:str, bio:str, proxy:str=None):\n global sucesss, failed\n token = Utils.get_token_from_str(unformatted_token)\n while True:\n try:\n discord = Discord(token, proxy)\n break\n except Exception as e:\n logger.error(f'{token[:-10]}********** {e}')\n discord.connect_to_ws()\n accepted = discord.change_profile({'bio': bio})\n while accepted not in [True, 'locked', 'captcha']:\n logger.error(f'{token[:-10]}********** {accepted}')\n accepted = discord.change_at_me({'bio': bio})\n match accepted:\n case True:\n logger.success(f'{token[:-10]}********** Changed bio')\n case \"captcha\":\n logger.error(f'{token[:-10]}********** Captcha Detected')\n case \"locked\":\n logger.error(f'{token[:-10]}********** Token Locked')\n case _:\n logger.error(f'{token[:-10]}********** {accepted}')\n\n\n \n thread_list = []\n Thread(target=title_thread).start()\n while len(tokens) > 0:\n while len(thread_list) >= config['thread']:\n sleep(0.1)\n for thread in thread_list:\n if not thread.is_alive():\n thread_list.remove(thread)\n \n token = tokens.pop(0)\n thread = Thread(target=bio_thread, args=[token, choice(bios), choice(proxies)])\n thread.start()\n thread_list.append(thread)\n for thread in thread_list:\n thread.join()\n finished = True\n logger.success('Finished bio job, press enter to get back to main menu !')\n input('')\n Utils.set_title({\n 'Module': 'Main Menu'\n }, time())"
},
{
"identifier": "itu_task",
"path": "logics/id_to_user.py",
"snippet": "def itu_task():\n logger.clear()\n logger.print_banner('Starting converting job!')\n global finished, tokens, ids\n\n finished = False\n\n tokens = tokens_file.get_lines(True)\n proxies = proxies_file.get_lines(True)\n ids = ids_file.get_lines(True)\n\n def title_thread():\n global finished, tokens\n ids_len = len(ids)\n timestamp = time()\n while not finished:\n Utils.set_title({\n 'Module': 'Id To User',\n 'Total': f'{ids_len-len(ids)}/{ids_len}'\n }, timestamp)\n\n def itu_thread(unformatted_token:str, proxy:str=None):\n global ids\n token = Utils.get_token_from_str(unformatted_token)\n while True:\n try:\n discord = Discord(token, proxy)\n break\n except Exception as e:\n logger.error(f'{token[:-10]}********** {e}')\n discord.connect_to_ws()\n while len(ids) > 0:\n id = ids.pop(0)\n try:\n username = discord.get_user_from_id(id)\n logger.success(f'{token[:-10]}********** {username}')\n with open('input/users.txt', 'a') as f:\n f.write(f'{username}\\n')\n except Exception as e:\n logger.error(f'{token[:-10]}********** {e}')\n ids.append(id)\n \n thread_list = []\n Thread(target=title_thread).start()\n while len(tokens) > 0:\n while len(thread_list) >= config['thread']:\n sleep(0.1)\n for thread in thread_list:\n if not thread.is_alive():\n thread_list.remove(thread)\n \n token = tokens.pop(0)\n thread = Thread(target=itu_thread, args=[token, choice(proxies)])\n thread.start()\n thread_list.append(thread)\n for thread in thread_list:\n thread.join()\n finished = True\n logger.success('Finished converting, press enter to get back to main menu !')\n input('')\n Utils.set_title({\n 'Module': 'Main Menu'\n }, time())"
},
{
"identifier": "friending_task",
"path": "logics/friender.py",
"snippet": "def friending_task():\n logger.clear()\n logger.print_banner('Starting friending job!')\n global sucesss, failed, captcha\n global users, finished, tokens\n\n sucesss, failed, captcha = 0, 0, 0\n finished = False\n\n users = users_file.get_lines(True)\n tokens = tokens_file.get_lines(True)\n proxies = proxies_file.get_lines(True)\n\n def title_thread():\n global sucesss, failed, captcha\n global users, finished, tokens\n user_len = len(users)\n timestamp = time()\n while not finished:\n Utils.set_title({\n 'Module': 'Friender',\n 'Sent': sucesss,\n 'Captcha': captcha,\n 'Failed': failed,\n 'Total': f'{user_len-len(users)}/{user_len}',\n 'Token Left': len(tokens)\n }, timestamp)\n\n def friending_thread(unformatted_token:str, proxy:str=None):\n global sucesss, failed, captcha\n global users\n token = Utils.get_token_from_str(unformatted_token)\n while True:\n try:\n discord = Discord(token, proxy)\n break\n except Exception as e:\n logger.error(f'{token[:-10]}********** {e}')\n discord.connect_to_ws()\n added, retry = 0, 0\n\n for _ in range(config['friender']['number']):\n user = users.pop(0)\n while \"#\" not in user:\n user = users.pop(0)\n\n username, discrim = user.split('#')\n if discrim == \"null\":\n discrim = None\n\n res = discord.add_relationship(username, discrim)\n match res:\n case True:\n added += 1\n sucesss += 1\n logger.success(f'{token[:-10]}********** Added {user} | {added}')\n case 'captcha':\n captcha += 1\n logger.error(f'{token[:-10]}********** Failed to add {user} | {res} | {added}')\n if retry > config['friender']['retry-on-cap']:\n break\n if config['friender']['retry-on-cap'] != 0:\n retry += 1\n case _:\n failed += 1\n logger.error(f'{token[:-10]}********** Failed to add {user} | {res} | {added}')\n\n logger.info(f'{token[:-10]}********** Sent {added} friends requests | {added}')\n\n thread_list = []\n Thread(target=title_thread).start()\n while len(tokens) > 0 and len(users) > 0:\n while len(thread_list) >= config['thread']:\n sleep(0.1)\n for thread in thread_list:\n if not thread.is_alive():\n thread_list.remove(thread)\n \n token = tokens.pop(0)\n thread = Thread(target=friending_thread, args=[token, choice(proxies)])\n thread.start()\n thread_list.append(thread)\n for thread in thread_list:\n thread.join()\n finished = True\n logger.success('Finished friending job, press enter to get back to main menu !')\n input('')\n Utils.set_title({\n 'Module': 'Main Menu'\n }, time())"
},
{
"identifier": "dming_task",
"path": "logics/mass_dm.py",
"snippet": "def dming_task():\n logger.clear()\n logger.print_banner('Starting friending job!')\n global ids, unlocked, locked, sent, failed, captcha, solver, finished\n global tokens, used\n\n tokens = tokens_file.get_lines(True)\n proxies = proxies_file.get_lines(True)\n ids = ids_file.get_lines(True)\n\n config = Utils.get_config(True)\n\n solver = Solver(\n config['captcha']['key'],\n config['captcha']['service'],\n )\n\n unlocked, locked, sent, failed, captcha, used = len(tokens), 0, 0, 0, 0, 0\n finished = False\n\n config = Utils.get_config()\n\n def title_thread():\n global sent, failed, captcha, ids, used\n global finished, tokens, unlocked, locked\n ids_len = len(ids)\n timestamp = time()\n while not finished:\n if used != 0:\n average = f'{sent/used}'\n else:\n average = \"None\"\n Utils.set_title({\n 'Module': 'MassDm',\n 'Sent': sent,\n 'Captcha': captcha,\n 'Failed': failed,\n\n 'Unlocked': unlocked,\n 'Locked': locked,\n\n 'Average': average,\n 'Total': f'{ids_len-len(ids)}/{ids_len}',\n 'Token Left': len(ids)\n }, timestamp)\n\n def dming_thread(unformatted_token:str, proxy:str):\n global used, sent, locked, unlocked, ids, failed, captcha\n _finished = False\n _sent = 0\n\n token = Utils.get_token_from_str(unformatted_token)\n while True:\n try:\n discord = Discord(token, proxy)\n break\n except Exception as e:\n logger.error(f'{token[:-10]}********** {e}')\n discord.connect_to_ws()\n used += 1\n \n while len(ids) > 0 and not _finished:\n id = ''\n while id == '':\n id = ids.pop(0)\n\n channel_id = discord.open_channel(id)\n match channel_id:\n case \"locked\":\n logger.error(f'{token[:-10]}********** Locked, stopping thread...')\n ids.append(id)\n _finished = True\n case \"sleep\":\n logger.info(f'{token[:-10]}********** Rate limited, sleeping {config[\"mass-dm\"][\"rate-limit-time\"]}')\n ids.append(id)\n sleep(config[\"mass-dm\"][\"rate-limit-time\"])\n case _:\n if channel_id[0] == False:\n logger.error(f'{token[:-10]}********** Failed to open channel ({id}): {channel_id[1]}')\n ids.append(id)\n else:\n captcha_dict = None\n success = False\n while not success:\n message_response = discord.send_message(\n config['mass-dm']['content'],\n channel_id[1],\n captcha_dict\n )\n match message_response:\n case True:\n logger.success(f'{token[:-10]}********** Successfully sent message to {id} | {_sent}')\n _sent += 1\n sent += 1\n success = True\n sleep(config['mass-dm']['sleep-time'])\n case \"locked\":\n logger.error(f'{token[:-10]}********** Locked, stopping thread...')\n ids.append(id)\n _finished = True\n locked += 1\n unlocked -= 1\n success = True\n case \"sleep\":\n logger.info(f'{token[:-10]}********** Rate limited, sleeping {config[\"mass-dm\"][\"rate-limit-time\"]}')\n sleep(config[\"mass-dm\"][\"rate-limit-time\"])\n case _:\n if 'captcha' in message_response:\n captcha += 1\n _captcha_key = solver.solve(proxy, 'e2f713c5-b5ce-41d0-b65f-29823df542cf', message_response.split('_')[3])\n if _captcha_key != 'not_solving':\n if _captcha_key == \"error\":\n logger.error(f'{token[:-10]}********** Failed to solve captcha!')\n else:\n captcha_dict = {\n \"X-Captcha-Rqtoken\": message_response.split('_')[2],\n \"X-Captcha-Key\": _captcha_key\n }\n logger.info(f'{token[:-10]}********** Solved captcha!')\n else:\n logger.error(f'{token[:-10]}********** Unknown error: {message_response}')\n failed += 1\n success = True\n \n thread_list = []\n Thread(target=title_thread).start()\n\n while len(tokens) > 0:\n while len(thread_list) >= config['thread']:\n sleep(0.1)\n for thread in 
thread_list:\n if not thread.is_alive():\n thread_list.remove(thread)\n \n token = tokens.pop(0)\n thread = Thread(target=dming_thread, args=[token, choice(proxies)])\n thread.start()\n thread_list.append(thread)\n for thread in thread_list:\n thread.join()\n \n finished = True\n logger.success('Finished joining job, press enter to get back to main menu !')\n input('')\n Utils.set_title({\n 'Module': 'Main Menu'\n }, time())"
}
] | from logics.member_scrapper import scrapping_task
from logics.display_changer import display_task
from logics.avatar_changer import avatar_task
from logics.token_checker import checking_task
from logics.fast_friender import fast_friending_task
from logics.server_joiner import joining_task
from logics.tos_accepter import tos_task
from logics.bio_changer import bio_task
from logics.id_to_user import itu_task
from logics.friender import friending_task
from logics.mass_dm import dming_task | 9,502 |
def soon():
print("Soon...")
features = {
'Friender': friending_task,
'Fast Friender': fast_friending_task,
'Joiner': joining_task,
'Mass Dm': dming_task,
'Member Scapper': scrapping_task,
|
def soon():
print("Soon...")
features = {
'Friender': friending_task,
'Fast Friender': fast_friending_task,
'Joiner': joining_task,
'Mass Dm': dming_task,
'Member Scapper': scrapping_task,
| 'Tos Accepter': tos_task, | 6 | 2023-11-19 10:02:14+00:00 | 12k |
ymp5078/AI-SAM | segment_anything/automatic_mask_generator.py | [
{
"identifier": "Sam",
"path": "segment_anything/modeling/sam.py",
"snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\n \"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False\n )\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack(\n [self.preprocess(x[\"image\"]) for x in batched_input], dim=0\n )\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(\n masks, original_size, mode=\"bilinear\", align_corners=False\n )\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x"
},
{
"identifier": "SamPredictor",
"path": "segment_anything/predictor.py",
"snippet": "class SamPredictor:\n def __init__(\n self,\n sam_model: Sam,\n ) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(\n self,\n image: np.ndarray,\n image_format: str = \"RGB\",\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in [\n \"RGB\",\n \"BGR\",\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[\n None, :, :, :\n ]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(\n self,\n transformed_image: torch.Tensor,\n original_image_size: Tuple[int, ...],\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n assert (\n len(transformed_image.shape) == 4\n and transformed_image.shape[1] == 3\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray or None): A length N array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. 
Has form 1xHxW, where\n for SAM, H=W=256.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) before mask prediction.\"\n )\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (\n point_labels is not None\n ), \"point_labels must be supplied if point_coords is supplied.\"\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(\n point_coords, dtype=torch.float, device=self.device\n )\n labels_torch = torch.as_tensor(\n point_labels, dtype=torch.int, device=self.device\n )\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(\n mask_input, dtype=torch.float, device=self.device\n )\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n multimask_output,\n return_logits=return_logits,\n )\n\n masks_np = masks[0].detach().cpu().numpy()\n iou_predictions_np = iou_predictions[0].detach().cpu().numpy()\n low_res_masks_np = low_res_masks[0].detach().cpu().numpy()\n return masks_np, iou_predictions_np, low_res_masks_np\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor or None): A BxN array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n boxes (np.ndarray or None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. 
Has form Bx1xHxW, where\n for SAM, H=W=256. Masks returned by a previous iteration of the\n predict method do not need further transformation.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) before mask prediction.\"\n )\n\n if point_coords is not None:\n points = (point_coords, point_labels)\n else:\n points = None\n\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(\n low_res_masks, self.input_size, self.original_size\n )\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) to generate an embedding.\"\n )\n assert (\n self.features is not None\n ), \"Features must exist if an image has been set.\"\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None"
},
{
"identifier": "MaskData",
"path": "segment_anything/utils/amg.py",
"snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()"
},
{
"identifier": "area_from_rle",
"path": "segment_anything/utils/amg.py",
"snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])"
},
{
"identifier": "batch_iterator",
"path": "segment_anything/utils/amg.py",
"snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]"
},
{
"identifier": "batched_mask_to_box",
"path": "segment_anything/utils/amg.py",
"snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out"
},
{
"identifier": "box_xyxy_to_xywh",
"path": "segment_anything/utils/amg.py",
"snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh"
},
{
"identifier": "build_all_layer_point_grids",
"path": "segment_anything/utils/amg.py",
"snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n return points_by_layer"
},
{
"identifier": "calculate_stability_score",
"path": "segment_anything/utils/amg.py",
"snippet": "def calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecessary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions"
},
{
"identifier": "coco_encode_rle",
"path": "segment_anything/utils/amg.py",
"snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle"
},
{
"identifier": "generate_crop_boxes",
"path": "segment_anything/utils/amg.py",
"snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs"
},
{
"identifier": "is_box_near_crop_edge",
"path": "segment_anything/utils/amg.py",
"snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)"
},
{
"identifier": "mask_to_rle_pytorch",
"path": "segment_anything/utils/amg.py",
"snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out"
},
{
"identifier": "remove_small_regions",
"path": "segment_anything/utils/amg.py",
"snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True"
},
{
"identifier": "rle_to_mask",
"path": "segment_anything/utils/amg.py",
"snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order"
},
{
"identifier": "uncrop_boxes_xyxy",
"path": "segment_anything/utils/amg.py",
"snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset"
},
{
"identifier": "uncrop_masks",
"path": "segment_anything/utils/amg.py",
"snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)"
},
{
"identifier": "uncrop_points",
"path": "segment_anything/utils/amg.py",
"snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset"
}
] | import numpy as np
import torch
import cv2 # type: ignore # noqa: F401
from torchvision.ops.boxes import batched_nms, box_area # type: ignore
from typing import Any, Dict, List, Optional, Tuple
from .modeling import Sam
from .predictor import SamPredictor
from .utils.amg import (
MaskData,
area_from_rle,
batch_iterator,
batched_mask_to_box,
box_xyxy_to_xywh,
build_all_layer_point_grids,
calculate_stability_score,
coco_encode_rle,
generate_crop_boxes,
is_box_near_crop_edge,
mask_to_rle_pytorch,
remove_small_regions,
rle_to_mask,
uncrop_boxes_xyxy,
uncrop_masks,
uncrop_points,
)
from pycocotools import mask as mask_utils # type: ignore # noqa: F401 | 9,871 | Using a SAM model, generates masks for the entire image.
Generates a grid of point prompts over the image, then filters
low quality and duplicate masks. The default settings are chosen
for SAM with a ViT-H backbone.
Arguments:
model (Sam): The SAM model to use for mask prediction.
points_per_side (int or None): The number of points to be sampled
along one side of the image. The total number of points is
points_per_side**2. If None, 'point_grids' must provide explicit
point sampling.
points_per_batch (int): Sets the number of points run simultaneously
by the model. Higher numbers may be faster but use more GPU memory.
pred_iou_thresh (float): A filtering threshold in [0,1], using the
model's predicted mask quality.
stability_score_thresh (float): A filtering threshold in [0,1], using
the stability of the mask under changes to the cutoff used to binarize
the model's mask predictions.
stability_score_offset (float): The amount to shift the cutoff when
            calculating the stability score.
box_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks.
crop_n_layers (int): If >0, mask prediction will be run again on
crops of the image. Sets the number of layers to run, where each
layer has 2**i_layer number of image crops.
crop_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks between different crops.
crop_overlap_ratio (float): Sets the degree to which crops overlap.
In the first crop layer, crops will overlap by this fraction of
the image length. Later layers with more crops scale down this overlap.
crop_n_points_downscale_factor (int): The number of points-per-side
sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
point_grids (list(np.ndarray) or None): A list over explicit grids
of points used for sampling, normalized to [0,1]. The nth grid in the
list is used in the nth crop layer. Exclusive with points_per_side.
min_mask_region_area (int): If >0, postprocessing will be applied
to remove disconnected regions and holes in masks with area smaller
than min_mask_region_area. Requires opencv.
output_mode (str): The form masks are returned in. Can be 'binary_mask',
'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
For large resolutions, 'binary_mask' may consume large amounts of
memory.
"""
assert (points_per_side is None) != (
point_grids is None
), "Exactly one of points_per_side or point_grid must be provided."
if points_per_side is not None:
self.point_grids = build_all_layer_point_grids(
points_per_side,
crop_n_layers,
crop_n_points_downscale_factor,
)
elif point_grids is not None:
self.point_grids = point_grids
else:
raise ValueError("Can't have both points_per_side and point_grid be None.")
assert output_mode in [
"binary_mask",
"uncompressed_rle",
"coco_rle",
], f"Unknown output_mode {output_mode}."
        # pycocotools (for "coco_rle" output) and cv2 (for small-region
        # postprocessing when min_mask_region_area > 0) are imported at module
        # level above, so no lazy conditional imports are needed here.
        self.predictor = SamPredictor(model)
self.points_per_batch = points_per_batch
self.pred_iou_thresh = pred_iou_thresh
self.stability_score_thresh = stability_score_thresh
self.stability_score_offset = stability_score_offset
self.box_nms_thresh = box_nms_thresh
self.crop_n_layers = crop_n_layers
self.crop_nms_thresh = crop_nms_thresh
self.crop_overlap_ratio = crop_overlap_ratio
self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
self.min_mask_region_area = min_mask_region_area
self.output_mode = output_mode
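    # A minimal usage sketch, assuming a SAM checkpoint is available locally and
    # loaded through sam_model_registry from this package (the checkpoint
    # filename below is an assumption, not something defined in this file):
    #
    #   from segment_anything import sam_model_registry, SamAutomaticMaskGenerator
    #   sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
    #   generator = SamAutomaticMaskGenerator(sam, points_per_side=32)
    #   masks = generator.generate(image)  # image: HWC uint8 array -> list of mask records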
@torch.no_grad()
def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
"""
Generates masks for the given image.
Arguments:
image (np.ndarray): The image to generate masks for, in HWC uint8 format.
Returns:
list(dict(str, any)): A list over records for masks. Each record is
a dict containing the following keys:
segmentation (dict(str, any) or np.ndarray): The mask. If
output_mode='binary_mask', is an array of shape HW. Otherwise,
is a dictionary containing the RLE.
bbox (list(float)): The box around the mask, in XYWH format.
area (int): The area in pixels of the mask.
predicted_iou (float): The model's own prediction of the mask's
quality. This is filtered by the pred_iou_thresh parameter.
point_coords (list(list(float))): The point coordinates input
to the model to generate this mask.
stability_score (float): A measure of the mask's quality. This
is filtered on using the stability_score_thresh parameter.
crop_box (list(float)): The crop of the image used to generate
the mask, given in XYWH format.
"""
# Generate masks
mask_data = self._generate_masks(image)
# Filter small disconnected regions and holes in masks
if self.min_mask_region_area > 0:
mask_data = self.postprocess_small_regions(
mask_data,
self.min_mask_region_area,
max(self.box_nms_thresh, self.crop_nms_thresh),
)
# Encode masks
if self.output_mode == "coco_rle":
mask_data["segmentations"] = [
| # -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class SamAutomaticMaskGenerator:
def __init__(
self,
model: Sam,
points_per_side: Optional[int] = 32,
points_per_batch: int = 64,
pred_iou_thresh: float = 0.88,
stability_score_thresh: float = 0.95,
stability_score_offset: float = 1.0,
box_nms_thresh: float = 0.7,
crop_n_layers: int = 0,
crop_nms_thresh: float = 0.7,
crop_overlap_ratio: float = 512 / 1500,
crop_n_points_downscale_factor: int = 1,
point_grids: Optional[List[np.ndarray]] = None,
min_mask_region_area: int = 0,
output_mode: str = "binary_mask",
) -> None:
"""
Using a SAM model, generates masks for the entire image.
Generates a grid of point prompts over the image, then filters
low quality and duplicate masks. The default settings are chosen
for SAM with a ViT-H backbone.
Arguments:
model (Sam): The SAM model to use for mask prediction.
points_per_side (int or None): The number of points to be sampled
along one side of the image. The total number of points is
points_per_side**2. If None, 'point_grids' must provide explicit
point sampling.
points_per_batch (int): Sets the number of points run simultaneously
by the model. Higher numbers may be faster but use more GPU memory.
pred_iou_thresh (float): A filtering threshold in [0,1], using the
model's predicted mask quality.
stability_score_thresh (float): A filtering threshold in [0,1], using
the stability of the mask under changes to the cutoff used to binarize
the model's mask predictions.
          stability_score_offset (float): The amount to shift the cutoff when
            calculating the stability score.
box_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks.
crop_n_layers (int): If >0, mask prediction will be run again on
crops of the image. Sets the number of layers to run, where each
layer has 2**i_layer number of image crops.
crop_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks between different crops.
crop_overlap_ratio (float): Sets the degree to which crops overlap.
In the first crop layer, crops will overlap by this fraction of
the image length. Later layers with more crops scale down this overlap.
crop_n_points_downscale_factor (int): The number of points-per-side
sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
point_grids (list(np.ndarray) or None): A list over explicit grids
of points used for sampling, normalized to [0,1]. The nth grid in the
list is used in the nth crop layer. Exclusive with points_per_side.
min_mask_region_area (int): If >0, postprocessing will be applied
to remove disconnected regions and holes in masks with area smaller
than min_mask_region_area. Requires opencv.
output_mode (str): The form masks are returned in. Can be 'binary_mask',
'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
For large resolutions, 'binary_mask' may consume large amounts of
memory.
"""
assert (points_per_side is None) != (
point_grids is None
), "Exactly one of points_per_side or point_grid must be provided."
if points_per_side is not None:
self.point_grids = build_all_layer_point_grids(
points_per_side,
crop_n_layers,
crop_n_points_downscale_factor,
)
elif point_grids is not None:
self.point_grids = point_grids
else:
raise ValueError("Can't have both points_per_side and point_grid be None.")
assert output_mode in [
"binary_mask",
"uncompressed_rle",
"coco_rle",
], f"Unknown output_mode {output_mode}."
        if output_mode == "coco_rle":
            from pycocotools import mask as mask_utils  # type: ignore # noqa: F401  ('coco_rle' requires pycocotools)
        if min_mask_region_area > 0:
            import cv2  # type: ignore # noqa: F401  (removing small regions/holes requires opencv)
        self.predictor = SamPredictor(model)
self.points_per_batch = points_per_batch
self.pred_iou_thresh = pred_iou_thresh
self.stability_score_thresh = stability_score_thresh
self.stability_score_offset = stability_score_offset
self.box_nms_thresh = box_nms_thresh
self.crop_n_layers = crop_n_layers
self.crop_nms_thresh = crop_nms_thresh
self.crop_overlap_ratio = crop_overlap_ratio
self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
self.min_mask_region_area = min_mask_region_area
self.output_mode = output_mode
@torch.no_grad()
def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
"""
Generates masks for the given image.
Arguments:
image (np.ndarray): The image to generate masks for, in HWC uint8 format.
Returns:
list(dict(str, any)): A list over records for masks. Each record is
a dict containing the following keys:
segmentation (dict(str, any) or np.ndarray): The mask. If
output_mode='binary_mask', is an array of shape HW. Otherwise,
is a dictionary containing the RLE.
bbox (list(float)): The box around the mask, in XYWH format.
area (int): The area in pixels of the mask.
predicted_iou (float): The model's own prediction of the mask's
quality. This is filtered by the pred_iou_thresh parameter.
point_coords (list(list(float))): The point coordinates input
to the model to generate this mask.
stability_score (float): A measure of the mask's quality. This
is filtered on using the stability_score_thresh parameter.
crop_box (list(float)): The crop of the image used to generate
the mask, given in XYWH format.
"""
# Generate masks
mask_data = self._generate_masks(image)
# Filter small disconnected regions and holes in masks
if self.min_mask_region_area > 0:
mask_data = self.postprocess_small_regions(
mask_data,
self.min_mask_region_area,
max(self.box_nms_thresh, self.crop_nms_thresh),
)
# Encode masks
if self.output_mode == "coco_rle":
mask_data["segmentations"] = [ | coco_encode_rle(rle) for rle in mask_data["rles"] | 9 | 2023-11-26 23:42:53+00:00 | 12k |
sophiaalthammer/alforrankers | matchmaker/train.py | [
{
"identifier": "DynamicTeacher",
"path": "matchmaker/distillation/dynamic_teacher.py",
"snippet": "class DynamicTeacher():\n '''\n Wraps a trained model checkpoint and the training batch queue to score (inference only) samples from the batch\n '''\n\n def __init__(self,\n config: Dict[str,Any],\n dataloader:DataLoader,\n logger):\n\n super().__init__()\n self.config = config\n self.dynamic_teacher_path = config[\"dynamic_teacher_path\"]\n self.dynamic_teacher_in_batch_scoring = config[\"dynamic_teacher_in_batch_scoring\"]\n self.dynamic_teacher_per_term_scores = config.get(\"dynamic_teacher_per_term_scores\",False)\n\n self.wrapped_dataloader = dataloader\n\n self.cuda_device = torch.cuda.device_count() - 1 # [torch.cuda.device_count() - 2,torch.cuda.device_count() - 1] # take the last gpu \n self.logger = logger\n\n def __iter__(self) -> Iterator[TensorDict]:\n\n ctx = mp.get_context(\"spawn\") # need spawn here, otherwise CUDA fails \n\n queue: mp.JoinableQueue = ctx.JoinableQueue(1000)\n worker = ctx.Process(\n target=self.dynamic_teacher_subprocess, args=(queue,), #daemon=True\n )\n worker.start()\n\n try:\n #i=0\n for batch, worker_error in iter(queue.get, (None, None)):\n #i+=1\n #if i % 100 == 0:\n # print(\"teacher-queued:\",queue.qsize())\n if worker_error is not None:\n e, tb = worker_error\n raise WorkerError(e, tb)\n\n yield batch\n queue.task_done()\n finally:\n if hasattr(queue, \"close\"): # for compat with different Python versions.\n queue.close() # type: ignore[attr-defined]\n if worker.is_alive():\n worker.terminate()\n\n\n def dynamic_teacher_subprocess(self, queue):\n \n try:\n console = Console()\n\n console.log(\"[DynamicTeacher] Load teacher model from: \" + self.dynamic_teacher_path)\n\n #\n # load model\n #\n model_config = get_config_single(self.dynamic_teacher_path)\n word_embedder, padding_idx = get_word_embedder(model_config)\n model, encoder_type = get_model(model_config,word_embedder,padding_idx)\n model = build_model(model,encoder_type,word_embedder,model_config)\n model.is_teacher_model = True\n if model_config.get(\"model_checkpoint_from_huggingface\",False):\n model_path = cached_path(hf_bucket_url(self.dynamic_teacher_path, WEIGHTS_NAME))\n else:\n model_path = os.path.join(self.dynamic_teacher_path, \"best-model.pytorch-state-dict\")\n load_result = model.load_state_dict(torch.load(model_path,map_location=\"cpu\"),strict=False)\n self.logger.info('[DynamicTeacher] Warmstart init model from: %s', model_path)\n self.logger.info(load_result)\n console.log(\"[DynamicTeacher] Warmstart Result:\",load_result)\n model = model.eval()\n\n use_multi = False\n if type(self.cuda_device) == int:\n model = model.cuda(self.cuda_device)\n\n #\n # multi gpu \n #\n else:\n use_multi = True\n model = model.cuda(self.cuda_device[0])\n replicas = data_parallel_prepare(model,self.cuda_device)\n\n use_fp16 = model_config[\"use_fp16\"]\n concated_sequences = False\n if model_config[\"token_embedder_type\"] == \"bert_cat\":\n concated_sequences = True\n #use_title_body_sep = model_config[\"use_title_body_sep\"]\n #train_sparsity = model_config[\"minimize_sparsity\"]\n #train_qa_spans = model_config[\"train_qa_spans\"]\n\n #\n # connect to pipeline\n #\n console.log(\"[DynamicTeacher] Run Teacher Inference ...\")\n\n with torch.no_grad(), torch.cuda.amp.autocast(enabled=use_fp16):\n for orig_batch in self.wrapped_dataloader:\n\n if use_multi:\n batch = move_to_device(copy.deepcopy(orig_batch), self.cuda_device[0])\n batch_neg = move_to_device(copy.deepcopy(orig_batch), self.cuda_device[1])\n else:\n batch = move_to_device(copy.deepcopy(orig_batch), 
self.cuda_device)\n batch_neg = batch\n\n pos_in = []\n neg_in = []\n if concated_sequences:\n pos_in.append(batch[\"doc_pos_tokens\"]) \n neg_in.append(batch_neg[\"doc_neg_tokens\"])\n else:\n pos_in += [batch[\"query_tokens\"],batch[\"doc_pos_tokens\"]]\n neg_in += [batch_neg[\"query_tokens\"],batch_neg[\"doc_neg_tokens\"]]\n\n #if use_title_body_sep:\n # pos_in.append(batch[\"title_pos_tokens\"])\n # neg_in.append(batch_neg[\"title_neg_tokens\"])\n\n #if train_qa_spans: # add start positions for qa training (used to anchor end logits on the start ground truth)\n # pos_in.append(batch[\"pos_qa_start\"])\n\n #\n # run model forward\n #\n if use_multi:\n output_pos, output_neg = parallel_apply(replicas, [pos_in,neg_in], [{\"use_fp16\": use_fp16},{\"use_fp16\": use_fp16}], self.cuda_device)\n #output_neg = data_parallel_forward(replicas, *neg_in, device_ids=cuda_device, use_fp16 = use_fp16)\n output_pos, query_vecs_pos, doc_vecs_pos = output_pos\n output_neg, query_vecs_neg, doc_vecs_neg = output_neg\n # colbert model\n ib_output_pos = model.forward_inbatch_aggregation(query_vecs_pos,batch[\"query_tokens\"][\"attention_mask\"], doc_vecs_pos, batch[\"doc_pos_tokens\"][\"attention_mask\"])\n ib_output_neg = model.forward_inbatch_aggregation(query_vecs_neg,batch[\"query_tokens\"][\"attention_mask\"], doc_vecs_neg, batch[\"doc_neg_tokens\"][\"attention_mask\"])\n orig_batch[\"dyn_teacher_scores_pos\"] = ib_output_pos.cpu()\n orig_batch[\"dyn_teacher_scores_neg\"] = ib_output_neg.cpu()\n\n else:\n output_pos = model.forward(*pos_in, use_fp16 = use_fp16)\n output_neg = model.forward(*neg_in, use_fp16 = use_fp16)\n\n #if train_qa_spans:\n # output,answerability,qa_logits_start,qa_logits_end = output \n #answerability = answerability.cpu().float()\n #qa_logits_start = qa_logits_start.cpu().float()\n #qa_logits_end = qa_logits_end.cpu().float()\n\n #if train_sparsity:\n # output, cache_parts_out, sparsity_vec,sparsity_stats = output\n if self.dynamic_teacher_per_term_scores:\n (output_pos, per_term_scores_pos) = output_pos\n (output_neg, per_term_scores_neg) = output_neg\n\n orig_batch[\"dyn_teacher_per_term_scores_pos\"] = per_term_scores_pos.cpu()\n orig_batch[\"dyn_teacher_per_term_scores_neg\"] = per_term_scores_neg.cpu()\n\n if self.dynamic_teacher_in_batch_scoring:\n\n output_pos, query_vecs_pos, doc_vecs_pos = output_pos\n output_neg, query_vecs_neg, doc_vecs_neg = output_neg\n\n # colbert model\n ib_output_pos = model.forward_inbatch_aggregation(query_vecs_pos,batch[\"query_tokens\"][\"attention_mask\"], doc_vecs_pos, batch[\"doc_pos_tokens\"][\"attention_mask\"])\n ib_output_neg = model.forward_inbatch_aggregation(query_vecs_neg,batch[\"query_tokens\"][\"attention_mask\"], doc_vecs_neg, batch[\"doc_neg_tokens\"][\"attention_mask\"])\n\n orig_batch[\"dyn_teacher_scores_pos\"] = ib_output_pos.cpu()\n orig_batch[\"dyn_teacher_scores_neg\"] = ib_output_neg.cpu()\n\n #else:\n orig_batch[\"dyn_teacher_pair_scores_pos\"] = output_pos.cpu()\n orig_batch[\"dyn_teacher_pair_scores_neg\"] = output_neg.cpu()\n\n queue.put((orig_batch,None)) # this moves the tensors in to shared memory\n\n except Exception as e:\n queue.put((None, (repr(e), traceback.format_exc())))\n \n queue.put((None, None))\n # Wait until this process can safely exit.\n queue.join()"
},
{
"identifier": "RunningAverage",
"path": "matchmaker/utils/running_average.py",
"snippet": "class RunningAverage():\n \"\"\"\n module to hold a running average list as tensor on a gpu \n \"\"\"\n\n def __init__(self, size):\n super().__init__()\n self.entries = torch.zeros(size).cuda()\n self.current_entry_idx = 0\n\n def add_entry(self, value):\n if self.current_entry_idx == self.entries.shape[0]:\n self.current_entry_idx = 0\n \n self.entries[self.current_entry_idx] = value.detach()\n self.current_entry_idx += 1\n\n def get_average(self):\n return self.entries.mean()"
},
{
"identifier": "get_model",
"path": "matchmaker/models/all.py",
"snippet": "def get_model(config,word_embedder,padding_idx):\n encoder_type = NeuralIR_Encoder\n\n model_conf = config[\"model\"]\n\n wrap_max_p = False\n if model_conf.startswith(\"maxP->\"):\n wrap_max_p = True\n model_conf=model_conf.replace(\"maxP->\",\"\")\n\n wrap_mean_p = False\n if model_conf.startswith(\"meanP->\"):\n wrap_mean_p = True\n model_conf=model_conf.replace(\"meanP->\",\"\")\n\n #\n # pour published models\n #\n if model_conf == \"TK\": model = ECAI20_TK.from_config(config,word_embedder.get_output_dim())\n elif model_conf == \"TKL\": model = TKL_sigir20.from_config(config,word_embedder.get_output_dim())\n elif model_conf == \"TK_Sparse\": model = CIKM20_TK_Sparse.from_config(config,word_embedder.get_output_dim())\n elif model_conf == \"Bert_patch\" or model_conf == \"IDCM\":\n model = IDCM.from_config(config,padding_idx=padding_idx)\n encoder_type = None\n\n #\n # baselines with text only\n #\n elif model_conf == \"knrm\": model = KNRM.from_config(config,word_embedder.get_output_dim())\n elif model_conf == \"conv_knrm\": model = Conv_KNRM.from_config(config,word_embedder.get_output_dim())\n elif model_conf == \"match_pyramid\": model = MatchPyramid.from_config(config,word_embedder.get_output_dim())\n elif model_conf == \"drmm\": model = DRMM(word_embedder,10)\n\n #\n # baseline models with idf use\n #\n elif model_conf == \"pacrr\":\n model = PACRR.from_config(config,word_embedder.get_output_dim())\n encoder_type = NeuralIR_Encoder_WithIdfs\n elif model_conf == \"co_pacrr\":\n model = CO_PACRR.from_config(config,word_embedder.get_output_dim())\n encoder_type = NeuralIR_Encoder_WithIdfs\n elif model_conf == \"duet\":\n model = Duet.from_config(config,word_embedder.get_output_dim())\n encoder_type = NeuralIR_Encoder_WithIdfs\n\n #\n # bert models\n #\n else:\n encoder_type = None\n \n if model_conf == \"bert_cls\" or model_conf == \"bert_cat\": model = BERT_Cat.from_config(config)\n elif model_conf == \"bert_tower\" or model_conf == \"bert_dot\": model = BERT_Dot.from_config(config)\n #elif model_conf == \"QA_Bert_cat\": model = QA_Bert_cat(bert_model = config[\"bert_pretrained_model\"],trainable=config[\"bert_trainable\"])\n elif model_conf == \"bert_dot_qa\":\n model = Bert_dot_qa(rcr_main_compress_dim=config[\"rcr_main_compress_dim\"],rcr_residual_compress_dim=config[\"rcr_residual_compress_dim\"],\n bert_model = config[\"bert_pretrained_model\"],return_vecs=config.get(\"in_batch_negatives\",False) or config.get(\"dynamic_teacher_per_term_scores\",False),trainable=config[\"bert_trainable\"])\n\n elif model_conf == \"bert_dot_dualencoder\":\n model = Bert_dot_dualencoder(bert_model_document= config[\"bert_pretrained_model\"],bert_model_query=config[\"bert_pretrained_model_secondary\"],return_vecs=config[\"in_batch_negatives\"],trainable=config[\"bert_trainable\"])\n\n elif model_conf == \"ColBERT\":\n model = ColBERT.from_config(config)\n elif model_conf == \"ColBERT_v2\":\n model = ColBERT_v2.from_config(config)\n elif model_conf == \"ColBERTer\":\n model = ColBERTer.from_config(config)\n elif model_conf == \"CoColBERTer\":\n model = CoColBERTer.from_config(config)\n elif model_conf == \"CoCoColBERTer\":\n model = CoCoColBERTer.from_config(config)\n\n elif model_conf == \"PreTTR\" or model_conf == \"Bert_Split\":\n model = PreTTR.from_pretrained(config[\"bert_pretrained_model\"])\n\n elif model_conf == \"Parade\":\n model = Parade.from_config(config,padding_idx=padding_idx)\n\n else:\n print(\"Model %s not known\",config[\"model\"])\n exit(1)\n\n if wrap_max_p or 
wrap_mean_p:\n if \"inner_model_path\" in config:\n load_result = model.load_state_dict(torch.load(config[\"inner_model_path\"],map_location=\"cpu\"),strict=False)\n logger.info('Warmstart inner model from: %s', config[\"inner_model_path\"])\n logger.info(load_result)\n print(\"Inner-Warmstart Result:\",load_result)\n if wrap_max_p:\n model = MaxPAdapter.from_config(config,inner_model=model,padding_idx=padding_idx)\n if wrap_mean_p:\n model = MeanPAdapter.from_config(config,inner_model=model,padding_idx=padding_idx)\n\n return model, encoder_type"
},
{
"identifier": "get_word_embedder",
"path": "matchmaker/models/all.py",
"snippet": "def get_word_embedder(config):\n\n padding_idx = 0\n word_embedder = None\n # embedding layer (use pre-trained, but make it trainable as well)\n if config[\"token_embedder_type\"] == \"embedding\":\n vocab = Vocabulary.from_files(config[\"vocab_directory\"])\n tokens_embedder = Embedding(vocab=vocab,\n pretrained_file= config[\"pre_trained_embedding\"],\n embedding_dim=config[\"pre_trained_embedding_dim\"],\n trainable=config[\"train_embedding\"],\n padding_index=0,\n sparse=config[\"sparse_gradient_embedding\"])\n word_embedder = BasicTextFieldEmbedder({\"tokens\": tokens_embedder})\n \n elif config[\"token_embedder_type\"] == \"bert_embedding\":\n vocab = None\n bert_embedding = BertEmbeddingTokenEmbedder(config[\"bert_pretrained_model\"],pos_embeddings=config[\"bert_emb_pos\"],keep_layers=config[\"bert_emb_keep_layers\"])\n bert_embedding.bert_embeddings.word_embeddings.sparse = config[\"sparse_gradient_embedding\"]\n bert_embedding.bert_embeddings.token_type_embeddings.sparse = config[\"sparse_gradient_embedding\"]\n word_embedder = BasicTextFieldEmbedder({\"tokens\":bert_embedding},\n allow_unmatched_keys = True,\n embedder_to_indexer_map={\"tokens\":{\"tokens\":\"tokens\",\"offsets\":\"tokens-offsets\",\"token_type_ids\":\"tokens-type-ids\"}})\n elif config[\"token_embedder_type\"] == \"bert_vectors\":\n vocab = None\n bert_embedding = PretrainedTransformerEmbedder(config[\"bert_pretrained_model\"],requires_grad=config[\"train_embedding\"])#,top_layer_only=True)\n\n #if config[\"bert_emb_layers\"] > -1:\n # bert_embedding.bert_model.encoder.layer = bert_embedding.bert_model.encoder.layer[:config[\"bert_emb_layers\"]]\n\n word_embedder = BasicTextFieldEmbedder({\"tokens\":bert_embedding},\n allow_unmatched_keys = True,\n embedder_to_indexer_map={\"tokens\":{\"input_ids\":\"tokens\",\"offsets\":\"tokens-offsets\",\"token_type_ids\":\"tokens-type-ids\"}})\n elif config[\"token_embedder_type\"] == \"huggingface_bpe\":\n files = config[\"bpe_vocab_files\"].split(\";\")\n tok = CharBPETokenizer(files[0],files[1])\n padding_idx = tok.token_to_id(\"<pad>\")\n tokens_embedder = Embedding(num_embeddings=tok.get_vocab_size(),\n embedding_dim= config[\"pre_trained_embedding_dim\"],\n trainable= config[\"train_embedding\"],\n padding_index=padding_idx,\n sparse=config[\"sparse_gradient_embedding\"])\n word_embedder = BasicTextFieldEmbedder({\"tokens\": tokens_embedder})\n\n elif config[\"token_embedder_type\"] in [\"bert_cat\",\"bert_cls\",\"bert_dot\",\"bert_tower\"]:\n model = config[\"bert_pretrained_model\"]\n if \"facebook/dpr\" in config[\"bert_pretrained_model\"]: # ugh .. \n model= \"bert-base-uncased\" # should be identical (judging from paper + huggingface doc)\n padding_idx = PretrainedTransformerIndexer(model_name=model)._tokenizer.pad_token_id\n else:\n logger.error(\"token_embedder_type %s not known\",config[\"token_embedder_type\"])\n exit(1)\n\n return word_embedder,padding_idx"
},
{
"identifier": "build_model",
"path": "matchmaker/models/all.py",
"snippet": "def build_model(model,encoder_type,word_embedder,config):\n if encoder_type == None:\n pass\n elif encoder_type == NeuralIR_Encoder_WithIdfs or encoder_type == NeuralIR_Encoder_WithIdfs_PassThrough:\n idf_embedder = None\n if config[\"token_embedder_type\"] == \"embedding\":\n idf_embedder = Embedding(vocab=vocab,\n pretrained_file= config[\"idf_path\"],\n embedding_dim=1,\n trainable=config[\"idf_trainable\"],\n padding_index=0,\n sparse=config[\"sparse_gradient_embedding\"])\n idf_embedder = BasicTextFieldEmbedder({\"tokens\":idf_embedder})#, \n #allow_unmatched_keys = True, \n #embedder_to_indexer_map={\"tokens\":{\"tokens\":\"tokens\"}})\n model = encoder_type(word_embedder, idf_embedder, model) \n else:\n model = encoder_type(word_embedder, model)\n\n return model"
},
{
"identifier": "get_loss",
"path": "matchmaker/losses/all.py",
"snippet": "def get_loss(config):\n\n use_list_loss=False\n use_inbatch_list_loss=False\n qa_loss=None\n inbatch_loss=None\n inbatch_loss_query = None\n inbatch_loss_query_length = None\n\n if config[\"loss\"] == \"margin-mse\":\n loss = MSMarginLoss()\n elif config[\"loss\"] == \"MSETeacherPointwise\":\n loss = MSETeacherPointwise()\n elif config[\"loss\"] == \"MSETeacherPointwisePassages\":\n loss = MSETeacherPointwisePassages()\n elif config[\"loss\"] == \"MarginMSE_InterPassageLoss\":\n loss = MarginMSE_InterPassageLoss()\n elif config[\"loss\"] == \"KLDivTeacherPointwise\":\n loss = KLDivTeacherPointwise()\n elif config[\"loss\"] == \"RankNetTeacher\":\n loss = RankNetTeacher()\n elif config[\"loss\"] == \"MSERanknetTeacher\":\n loss = MSERanknetTeacher()\n elif config[\"loss\"] == \"MLMLoss\":\n loss = MLMLoss()\n\n elif config[\"loss\"] == \"ranknet\":\n loss = RankNetLoss()\n elif config[\"loss\"] == \"margin\":\n loss = torch.nn.MarginRankingLoss(margin=1, reduction='mean')\n elif config[\"loss\"] == \"weighted-hinge\":\n loss = WeightedHingeLoss()\n elif config[\"loss\"] == \"mrr\":\n loss = SmoothMRRLoss()\n use_list_loss = True\n elif config[\"loss\"] == \"listnet\":\n loss = ListNetLoss()\n use_list_loss = True\n elif config[\"loss\"] == \"lambdarank\":\n loss = LambdaLoss(\"ndcgLoss2_scheme\")\n use_list_loss = True\n elif config[\"loss\"] == \"qg-cross-entropy\":\n loss = CrossEntropyPointLoss(config)\n elif config[\"loss\"] == \"qg-cross-entropy-pn\":\n loss = CrossEntropyPNLoss(config)\n elif config[\"loss\"] == \"qg-cross-entropy-kd\":\n loss = CrossEntropyScoresLoss(config)\n elif config[\"loss\"] == \"qg-cross-entropy-kd-pn\":\n loss = CrossEntropyScoresPNLoss(config)\n elif config[\"loss\"] == \"qg-cross-entropy-kl\":\n loss = KLDiv()\n elif config[\"loss\"] == \"claps\":\n loss = Claps()\n else:\n raise Exception(\"Loss not known\")\n\n if config[\"train_qa_spans\"]:\n if config[\"qa_loss\"] == \"StartEndCrossEntropy\":\n qa_loss = QA_StartEndCrossEntropy()\n else:\n raise Exception(\"QA-Loss not known, qa_loss must be set with train_qa_spans\")\n\n\n if config[\"in_batch_negatives\"]:\n if config[\"in_batch_neg_loss\"] == \"ranknet\":\n inbatch_loss = RankNetLoss()\n elif config[\"in_batch_neg_loss\"] == \"margin\":\n inbatch_loss = torch.nn.MarginRankingLoss(margin=1, reduction='mean')\n elif config[\"in_batch_neg_loss\"] == \"margin-mse\":\n inbatch_loss = MSMarginLoss()\n elif config[\"in_batch_neg_loss\"] == \"KLDivTeacherList\":\n inbatch_loss = KLDivTeacherList()\n use_inbatch_list_loss = True\n elif config[\"in_batch_neg_loss\"] == \"listnet\":\n inbatch_loss = ListNetLoss()\n use_inbatch_list_loss = True\n elif config[\"in_batch_neg_loss\"] == \"lambdarank\":\n inbatch_loss = LambdaLossTeacher(\"ndcgLoss2_scheme\")\n use_inbatch_list_loss = True\n elif config[\"in_batch_neg_loss\"] == \"qg-cross-entropy\":\n inbatch_loss = CrossEntropyPointLoss(config)\n elif config[\"in_batch_neg_loss\"] == \"qg-cross-entropy-kd\":\n inbatch_loss = CrossEntropyScoresLoss(config)\n elif config[\"in_batch_neg_loss\"] == \"qg-kl\":\n inbatch_loss = KLDiv()\n else:\n raise Exception(\"In-batch-Loss not known, in_batch_neg_loss must be set with in_batch_negatives\")\n\n if config[\"in_batch_queries\"]:\n if config[\"in_batch_query_loss\"] == \"divquery\":\n inbatch_loss_query = DivQueryLoss()\n else:\n raise Exception(\"In-batch-Query-Loss not known, in_batch_query_loss must be set with in_batch_queries\")\n\n if config[\"intra_batch_queries\"]:\n if 
config[\"intra_batch_query_loss\"] == \"divquery\":\n inbatch_loss_query = DivQueryLoss()\n else:\n raise Exception(\"Intra-batch-Query-Loss not known, intra_batch_query_loss must be set with intra_batch_query_loss\")\n\n if config[\"in_batch_queries_length\"]:\n if config[\"in_batch_query_length_loss\"] == \"maxlength\":\n inbatch_loss_query_length = MaxLengthLoss()\n else:\n raise Exception(\"In-batch-Query-Length-Loss not known, in_batch_query_length_loss must be set with in_batch_query_length_loss\")\n\n return loss, qa_loss, inbatch_loss, use_list_loss,use_inbatch_list_loss, inbatch_loss_query, inbatch_loss_query_length"
},
{
"identifier": "merge_loss",
"path": "matchmaker/losses/all.py",
"snippet": "def merge_loss(losses, log_vars):\n loss = torch.zeros(1,device=log_vars.device)\n weighted_losses = []\n for l in range(len(losses)):\n precision = torch.exp(-log_vars[l])\n wl = torch.sum(precision * losses[l] + log_vars[l], -1)\n loss += wl\n weighted_losses.append(wl.detach())\n return torch.mean(loss),weighted_losses"
}
] | from typing import Dict, Tuple, List
from contextlib import nullcontext
from transformers import logging
from allennlp.common import Params, Tqdm
from torch.optim import *
from torch.optim.lr_scheduler import *
from torch import nn as nn
from allennlp.nn.util import move_to_device
from matchmaker.utils.utils import *
from matchmaker.utils.config import *
from matchmaker.distillation.dynamic_teacher import DynamicTeacher
from matchmaker.utils.running_average import RunningAverage
from matchmaker.models.all import get_model, get_word_embedder, build_model
from matchmaker.losses.all import get_loss,merge_loss
from matchmaker.active_learning.generate_training_subset import *
from matchmaker.autolabel_domain.robust04_nfoldcrosstrain import create_nfold_train_test_data
from matchmaker.utils.cross_experiment_cache import *
from matchmaker.utils.input_pipeline import *
from matchmaker.utils.performance_monitor import *
from matchmaker.eval import *
from torch.utils.tensorboard import SummaryWriter
from rich.console import Console
from rich.live import Live
import os
import warnings
import gc
import time
import sys,traceback
import itertools
import torch
import torch.distributed as dist
import numpy
import random
import transformers
import sys ,subprocess | 7,212 |
console = Console()
if __name__ == "__main__":
#
# config
#
args = get_parser().parse_args()
from_scratch = True
train_mode = "Train"
if args.continue_folder:
train_mode = "Evaluate"
from_scratch = False
run_folder = args.continue_folder
config = get_config_single(os.path.join(run_folder, "config.yaml"), args.config_overwrites)
else:
if not args.run_name:
raise Exception("--run-name must be set (or continue-folder)")
config = get_config(args.config_file, args.config_overwrites)
run_folder = prepare_experiment(args, config)
logger = get_logger_to_file(run_folder, "main")
logger.info("Running: %s", str(sys.argv))
tb_writer = SummaryWriter(run_folder)
print_hello(config,run_folder,train_mode)
#
# random seeds
#
torch.manual_seed(config["random_seed"])
numpy.random.seed(config["random_seed"])
random.seed(config["random_seed"])
logger.info("Torch seed: %i ",torch.initial_seed())
# hardcode gpu usage
cuda_device = 0 # always take the first -> set others via cuda flag in bash
perf_monitor = PerformanceMonitor.get()
perf_monitor.start_block("startup")
#
# create the training subset in case of subset training
# -------------------------------
#
train_subset = config.get("train_subset", False)
train_subset_incrementally = config.get('train_subset_incrementally', False)
train_subset_warmstart = config.get('train_subset_warmstart', False)
train_subset_control_topic_no = config.get('train_subset_control_topic_no', -1)
train_subset_firstk = config.get('train_subset_firstk', False)
triplet_no_per_topic = config.get('triplet_no_per_topic', 10)
if train_subset:
if not train_subset_incrementally:
if train_subset_control_topic_no > -1:
train_file_path = generate_subset_control_topic_no(config["train_tsv"], run_folder, config['train_subset_control_topic_no'], config["random_seed"], triplet_no_per_topic)
elif train_subset_firstk:
train_file_path = generate_train_subset_from_train_firstk(config["train_tsv"], run_folder, config['train_data_size'])
else:
train_file_path = generate_train_subset_from_train(config["train_tsv"], run_folder, config['train_data_size'], config["random_seed"])
config["train_tsv"] = train_file_path
else:
if not train_subset_warmstart:
if train_subset_control_topic_no > -1:
train_file_path = generate_subset_control_incrementally(config["train_tsv"], run_folder, config['train_subset_control_topic_no'],
config["random_seed"], triplet_no_per_topic,
config['expirement_base_path'],
config['previous_exp_name'])
else:
train_file_path = generate_train_subset_incrementally(config["train_tsv"], run_folder, config['train_data_size'],
config["random_seed"], config['expirement_base_path'], config['previous_exp_name'])
else:
if train_subset_control_topic_no > -1:
train_file_path, warmstart_path = generate_subset_control_incrementally(config["train_tsv"], run_folder, config['train_subset_control_topic_no'],
config["random_seed"], triplet_no_per_topic,
config['expirement_base_path'],
config['previous_exp_name'], warmstart_model=True)
else:
train_file_path, warmstart_path = generate_train_subset_incrementally(config["train_tsv"], run_folder, config['train_data_size'],
config["random_seed"], config['expirement_base_path'], config['previous_exp_name'], warmstart_model=True)
config["warmstart_model_path"] = warmstart_path
config["train_tsv"] = train_file_path
#
# create the training nfold subset
# -------------------------------
#
train_subset_nfold = config.get("train_subset_nfold", False)
if train_subset_nfold:
if config['nfold_sampling'] == 'nfold':
print('use nfolds from {} to {}'.format(config["fold_lb"], config["fold_ub"]))
else:
print('use random sampling for fold')
# now i use the config["train_tsv"] to subsample the size!
create_nfold_train_test_data(config['collection'], config['queries'], run_folder, config['candidate_path'],
config['validation_cont']['qrels'], sampling=config['nfold_sampling'],
fold_lb=config.get("fold_lb", 0), fold_ub=config.get('fold_ub', 0),
train_size=config.get('fold_train_size', 0), index_dir=config.get('index_dir', ''),
n_samples_per_query=config.get('n_samples_per_query', -1))
config["train_tsv"] = os.path.join(run_folder, 'train_triples_nfold.tsv')
config["validation_cont"]["tsv"] = os.path.join(run_folder, 'test_nfold_queries_rerank.tsv')
config["test"]["top1000_description"]["tsv"] = os.path.join(run_folder, 'test_nfold_queries.tsv')
#
# create (and load) model instance
# -------------------------------
#
# load candidate set for efficient cs@N validation
validation_cont_candidate_set = None
if from_scratch and "candidate_set_path" in config["validation_cont"]:
validation_cont_candidate_set = parse_candidate_set(config["validation_cont"]["candidate_set_path"],config["validation_cont"]["candidate_set_from_to"][1])
word_embedder, padding_idx = get_word_embedder(config)
| #
# train a neural-ir model
# -------------------------------
os.environ['PYTHONHASHSEED'] = "42" # very important to keep set operations deterministic
os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ["OMP_NUM_THREADS"] = "1"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # needed because of the scann library
#try:
# from grad_cache import GradCache
# _grad_cache_available = True
#except ModuleNotFoundError:
# _grad_cache_available = False
logging.set_verbosity_warning()
sys.path.append(os.getcwd())
console = Console()
if __name__ == "__main__":
#
# config
#
args = get_parser().parse_args()
from_scratch = True
train_mode = "Train"
if args.continue_folder:
train_mode = "Evaluate"
from_scratch = False
run_folder = args.continue_folder
config = get_config_single(os.path.join(run_folder, "config.yaml"), args.config_overwrites)
else:
if not args.run_name:
raise Exception("--run-name must be set (or continue-folder)")
config = get_config(args.config_file, args.config_overwrites)
run_folder = prepare_experiment(args, config)
logger = get_logger_to_file(run_folder, "main")
logger.info("Running: %s", str(sys.argv))
tb_writer = SummaryWriter(run_folder)
print_hello(config,run_folder,train_mode)
#
# random seeds
#
torch.manual_seed(config["random_seed"])
numpy.random.seed(config["random_seed"])
random.seed(config["random_seed"])
logger.info("Torch seed: %i ",torch.initial_seed())
# hardcode gpu usage
cuda_device = 0 # always take the first -> set others via cuda flag in bash
perf_monitor = PerformanceMonitor.get()
perf_monitor.start_block("startup")
#
# create the training subset in case of subset training
# -------------------------------
#
train_subset = config.get("train_subset", False)
train_subset_incrementally = config.get('train_subset_incrementally', False)
train_subset_warmstart = config.get('train_subset_warmstart', False)
train_subset_control_topic_no = config.get('train_subset_control_topic_no', -1)
train_subset_firstk = config.get('train_subset_firstk', False)
triplet_no_per_topic = config.get('triplet_no_per_topic', 10)
if train_subset:
if not train_subset_incrementally:
if train_subset_control_topic_no > -1:
train_file_path = generate_subset_control_topic_no(config["train_tsv"], run_folder, config['train_subset_control_topic_no'], config["random_seed"], triplet_no_per_topic)
elif train_subset_firstk:
train_file_path = generate_train_subset_from_train_firstk(config["train_tsv"], run_folder, config['train_data_size'])
else:
train_file_path = generate_train_subset_from_train(config["train_tsv"], run_folder, config['train_data_size'], config["random_seed"])
config["train_tsv"] = train_file_path
else:
if not train_subset_warmstart:
if train_subset_control_topic_no > -1:
train_file_path = generate_subset_control_incrementally(config["train_tsv"], run_folder, config['train_subset_control_topic_no'],
config["random_seed"], triplet_no_per_topic,
config['expirement_base_path'],
config['previous_exp_name'])
else:
train_file_path = generate_train_subset_incrementally(config["train_tsv"], run_folder, config['train_data_size'],
config["random_seed"], config['expirement_base_path'], config['previous_exp_name'])
else:
if train_subset_control_topic_no > -1:
train_file_path, warmstart_path = generate_subset_control_incrementally(config["train_tsv"], run_folder, config['train_subset_control_topic_no'],
config["random_seed"], triplet_no_per_topic,
config['expirement_base_path'],
config['previous_exp_name'], warmstart_model=True)
else:
train_file_path, warmstart_path = generate_train_subset_incrementally(config["train_tsv"], run_folder, config['train_data_size'],
config["random_seed"], config['expirement_base_path'], config['previous_exp_name'], warmstart_model=True)
config["warmstart_model_path"] = warmstart_path
config["train_tsv"] = train_file_path
#
# create the training nfold subset
# -------------------------------
#
train_subset_nfold = config.get("train_subset_nfold", False)
if train_subset_nfold:
if config['nfold_sampling'] == 'nfold':
print('use nfolds from {} to {}'.format(config["fold_lb"], config["fold_ub"]))
else:
print('use random sampling for fold')
# now i use the config["train_tsv"] to subsample the size!
create_nfold_train_test_data(config['collection'], config['queries'], run_folder, config['candidate_path'],
config['validation_cont']['qrels'], sampling=config['nfold_sampling'],
fold_lb=config.get("fold_lb", 0), fold_ub=config.get('fold_ub', 0),
train_size=config.get('fold_train_size', 0), index_dir=config.get('index_dir', ''),
n_samples_per_query=config.get('n_samples_per_query', -1))
config["train_tsv"] = os.path.join(run_folder, 'train_triples_nfold.tsv')
config["validation_cont"]["tsv"] = os.path.join(run_folder, 'test_nfold_queries_rerank.tsv')
config["test"]["top1000_description"]["tsv"] = os.path.join(run_folder, 'test_nfold_queries.tsv')
#
# create (and load) model instance
# -------------------------------
#
# load candidate set for efficient cs@N validation
validation_cont_candidate_set = None
if from_scratch and "candidate_set_path" in config["validation_cont"]:
validation_cont_candidate_set = parse_candidate_set(config["validation_cont"]["candidate_set_path"],config["validation_cont"]["candidate_set_from_to"][1])
word_embedder, padding_idx = get_word_embedder(config) | model, encoder_type = get_model(config,word_embedder,padding_idx) | 2 | 2023-11-21 10:38:22+00:00 | 12k |
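The merge_loss helper in this record's context list combines several training losses with one learnable log-variance per loss term (a precision-weighted, uncertainty-style sum). Below is a minimal sketch of driving it with two hypothetical loss values; the parameter and loss names are illustrative and not taken from the repository, and merge_loss itself simply mirrors the snippet above.

```python
# Minimal sketch of driving the merge_loss helper shown in this record's context list.
# The two loss values and the log_vars parameter are hypothetical.
import torch

def merge_loss(losses, log_vars):
    # Precision-weighted sum: each task loss is scaled by exp(-log_var) and penalized by +log_var.
    loss = torch.zeros(1, device=log_vars.device)
    weighted_losses = []
    for l in range(len(losses)):
        precision = torch.exp(-log_vars[l])
        wl = torch.sum(precision * losses[l] + log_vars[l], -1)
        loss += wl
        weighted_losses.append(wl.detach())
    return torch.mean(loss), weighted_losses

# One learnable log-variance per loss term (e.g., ranking loss + QA loss), trained jointly with the model.
log_vars = torch.nn.Parameter(torch.zeros(2))
rank_loss = torch.tensor([0.7])  # hypothetical pairwise ranking loss for the current batch
qa_loss = torch.tensor([1.3])    # hypothetical QA span loss for the current batch

total, per_task = merge_loss([rank_loss, qa_loss], log_vars)
total.backward()  # gradients reach log_vars (and, in real training, the model producing the losses)
print(total.item(), [w.item() for w in per_task])
```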
dmamakas2000/ipo | experiments/ipo.py | [
{
"identifier": "FinancialHierarchicalBert",
"path": "models/financial_features_hierarchical_bert.py",
"snippet": "class FinancialHierarchicalBert(nn.Module):\n\n def __init__(self, config, encoder, max_segments=64, max_segment_length=128, max_pooled=False):\n super(FinancialHierarchicalBert, self).__init__()\n supported_models = ['bert', 'roberta', 'deberta']\n assert encoder.config.model_type in supported_models # other model types are not supported so far\n\n # Pre-trained segment (token-wise) encoder, e.g., BERT\n self.encoder = encoder\n\n # Max-Pooling variant\n self.max_pooled = max_pooled\n\n # Specs for the segment-wise encoder\n self.hidden_size = encoder.config.hidden_size\n self.max_segments = max_segments\n self.max_segment_length = max_segment_length\n\n # Init sinusoidal positional embeddings\n self.seg_pos_embeddings = nn.Embedding(max_segments + 1, encoder.config.hidden_size,\n padding_idx=0,\n _weight=sinusoidal_init(max_segments + 1, encoder.config.hidden_size))\n\n self.seg_encoder = nn.Transformer(d_model=encoder.config.hidden_size,\n nhead=encoder.config.num_attention_heads,\n batch_first=True, dim_feedforward=encoder.config.intermediate_size,\n activation=encoder.config.hidden_act,\n dropout=encoder.config.hidden_dropout_prob,\n layer_norm_eps=encoder.config.layer_norm_eps,\n num_encoder_layers=2, num_decoder_layers=0).encoder\n\n # Add the two dense layers on top of the architecture\n self.first_dense_layer = nn.Linear(config.hidden_size, config.reduction_features)\n self.second_dense_layer = nn.Linear(config.reduction_features + 8, (config.reduction_features + 8) // 2)\n\n def forward(self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n financial_features=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n # Hypothetical Example\n # Batch of 4 documents: (batch_size, n_segments, max_segment_length) --> (4, 64, 128)\n # BERT-BASE encoder: 768 hidden units\n\n # Squash samples and segments into a single axis (batch_size * n_segments, max_segment_length) --> (256, 128)\n input_ids_reshape = input_ids.contiguous().view(-1, input_ids.size(-1))\n attention_mask_reshape = attention_mask.contiguous().view(-1, attention_mask.size(-1))\n if token_type_ids is not None:\n token_type_ids_reshape = token_type_ids.contiguous().view(-1, token_type_ids.size(-1))\n else:\n token_type_ids_reshape = None\n\n # Encode segments with BERT --> (256, 128, 768)\n encoder_outputs = self.encoder(input_ids=input_ids_reshape,\n attention_mask=attention_mask_reshape,\n token_type_ids=token_type_ids_reshape)[0]\n\n # Reshape back to (batch_size, n_segments, max_segment_length, output_size) --> (4, 64, 128, 768)\n encoder_outputs = encoder_outputs.contiguous().view(input_ids.size(0), self.max_segments,\n self.max_segment_length,\n self.hidden_size)\n\n if self.max_pooled:\n # Gather the maximum element from each vector of each segment\n encoder_outputs, _ = torch.max(encoder_outputs, dim=3) # Size -> (4, 64, 128)\n\n batch_size = encoder_outputs.size()[0]\n\n # Reshape tensor to (n * 64, 128)\n encoder_outputs = encoder_outputs.view(-1, self.max_segment_length)\n\n # Linear transformation to (n * 64, 768)\n linear = nn.Linear(self.max_segment_length, self.encoder.config.hidden_size).to('cuda')\n encoder_outputs = linear(encoder_outputs)\n\n # Reshape transformed tensor back to (n, 64, 768)\n encoder_outputs = encoder_outputs.view(batch_size, self.max_segments, self.encoder.config.hidden_size)\n\n # Encode segments with segment-wise transformer\n seg_encoder_outputs = 
self.seg_encoder(encoder_outputs)\n\n # Collect document representation\n outputs, _ = torch.max(seg_encoder_outputs, 1)\n\n return SimpleOutput(last_hidden_state=outputs, hidden_states=outputs)\n else:\n # Gather CLS outputs per segment --> (4, 64, 768)\n encoder_outputs = encoder_outputs[:, :, 0]\n\n # Infer real segments, i.e., mask paddings\n seg_mask = (torch.sum(input_ids, 2) != 0).to(input_ids.dtype)\n # Infer and collect segment positional embeddings\n seg_positions = torch.arange(1, self.max_segments + 1).to(input_ids.device) * seg_mask\n # Add segment positional embeddings to segment inputs\n encoder_outputs += self.seg_pos_embeddings(seg_positions)\n\n # Encode segments with segment-wise transformer\n seg_encoder_outputs = self.seg_encoder(encoder_outputs)\n\n # Collect document representation\n outputs, _ = torch.max(seg_encoder_outputs, 1)\n\n # Use the dense layer in order to reduce the BERT's output dimension from 768 to 8.\n reduced_embedding = self.first_dense_layer(outputs)\n\n # Perform the concatenation with the equivalent financial embedding\n concatenated_embedding = torch.cat((reduced_embedding, financial_features), dim=1)\n\n reduced_concatenated_embedding = self.second_dense_layer(concatenated_embedding)\n\n return SimpleOutput(last_hidden_state=reduced_concatenated_embedding,\n hidden_states=reduced_concatenated_embedding)"
},
{
"identifier": "HierarchicalBertFinancialModelForSequenceClassification",
"path": "models/financial_features_hierarchical_bert.py",
"snippet": "class HierarchicalBertFinancialModelForSequenceClassification(BertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.config = config\n self.bert = BertFinancial(config)\n\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.classifier = nn.Linear((config.reduction_features + 8) // 2, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n financial_features=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n financial_features=financial_features,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n # Perform the classification.\n logits = self.classifier(outputs.last_hidden_state)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )"
},
{
"identifier": "BinaryTrainer",
"path": "trainer/trainer.py",
"snippet": "class BinaryTrainer(Trainer):\n def compute_loss(self, model, inputs, return_outputs=False):\n if 'labels' in inputs:\n labels = inputs.pop(\"labels\")\n outputs = model(**inputs)\n logits = outputs.logits\n\n # Define Binary Cross Entropy Loss and Sigmoid\n loss_fct = nn.BCEWithLogitsLoss()\n\n # Compute the loss, and return\n loss = loss_fct(logits.view(-1, self.model.config.num_labels),\n labels.float().view(-1, self.model.config.num_labels))\n return (loss, outputs) if return_outputs else loss"
},
{
"identifier": "HierarchicalBert",
"path": "models/hierarchical_bert.py",
"snippet": "class HierarchicalBert(nn.Module):\n\n def __init__(self, encoder, max_segments=64, max_segment_length=128, max_pooled=False):\n super(HierarchicalBert, self).__init__()\n supported_models = ['bert', 'roberta', 'deberta']\n assert encoder.config.model_type in supported_models # other model types are not supported so far\n\n # Pre-trained segment (token-wise) encoder, e.g., BERT\n self.encoder = encoder\n\n # Max-Pooling variant\n self.max_pooled = max_pooled\n\n # Specs for the segment-wise encoder\n self.hidden_size = encoder.config.hidden_size\n self.max_segments = max_segments\n self.max_segment_length = max_segment_length\n\n # Init sinusoidal positional embeddings\n self.seg_pos_embeddings = nn.Embedding(max_segments + 1, encoder.config.hidden_size,\n padding_idx=0,\n _weight=sinusoidal_init(max_segments + 1, encoder.config.hidden_size))\n\n self.seg_encoder = nn.Transformer(d_model=encoder.config.hidden_size,\n nhead=encoder.config.num_attention_heads,\n batch_first=True, dim_feedforward=encoder.config.intermediate_size,\n activation=encoder.config.hidden_act,\n dropout=encoder.config.hidden_dropout_prob,\n layer_norm_eps=encoder.config.layer_norm_eps,\n num_encoder_layers=2, num_decoder_layers=0).encoder\n\n def forward(self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n # Hypothetical Example\n # Batch of 4 documents: (batch_size, n_segments, max_segment_length) --> (4, 64, 128)\n # BERT-BASE encoder: 768 hidden units\n\n # Squash samples and segments into a single axis (batch_size * n_segments, max_segment_length) --> (256, 128)\n input_ids_reshape = input_ids.contiguous().view(-1, input_ids.size(-1))\n attention_mask_reshape = attention_mask.contiguous().view(-1, attention_mask.size(-1))\n if token_type_ids is not None:\n token_type_ids_reshape = token_type_ids.contiguous().view(-1, token_type_ids.size(-1))\n else:\n token_type_ids_reshape = None\n\n # Encode segments with BERT --> (256, 128, 768)\n encoder_outputs = self.encoder(input_ids=input_ids_reshape,\n attention_mask=attention_mask_reshape,\n token_type_ids=token_type_ids_reshape)[0]\n\n # Reshape back to (batch_size, n_segments, max_segment_length, output_size) --> (4, 64, 128, 768)\n encoder_outputs = encoder_outputs.contiguous().view(input_ids.size(0), self.max_segments,\n self.max_segment_length,\n self.hidden_size)\n\n if self.max_pooled:\n # Gather the maximum element from each vector of each segment\n encoder_outputs, _ = torch.max(encoder_outputs, dim=3) # Size -> (4, 64, 128)\n\n batch_size = encoder_outputs.size()[0]\n\n # Reshape tensor to (n * 64, 128)\n encoder_outputs = encoder_outputs.view(-1, self.max_segment_length)\n\n # Linear transformation to (n * 64, 768)\n linear = nn.Linear(self.max_segment_length, self.encoder.config.hidden_size).to('cuda')\n encoder_outputs = linear(encoder_outputs)\n\n # Reshape transformed tensor back to (n, 64, 768)\n encoder_outputs = encoder_outputs.view(batch_size, self.max_segments, self.encoder.config.hidden_size)\n\n # Encode segments with segment-wise transformer\n seg_encoder_outputs = self.seg_encoder(encoder_outputs)\n\n # Collect document representation\n outputs, _ = torch.max(seg_encoder_outputs, 1)\n\n return SimpleOutput(last_hidden_state=outputs, hidden_states=outputs)\n else:\n # Gather CLS outputs per segment --> (4, 64, 768)\n encoder_outputs = encoder_outputs[:, :, 0]\n\n 
# Infer real segments, i.e., mask paddings\n seg_mask = (torch.sum(input_ids, 2) != 0).to(input_ids.dtype)\n # Infer and collect segment positional embeddings\n seg_positions = torch.arange(1, self.max_segments + 1).to(input_ids.device) * seg_mask\n # Add segment positional embeddings to segment inputs\n encoder_outputs += self.seg_pos_embeddings(seg_positions)\n\n # Encode segments with segment-wise transformer\n seg_encoder_outputs = self.seg_encoder(encoder_outputs)\n\n # Collect document representation\n outputs, _ = torch.max(seg_encoder_outputs, 1)\n\n return SimpleOutput(last_hidden_state=outputs, hidden_states=outputs)"
},
{
"identifier": "FinancialTrainer",
"path": "trainer/financial_features_trainer.py",
"snippet": "class FinancialTrainer(Trainer):\n def compute_loss(self, model, inputs, return_outputs=False):\n if 'labels' in inputs:\n labels = inputs.pop(\"labels\")\n outputs = model(**inputs)\n\n # Define Binary Cross Entropy Loss and Sigmoid\n loss_fct = nn.BCEWithLogitsLoss()\n\n # Compute the loss, and return\n loss = loss_fct(outputs.logits.view(-1, self.model.config.num_labels),\n labels.float().view(-1, self.model.config.num_labels))\n return (loss, outputs) if return_outputs else loss"
},
{
"identifier": "segment_string",
"path": "functions/functions.py",
"snippet": "def segment_string(text, segment_length):\n \"\"\"\n Splits the text into segments of fixed length.\n \"\"\"\n segments = []\n words = text.split()\n # Split the words into segments\n for i in range(0, len(words), segment_length):\n segment = ' '.join(words[i:i + segment_length])\n segments.append(segment)\n return segments"
},
{
"identifier": "new_compute_metrics",
"path": "functions/functions.py",
"snippet": "def new_compute_metrics(p: EvalPrediction, threshold_):\n \"\"\"\n Computes the model's metrics.\n \"\"\"\n\n # Set a threshold to work with\n threshold = threshold_\n\n # True values\n targets_ = p.label_ids.astype(np.int32)\n targets = []\n for x in targets_:\n if x[0] == 1:\n targets.append(1)\n else:\n targets.append(0)\n\n # Convert targets to numpy array\n targets = np.array(targets)\n\n # Predictions\n predictions = []\n probabilities = []\n if isinstance(p.predictions, tuple):\n for x in p.predictions[0]:\n p = sig(x)\n probabilities.append(p)\n if p >= threshold:\n predictions.append(1)\n else:\n predictions.append(0)\n else:\n for x in p.predictions:\n p = sig(x)\n probabilities.append(p)\n if p >= threshold:\n predictions.append(1)\n else:\n predictions.append(0)\n\n # Convert predictions to numpy array\n predictions = np.array(predictions)\n\n # Compute metric scores\n precision = precision_score(targets, predictions, zero_division=0, average=None)\n recall = recall_score(targets, predictions, zero_division=0, average=None)\n\n precision_0 = precision[0]\n precision_1 = precision[1]\n recall_0 = recall[0]\n recall_1 = recall[1]\n\n f1_0 = calculate_f1_score_per_class(precision_0, recall_0)\n f1_1 = calculate_f1_score_per_class(precision_1, recall_1)\n\n auc_0, auc_1 = calculate_auc_scores(probabilities, targets)\n\n # Macro averaging\n macro_avg_precision = (precision_0 + precision_1) / 2\n macro_avg_recall = (recall_0 + recall_1) / 2\n macro_avg_f1 = (f1_0 + f1_1) / 2\n macro_avg_pr_auc = (auc_0 + auc_1) / 2\n metrics = {\n 'precision-class-0': precision_0,\n 'precision-class-1': precision_1,\n 'recall-class-0': recall_0,\n 'recall-class-1': recall_1,\n 'pr-auc-class-0': auc_0,\n 'pr-auc-class-1': auc_1,\n 'f1-class-0': f1_0,\n 'f1-class-1': f1_1,\n 'macro-avg-precision': macro_avg_precision,\n 'macro-avg-recall': macro_avg_recall,\n 'macro-avg-auc': macro_avg_pr_auc,\n 'macro-avg-f1': macro_avg_f1\n }\n return metrics"
},
{
"identifier": "BertMaxPooledForSequenceClassification",
"path": "models/max_pooled_bert.py",
"snippet": "class BertMaxPooledForSequenceClassification(BertPreTrainedModel):\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.config = config\n\n self.bert = BertMaxPooledModel(config)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n # Max pooled outputs\n pooled_output = torch.max(outputs.last_hidden_state, dim=1)[0]\n pooled_output = self.dropout(pooled_output)\n\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )"
},
{
"identifier": "BertFinancialModelForSequenceClassification",
"path": "models/financial_features_bert.py",
"snippet": "class BertFinancialModelForSequenceClassification(BertPreTrainedModel):\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.config = config\n self.bert = BertFinancial(config)\n\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n\n if config.multiple_dense_layers:\n # Use two dense layers on top of each other in the architecture\n self.first_dense_layer = nn.Linear(config.hidden_size, config.reduction_features)\n self.second_dense_layer = nn.Linear(config.reduction_features + 8, (config.reduction_features + 8) // 2)\n self.classifier = nn.Linear((config.reduction_features + 8) // 2, config.num_labels)\n else:\n # Use only one dense layer in the architecture\n self.dense_layer = nn.Linear(config.hidden_size, config.reduction_features)\n self.classifier = nn.Linear(config.reduction_features + 8, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n financial_features=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n financial_features=financial_features,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n if hasattr(self, 'second_dense_layer'):\n # Use the dense layer in order to reduce the BERT's output dimension from 768 to 8.\n reduced_embedding = self.first_dense_layer(outputs.pooler_output)\n\n # Perform the concatenation with the equivalent financial embedding\n concatenated_embedding = torch.cat((reduced_embedding, financial_features), dim=1)\n\n reduced_concatenated_embedding = self.second_dense_layer(concatenated_embedding)\n\n # Perform the classification.\n logits = self.classifier(reduced_concatenated_embedding)\n else:\n # Use the dense layer in order to reduce the BERT's output dimension from 768 to 8.\n reduced_embedding = self.dense_layer(outputs.pooler_output)\n\n # Perform the concatenation with the equivalent financial embedding\n concatenated_embedding = torch.cat((reduced_embedding, financial_features), dim=1)\n\n # Perform the classification.\n logits = self.classifier(concatenated_embedding)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = 
BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )"
},
{
"identifier": "FinancialBertMaxPooledForSequenceClassification",
"path": "models/max_pooled_financial_features_bert.py",
"snippet": "class FinancialBertMaxPooledForSequenceClassification(BertPreTrainedModel):\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.config = config\n self.bert = BertFinancial(config)\n\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n\n if config.multiple_dense_layers:\n # Use two dense layers on top of each other in the architecture\n self.first_dense_layer = nn.Linear(config.hidden_size, config.reduction_features)\n self.second_dense_layer = nn.Linear(config.reduction_features + 8, (config.reduction_features + 8) // 2)\n self.classifier = nn.Linear((config.reduction_features + 8) // 2, config.num_labels)\n else:\n # Use only one dense layer in the architecture\n self.dense_layer = nn.Linear(config.hidden_size, config.reduction_features)\n self.classifier = nn.Linear(config.reduction_features + 8, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n financial_features=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n financial_features=None,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n # Max pooled outputs\n pooled_output = torch.max(outputs.last_hidden_state, dim=1)[0]\n pooled_output = self.dropout(pooled_output)\n\n if hasattr(self, 'second_dense_layer'):\n # Use the dense layer in order to reduce the BERT's output dimension from 768 to 8.\n reduced_embedding = self.first_dense_layer(pooled_output)\n\n # Perform the concatenation with the equivalent financial embedding\n concatenated_embedding = torch.cat((reduced_embedding, financial_features), dim=1)\n\n reduced_concatenated_embedding = self.second_dense_layer(concatenated_embedding)\n\n # Perform the classification.\n logits = self.classifier(reduced_concatenated_embedding)\n else:\n # Use the dense layer in order to reduce the BERT's output dimension from 768 to 8.\n reduced_embedding = self.dense_layer(pooled_output)\n\n # Perform the concatenation with the equivalent financial embedding\n concatenated_embedding = torch.cat((reduced_embedding, financial_features), dim=1)\n\n # Perform the classification.\n logits = self.classifier(concatenated_embedding)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = 
\"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )"
}
] | import os
import sys
import glob
import shutil
import random
import logging
import datasets
import transformers
from typing import Optional
from datasets import load_dataset
from dataclasses import dataclass, field
from models.financial_features_hierarchical_bert import FinancialHierarchicalBert, \
HierarchicalBertFinancialModelForSequenceClassification
from trainer.trainer import BinaryTrainer
from transformers.utils import check_min_version
from models.hierarchical_bert import HierarchicalBert
from transformers.utils.versions import require_version
from transformers.trainer_utils import get_last_checkpoint
from trainer.financial_features_trainer import FinancialTrainer
from functions.functions import segment_string, new_compute_metrics
from models.max_pooled_bert import BertMaxPooledForSequenceClassification
from models.financial_features_bert import BertFinancialModelForSequenceClassification
from models.max_pooled_financial_features_bert import FinancialBertMaxPooledForSequenceClassification
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
HfArgumentParser,
TrainingArguments,
default_data_collator,
set_seed,
) | 10,126 | model = HierarchicalBertFinancialModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
segment_encoder = model.bert
model_encoder = FinancialHierarchicalBert(config=config,
encoder=segment_encoder,
max_segments=data_args.max_segments,
max_segment_length=data_args.max_seg_length,
max_pooled=model_args.max_pooled)
model.bert = model_encoder
else:
"""
Scenario 6: Hierarchical-BERT.
"""
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
segment_encoder = model.bert
model_encoder = HierarchicalBert(encoder=segment_encoder,
max_segments=data_args.max_segments,
max_segment_length=data_args.max_seg_length,
max_pooled=model_args.max_pooled)
model.bert = model_encoder
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
padding = False
def preprocess_function(examples):
"""
Preprocesses the examples of a specific batch.
"""
if model_args.hierarchical:
case_template = [[0] * data_args.max_seg_length]
batch = {'input_ids': [], 'attention_mask': [], 'token_type_ids': []}
for doc in examples['text']:
doc = segment_string(doc, data_args.max_seg_length)
doc_encodings = tokenizer(doc[:data_args.max_segments], padding=padding,
max_length=data_args.max_seg_length, truncation=True)
batch['input_ids'].append(doc_encodings['input_ids'] + case_template * (
data_args.max_segments - len(doc_encodings['input_ids'])))
batch['attention_mask'].append(doc_encodings['attention_mask'] + case_template * (
data_args.max_segments - len(doc_encodings['attention_mask'])))
batch['token_type_ids'].append(doc_encodings['token_type_ids'] + case_template * (
data_args.max_segments - len(doc_encodings['token_type_ids'])))
else:
# Tokenize the texts
batch = tokenizer(
examples["text"],
padding=padding,
max_length=data_args.max_seq_length,
truncation=True,
)
# batch["labels"] = [[1 if labels == label else 0 for label in label_list] for labels in examples["class"]]
batch["labels"] = [[0 if labels == label else 1 for label in label_list] for labels in examples["class"]]
if model_args.concatenate_financial_features:
batch['financial_features'] = examples['financial']
return batch
# If training, apply the preprocessing and log a few random samples
if training_args.do_train:
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on train dataset",
)
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# If evaluating, apply the preprocessing and log a few random samples
if training_args.do_eval:
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
# If predicting, apply the preprocessing and log a few random samples
if training_args.do_predict:
if data_args.max_predict_samples is not None:
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
with training_args.main_process_first(desc="prediction dataset map pre-processing"):
predict_dataset = predict_dataset.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on prediction dataset",
)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
# Trainer
if model_args.concatenate_financial_features:
| #!/usr/bin/env python
# coding=utf-8
"""
File: ipo.py
Author: Dimitrios Mamakas (Athens University of Economics and Business)
Date: November 22, 2023
Description: Implementation of the following BERT-based and Hierarchical-BERT-based variants.
• bert-tx-cls-512
• bert-txff-cls-512
• bert-tx-max-512
• bert-txff-max-512
• hierbert-tx-cls-8192
• hierbert-txff-cls-8192
• hierbert-tx-cls-20480
• hierbert-txff-cls-20480
License:
This code is provided under the MIT License.
You are free to copy, modify, and distribute the code.
If you use this code in your research, please include a reference to the original study (please visit the home page).
"""
# Will error if the minimal version of Transformers is not installed
check_min_version("4.9.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
max_seq_length: Optional[int] = field(
default=512,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
max_segments: Optional[int] = field(
default=64,
metadata={
"help": "The maximum number of segments (paragraphs) to be considered. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
max_seg_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
)
pad_to_max_length: bool = field(
default=True,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
train_dataset_dir: Optional[str] = field(
default=None,
metadata={
"help": "The directory to retrieve the training dataset from."
}
)
eval_dataset_dir: Optional[str] = field(
default=None,
metadata={
"help": "The directory to retrieve the evaluation dataset from."
}
)
test_dataset_dir: Optional[str] = field(
default=None,
metadata={
"help": "The directory to retrieve the test dataset from."
}
)
server_ip: Optional[str] = field(default=None, metadata={"help": "For distant debugging."})
server_port: Optional[str] = field(default=None, metadata={"help": "For distant debugging."})
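# NOTE (assumed data layout): the JSON files pointed to by `train_dataset_dir` /
# `eval_dataset_dir` / `test_dataset_dir` are read with `load_dataset("json", ...)` and,
# judging from `preprocess_function` below, each record is expected to carry at least:
#   {"text": "<full document text>", "class": 0, "financial": [<8 float features>]}
# The 8-dimensional "financial" vector is inferred from the `+ 8` used when sizing the
# classification heads; the field names "text"/"class"/"financial" are taken from the code,
# everything else in this sketch is an assumption.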
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
do_lower_case: Optional[bool] = field(
default=True,
metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
}
)
max_pooled: bool = field(
default=False,
metadata={
"help": "Whether to use a max-pooled embedding as an input into the classification head."
"If set to False, the CLS embedding will be used to perform the classification."
}
)
hierarchical: bool = field(
default=False, metadata={"help": "Whether to use a hierarchical variant or not."}
)
concatenate_financial_features: bool = field(
default=False, metadata={"help": "Whether to concatenate the financial features among with the textual, or not."}
)
reduction_features: int = field(
default=8,
metadata={
"help": "The number of output BERT features to keep in case it is asked."
},
)
multiple_dense_layers: bool = field(
default=True,
metadata={
"help": "Whether to use a second dense layer on top of the first one (if selected), or not."
},
)
threshold: float = field(
default=0.5,
metadata={
"help": "The threshold to classify texts with."
}
)
def main():
"""
Main method.
"""
# Arguments
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Fix boolean parameter
if model_args.do_lower_case == 'False' or not model_args.do_lower_case:
model_args.do_lower_case = False
        # Tokenizer do_lower_case is False
else:
model_args.do_lower_case = True
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Load the dataset splits
if training_args.do_train:
train_dataset = load_dataset("json", data_files=data_args.train_dataset_dir, split="train",
cache_dir=model_args.cache_dir)
if training_args.do_eval:
eval_dataset = load_dataset("json", data_files=data_args.eval_dataset_dir, split="train",
cache_dir=model_args.cache_dir)
if training_args.do_predict:
predict_dataset = load_dataset("json", data_files=data_args.test_dataset_dir, split="train",
cache_dir=model_args.cache_dir)
# Labels
label_list = list(range(1))
num_labels = len(label_list)
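    # NOTE: `list(range(1))` yields the single label id [0], so `num_labels == 1` and every
    # classification head built below emits a single logit; `preprocess_function` turns the
    # "class" field into a one-element 0/1 vector accordingly (a binary setup).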
# Config
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Set additional parameters to control the flow of the experiments
config.reduction_features = model_args.reduction_features
config.multiple_dense_layers = model_args.multiple_dense_layers
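    # NOTE: these two attributes are read by the custom financial heads (see
    # `BertFinancialModelForSequenceClassification` and the max-pooled variant):
    # `reduction_features` is the output size of the first dense layer that shrinks the
    # pooled BERT embedding before the financial features are concatenated, and
    # `multiple_dense_layers` switches between the one- and two-dense-layer heads.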
# Tokenizer
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
do_lower_case=model_args.do_lower_case,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Check all possible combinations for choosing model and setting
if not model_args.hierarchical:
if model_args.max_pooled:
if model_args.concatenate_financial_features:
"""
Scenario 1: BERT (max-pooled) using financial embeddings.
"""
model = FinancialBertMaxPooledForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
"""
Scenario 2: BERT (max-pooled).
"""
model = BertMaxPooledForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
if model_args.concatenate_financial_features:
"""
Scenario 3: BERT (cls-pooled) using financial embeddings.
"""
model = BertFinancialModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
"""
Scenario 4: BERT (cls-pooled).
"""
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
if model_args.hierarchical:
if model_args.concatenate_financial_features:
"""
Scenario 5: Hierarchical-BERT using financial embeddings.
"""
model = HierarchicalBertFinancialModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
segment_encoder = model.bert
model_encoder = FinancialHierarchicalBert(config=config,
encoder=segment_encoder,
max_segments=data_args.max_segments,
max_segment_length=data_args.max_seg_length,
max_pooled=model_args.max_pooled)
model.bert = model_encoder
else:
"""
Scenario 6: Hierarchical-BERT.
"""
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
segment_encoder = model.bert
model_encoder = HierarchicalBert(encoder=segment_encoder,
max_segments=data_args.max_segments,
max_segment_length=data_args.max_seg_length,
max_pooled=model_args.max_pooled)
model.bert = model_encoder
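    # NOTE: both hierarchical scenarios follow the same wrapping pattern: the pretrained
    # `model.bert` becomes the per-segment encoder inside the hierarchical module, which is
    # then re-attached as `model.bert`, so the sequence-classification head of the original
    # model is reused unchanged on top of the document-level representation.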
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
padding = False
def preprocess_function(examples):
"""
Preprocesses the examples of a specific batch.
"""
if model_args.hierarchical:
case_template = [[0] * data_args.max_seg_length]
batch = {'input_ids': [], 'attention_mask': [], 'token_type_ids': []}
for doc in examples['text']:
doc = segment_string(doc, data_args.max_seg_length)
doc_encodings = tokenizer(doc[:data_args.max_segments], padding=padding,
max_length=data_args.max_seg_length, truncation=True)
batch['input_ids'].append(doc_encodings['input_ids'] + case_template * (
data_args.max_segments - len(doc_encodings['input_ids'])))
batch['attention_mask'].append(doc_encodings['attention_mask'] + case_template * (
data_args.max_segments - len(doc_encodings['attention_mask'])))
batch['token_type_ids'].append(doc_encodings['token_type_ids'] + case_template * (
data_args.max_segments - len(doc_encodings['token_type_ids'])))
else:
# Tokenize the texts
batch = tokenizer(
examples["text"],
padding=padding,
max_length=data_args.max_seq_length,
truncation=True,
)
# batch["labels"] = [[1 if labels == label else 0 for label in label_list] for labels in examples["class"]]
batch["labels"] = [[0 if labels == label else 1 for label in label_list] for labels in examples["class"]]
if model_args.concatenate_financial_features:
batch['financial_features'] = examples['financial']
return batch
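    # NOTE: in the hierarchical branch above, each document is split into up to
    # `max_segments` segments of `max_seg_length` tokens; shorter documents are padded with
    # all-zero segments (`case_template`), so `batch["input_ids"][i]` always has shape
    # [max_segments, max_seg_length] (e.g. 64 x 128 with the defaults).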
# If training, apply the preprocessing and log a few random samples
if training_args.do_train:
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on train dataset",
)
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# If evaluating, apply the preprocessing and log a few random samples
if training_args.do_eval:
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
# If predicting, apply the preprocessing and log a few random samples
if training_args.do_predict:
if data_args.max_predict_samples is not None:
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
with training_args.main_process_first(desc="prediction dataset map pre-processing"):
predict_dataset = predict_dataset.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on prediction dataset",
)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
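    # NOTE: when `pad_to_max_length` is set, everything was already padded in
    # `preprocess_function`, so the default collator only stacks tensors; with fp16 the
    # dynamic collator pads to a multiple of 8 (tensor-core friendly); otherwise
    # `data_collator=None` presumably lets the (custom) Trainer fall back to the standard
    # `DataCollatorWithPadding` behaviour when a tokenizer is supplied.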
# Trainer
if model_args.concatenate_financial_features: | trainer = FinancialTrainer( | 4 | 2023-11-22 18:46:55+00:00 | 12k |
barseghyanartur/fake.py | examples/customization/override_default_data.py | [
{
"identifier": "Factory",
"path": "fake.py",
"snippet": "class Factory:\n \"\"\"Factory.\"\"\"\n\n def __init__(self, faker: Optional[Faker] = None) -> None:\n # Directly use the setter to ensure provider methods are added\n self.faker = faker or FAKER\n\n @property\n def faker(self):\n return self._faker\n\n @faker.setter\n def faker(self, value):\n self._faker = value\n self._add_provider_methods(value)\n\n def _add_provider_methods(self, faker_instance):\n for class_name, methods in PROVIDER_REGISTRY.items():\n if (\n class_name == f\"{__name__}.{Faker.__name__}\"\n or class_name == self.faker.uid\n ):\n for method_name in methods:\n if hasattr(faker_instance, method_name):\n bound_method = create_factory_method(method_name)\n setattr(self, method_name, bound_method.__get__(self))"
},
{
"identifier": "Faker",
"path": "fake.py",
"snippet": "class Faker:\n \"\"\"fake.py - simplified, standalone alternative with no dependencies.\n\n ----\n\n Usage example:\n\n .. code-block:: python\n\n from fake import FAKER\n\n print(FAKER.first_name()) # Random first name\n print(FAKER.last_name()) # Random last name\n print(FAKER.name()) # Random name\n print(FAKER.word()) # Random word from the Zen of Python\n print(FAKER.words(nb=3)) # List of 3 random words from Zen of Python\n print(FAKER.sentence()) # Random sentence (5 random words by default)\n print(FAKER.paragraph()) # Paragraph (5 random sentences by default)\n print(FAKER.paragraphs()) # 3 random paragraphs\n print(FAKER.text()) # Random text up to 200 characters\n print(FAKER.file_name()) # Random filename with '.txt' extension\n print(FAKER.email()) # Random email\n print(FAKER.url()) # Random URL\n print(FAKER.pyint()) # Random integer\n print(FAKER.pybool()) # Random boolean\n print(FAKER.pystr()) # Random string\n print(FAKER.pyfloat()) # Random float\n\n ----\n\n PDF:\n\n .. code-block:: python\n\n from pathlib import Path\n from fake import FAKER, TextPdfGenerator, GraphicPdfGenerator\n\n Path(\"/tmp/graphic_pdf.pdf\").write_bytes(\n FAKER.pdf(nb_pages=100, generator=GraphicPdfGenerator)\n )\n\n Path(\"/tmp/text_pdf.pdf\").write_bytes(\n FAKER.pdf(nb_pages=100, generator=TextPdfGenerator)\n )\n\n ----\n\n Various image formats:\n\n .. code-block:: python\n\n from pathlib import Path\n from fake import FAKER\n\n Path(\"/tmp/image.png\").write_bytes(FAKER.png())\n\n Path(\"/tmp/image.svg\").write_bytes(FAKER.svg())\n\n Path(\"/tmp/image.bmp\").write_bytes(FAKER.bmp())\n\n Path(\"/tmp/image.gif\").write_bytes(FAKER.gif())\n\n Note, that all image formats accept `size` (default: `(100, 100)`)\n and `color`(default: `(255, 0, 0)`) arguments.\n \"\"\"\n\n def __init__(self, alias: Optional[str] = None) -> None:\n self._words: List[str] = []\n self._first_names: List[str] = []\n self._last_names: List[str] = []\n\n self.uid = f\"{self.__class__.__module__}.{self.__class__.__name__}\"\n if alias and alias in ALIAS_REGISTRY:\n LOGGER.warning(\n f\"Alias '{alias}' already registered. 
\"\n f\"Using '{self.uid}' as alias instead.\"\n )\n alias = None\n\n self.alias = alias or self.uid\n if self.uid not in UID_REGISTRY:\n UID_REGISTRY[self.uid] = self\n if self.alias not in ALIAS_REGISTRY:\n ALIAS_REGISTRY[self.alias] = self\n\n self.load_words()\n self.load_names()\n\n @staticmethod\n def get_by_uid(uid: str) -> Union[\"Faker\", None]:\n return UID_REGISTRY.get(uid, None)\n\n @staticmethod\n def get_by_alias(alias: str) -> Union[\"Faker\", None]:\n return ALIAS_REGISTRY.get(alias, None)\n\n def load_words(self) -> None:\n with contextlib.redirect_stdout(io.StringIO()):\n # Dynamically import 'this' module\n this = __import__(\"this\")\n\n zen_encoded: str = this.s\n translation_map: Dict[str, str] = {v: k for k, v in this.d.items()}\n zen: str = self._rot13_translate(zen_encoded, translation_map)\n self._words = (\n zen.translate(str.maketrans(\"\", \"\", string.punctuation))\n .lower()\n .split()\n )\n\n def load_names(self) -> None:\n authorship_data = AuthorshipData()\n self._first_names = list(authorship_data.first_names)\n self._last_names = list(authorship_data.last_names)\n\n @staticmethod\n def _rot13_translate(text: str, translation_map: Dict[str, str]) -> str:\n return \"\".join([translation_map.get(c, c) for c in text])\n\n @provider\n def uuid(self) -> UUID:\n return uuid.uuid4()\n\n @provider\n def uuids(self, nb: int = 5) -> List[UUID]:\n return [uuid.uuid4() for _ in range(nb)]\n\n @provider\n def first_name(self) -> str:\n return random.choice(self._first_names)\n\n @provider\n def first_names(self, nb: int = 5) -> List[str]:\n return [self.first_name() for _ in range(nb)]\n\n @provider\n def last_name(self) -> str:\n return random.choice(self._last_names)\n\n @provider\n def last_names(self, nb: int = 5) -> List[str]:\n return [self.last_name() for _ in range(nb)]\n\n @provider\n def name(self) -> str:\n return f\"{self.first_name()} {self.last_name()}\"\n\n @provider\n def names(self, nb: int = 5) -> List[str]:\n return [self.name() for _ in range(nb)]\n\n @provider\n def username(self) -> str:\n return (\n f\"{self.word()}_{self.word()}_{self.word()}_{self.pystr()}\"\n ).lower()\n\n @provider\n def usernames(self, nb: int = 5) -> List[str]:\n return [self.username() for _ in range(nb)]\n\n @provider\n def slug(self) -> str:\n return (\n f\"{self.word()}-{self.word()}-{self.word()}-{self.pystr()}\"\n ).lower()\n\n @provider\n def slugs(self, nb: int = 5) -> List[str]:\n return [self.slug() for _ in range(nb)]\n\n @provider\n def word(self) -> str:\n return random.choice(self._words).capitalize()\n\n @provider\n def words(self, nb: int = 5) -> List[str]:\n return [word.capitalize() for word in random.choices(self._words, k=nb)]\n\n @provider\n def sentence(self, nb_words: int = 5) -> str:\n return (\n f\"{' '.join([self.word() for _ in range(nb_words)]).capitalize()}.\"\n )\n\n @provider\n def sentences(self, nb: int = 3) -> List[str]:\n return [self.sentence() for _ in range(nb)]\n\n @provider\n def paragraph(self, nb_sentences: int = 5) -> str:\n return \" \".join([self.sentence() for _ in range(nb_sentences)])\n\n @provider\n def paragraphs(self, nb: int = 3) -> List[str]:\n return [self.paragraph() for _ in range(nb)]\n\n @provider\n def text(self, nb_chars: int = 200) -> str:\n current_text: str = \"\"\n while len(current_text) < nb_chars:\n sentence: str = self.sentence()\n current_text += f\" {sentence}\" if current_text else sentence\n return current_text[:nb_chars]\n\n @provider\n def texts(self, nb: int = 3) -> List[str]:\n return [self.text() 
for _ in range(nb)]\n\n @provider\n def file_name(self, extension: str = \"txt\") -> str:\n with NamedTemporaryFile(suffix=f\".{extension}\") as temp_file:\n return temp_file.name\n\n @provider\n def email(self, domain: str = \"example.com\") -> str:\n if not domain:\n domain = \"example.com\"\n return f\"{self.word().lower()}@{domain}\"\n\n @provider\n def url(\n self,\n protocols: Optional[Tuple[str]] = None,\n tlds: Optional[Tuple[str]] = None,\n suffixes: Optional[Tuple[str]] = None,\n ) -> str:\n protocol = random.choice(protocols or (\"http\", \"https\"))\n domain = self.word().lower()\n tld = random.choice(\n tlds\n or (\n \"com\",\n \"org\",\n \"net\",\n \"io\",\n )\n )\n suffix = random.choice(suffixes or (\".html\", \".php\", \".go\", \"\", \"/\"))\n return f\"{protocol}://{domain}.{tld}/{self.word().lower()}{suffix}\"\n\n @provider\n def image_url(\n self,\n width: int = 800,\n height: int = 600,\n service_url: Optional[str] = None,\n ) -> str:\n \"\"\"Image URL.\"\"\"\n if service_url is None:\n service_url = random.choice(IMAGE_SERVICES)\n return service_url.format(width=width, height=height)\n\n @provider\n def pyint(self, min_value: int = 0, max_value: int = 9999) -> int:\n return random.randint(min_value, max_value)\n\n @provider\n def pybool(self) -> bool:\n return random.choice(\n (\n True,\n False,\n )\n )\n\n @provider\n def pystr(self, nb_chars: int = 20) -> str:\n return \"\".join(random.choices(string.ascii_letters, k=nb_chars))\n\n @provider\n def pyfloat(\n self,\n min_value: float = 0.0,\n max_value: float = 10.0,\n ) -> float:\n return random.uniform(min_value, max_value)\n\n @provider\n def pydecimal(\n self,\n left_digits: int = 5,\n right_digits: int = 2,\n positive: bool = True,\n ) -> Decimal:\n \"\"\"Generate a random Decimal number.\n\n :param left_digits: Number of digits to the left of the decimal point.\n :param right_digits: Number of digits to the right of the decimal point.\n :param positive: If True, the number will be positive, otherwise it\n can be negative.\n :return: A randomly generated Decimal number.\n \"\"\"\n if left_digits < 0:\n raise ValueError(\"`left_digits` must be at least 0\")\n if right_digits < 0:\n raise ValueError(\"`right_digits` must be at least 0\")\n\n if left_digits > 0:\n # Generate the integer part\n __lower = 10 ** (left_digits - 1)\n __upper = (10**left_digits) - 1\n int_part = random.randint(__lower, __upper)\n else:\n int_part = 0\n\n if right_digits > 0:\n # Generate the fractional part\n __lower = 10 ** (right_digits - 1)\n __upper = (10**right_digits) - 1\n fractional_part = random.randint(__lower, __upper)\n else:\n fractional_part = 0\n\n # Combine both parts\n number = Decimal(f\"{int_part}.{fractional_part}\")\n\n # Make the number negative if needed\n if not positive:\n number = -number\n\n return number\n\n @provider\n def ipv4(self) -> str:\n return \".\".join(str(random.randint(0, 255)) for _ in range(4))\n\n def _parse_date_string(\n self, date_str: str, tzinfo=timezone.utc\n ) -> datetime:\n \"\"\"Parse date string with notation below into a datetime object:\n\n - '5M': 5 minutes from now\n - '-1d': 1 day ago\n - '-1H': 1 hour ago\n - '-365d': 365 days ago\n\n :param date_str: The date string with shorthand notation.\n :return: A datetime object representing the time offset.\n \"\"\"\n if date_str in [\"now\", \"today\"]:\n return datetime.now(tzinfo)\n\n match = re.match(r\"([+-]?\\d+)([dHM])\", date_str)\n if not match:\n raise ValueError(\n \"Date string format is incorrect. 
Expected formats like \"\n \"'-1d', '+2H', '-30M'.\"\n )\n value, unit = match.groups()\n value = int(value)\n if unit == \"d\": # Days\n return datetime.now(tzinfo) + timedelta(days=value)\n elif unit == \"H\": # Hours\n return datetime.now(tzinfo) + timedelta(hours=value)\n\n # Otherwise it's minutes\n return datetime.now(tzinfo) + timedelta(minutes=value)\n\n @provider\n def date(\n self,\n start_date: str = \"-7d\",\n end_date: str = \"+0d\",\n tzinfo=timezone.utc,\n ) -> date:\n \"\"\"Generate random date between `start_date` and `end_date`.\n\n :param start_date: The start date from which the random date should\n be generated in the shorthand notation.\n :param end_date: The end date up to which the random date should be\n generated in the shorthand notation.\n :param tzinfo: The timezone.\n :return: A string representing the formatted date.\n \"\"\"\n start_datetime = self._parse_date_string(start_date, tzinfo)\n end_datetime = self._parse_date_string(end_date, tzinfo)\n time_between_dates = (end_datetime - start_datetime).days\n random_days = random.randrange(\n time_between_dates + 1\n ) # Include the end date\n random_date = start_datetime + timedelta(days=random_days)\n return random_date.date()\n\n @provider\n def date_time(\n self,\n start_date: str = \"-7d\",\n end_date: str = \"+0d\",\n tzinfo=timezone.utc,\n ) -> datetime:\n \"\"\"Generate a random datetime between `start_date` and `end_date`.\n\n :param start_date: The start datetime from which the random datetime\n should be generated in the shorthand notation.\n :param end_date: The end datetime up to which the random datetime\n should be generated in the shorthand notation.\n :param tzinfo: The timezone.\n :return: A string representing the formatted datetime.\n \"\"\"\n start_datetime = self._parse_date_string(start_date, tzinfo)\n end_datetime = self._parse_date_string(end_date, tzinfo)\n time_between_datetimes = int(\n (end_datetime - start_datetime).total_seconds()\n )\n random_seconds = random.randrange(\n time_between_datetimes + 1\n ) # Include the end date time\n random_date_time = start_datetime + timedelta(seconds=random_seconds)\n return random_date_time\n\n @provider\n def pdf(\n self,\n nb_pages: int = 1,\n generator: Union[\n Type[TextPdfGenerator], Type[GraphicPdfGenerator]\n ] = GraphicPdfGenerator,\n metadata: Optional[MetaData] = None,\n **kwargs,\n ) -> bytes:\n \"\"\"Create a PDF document of a given size.\"\"\"\n _pdf = generator(faker=self)\n return _pdf.create(nb_pages=nb_pages, metadata=metadata, **kwargs)\n\n @provider\n def png(\n self,\n size: Tuple[int, int] = (100, 100),\n color: Tuple[int, int, int] = (0, 0, 255),\n ) -> bytes:\n \"\"\"Create a PNG image of a specified color.\n\n :param size: Tuple of width and height of the image in pixels.\n :param color: Color of the image in RGB format (tuple of three\n integers).\n :return: Byte content of the PNG image.\n \"\"\"\n width, height = size\n\n # PNG file format header\n png_header = b\"\\x89PNG\\r\\n\\x1a\\n\"\n\n # IHDR chunk: width, height, bit depth, color type, compression,\n # filter, interlace\n ihdr_content = (\n width.to_bytes(4, byteorder=\"big\")\n + height.to_bytes(4, byteorder=\"big\")\n + b\"\\x08\\x02\\x00\\x00\\x00\"\n )\n ihdr = b\"IHDR\" + ihdr_content\n ihdr_chunk = (\n len(ihdr_content).to_bytes(4, byteorder=\"big\")\n + ihdr\n + zlib.crc32(ihdr).to_bytes(4, byteorder=\"big\")\n )\n\n # IDAT chunk: image data\n raw_data = (\n b\"\\x00\" + bytes(color) * width\n ) # No filter, and RGB data for each pixel\n compressed_data 
= zlib.compress(raw_data * height) # Compress the data\n idat_chunk = (\n len(compressed_data).to_bytes(4, byteorder=\"big\")\n + b\"IDAT\"\n + compressed_data\n + zlib.crc32(b\"IDAT\" + compressed_data).to_bytes(\n length=4,\n byteorder=\"big\",\n )\n )\n\n # IEND chunk: marks the image end\n iend_chunk = b\"\\x00\\x00\\x00\\x00IEND\\xAE\\x42\\x60\\x82\"\n\n # Combine all chunks\n png_data = png_header + ihdr_chunk + idat_chunk + iend_chunk\n\n return png_data\n\n @provider\n def svg(\n self,\n size: Tuple[int, int] = (100, 100),\n color: Tuple[int, int, int] = (0, 0, 255),\n ) -> bytes:\n \"\"\"Create a SVG image of a specified color.\n\n :param size: Tuple of width and height of the image in pixels.\n :param color: Color of the image in RGB format (tuple of three\n integers).\n :return: Byte content of the SVG image.\n \"\"\"\n width, height = size\n return SVG_TPL.format(width=width, height=height, color=color).encode()\n\n @provider\n def bmp(\n self,\n size: Tuple[int, int] = (100, 100),\n color: Tuple[int, int, int] = (0, 0, 255),\n ) -> bytes:\n \"\"\"Create a BMP image of a specified color.\n\n :param size: Tuple of width and height of the image in pixels.\n :param color: Color of the image in RGB format (tuple of three\n integers).\n :return: Byte content of the BMP image.\n \"\"\"\n width, height = size\n\n # BMP Header and DIB Header (BITMAPINFOHEADER format)\n file_header = b\"BM\" # Signature\n dib_header = b\"\\x28\\x00\\x00\\x00\" # DIB Header size (40 bytes)\n\n # Image width and height\n width_bytes = width.to_bytes(4, byteorder=\"little\")\n height_bytes = height.to_bytes(4, byteorder=\"little\")\n\n # Image pixel data\n # BMP files are padded to be a multiple of 4 bytes wide\n row_padding = (4 - (3 * width) % 4) % 4\n pixel_data = bytes(color[::-1]) * width + b\"\\x00\" * row_padding\n image_data = pixel_data * height\n\n # File size\n file_size = (\n 14 + 40 + len(image_data)\n ) # 14 bytes file header, 40 bytes DIB header\n file_size_bytes = file_size.to_bytes(4, byteorder=\"little\")\n\n # Final assembly of the BMP file\n return (\n file_header\n + file_size_bytes\n + b\"\\x00\\x00\\x00\\x00\"\n + b\"\\x36\\x00\\x00\\x00\" # Reserved 4 bytes\n # Pixel data offset (54 bytes: 14 for file header, 40 for DIB\n # header)\n + dib_header\n + width_bytes\n + height_bytes\n + b\"\\x01\\x00\"\n + b\"\\x18\\x00\" # Number of color planes\n + b\"\\x00\\x00\\x00\\x00\" # Bits per pixel (24 for RGB)\n + len(image_data).to_bytes( # Compression method (0 for none)\n 4, byteorder=\"little\"\n )\n + b\"\\x13\\x0B\\x00\\x00\" # Size of the raw bitmap data\n # Print resolution of the image (2835 pixels/meter)\n + b\"\\x13\\x0B\\x00\\x00\"\n + b\"\\x00\\x00\\x00\\x00\"\n + b\"\\x00\\x00\\x00\\x00\" # Number of colors in the palette\n + image_data # Important colors\n )\n\n @provider\n def gif(\n self,\n size: Tuple[int, int] = (100, 100),\n color: Tuple[int, int, int] = (0, 0, 255),\n ) -> bytes:\n \"\"\"Create a GIF image of a specified color.\n\n :param size: Tuple of width and height of the image in pixels.\n :param color: Color of the image in RGB format (tuple of three\n integers).\n :return: Byte content of the GIF image.\n \"\"\"\n width, height = size\n\n # Header\n header = b\"GIF89a\"\n\n # Logical Screen Descriptor\n screen_width = width.to_bytes(2, byteorder=\"little\")\n screen_height = height.to_bytes(2, byteorder=\"little\")\n # Global Color Table Flag set to 1, Color resolution, and Sort Flag\n # to 0\n packed_field = b\"\\xF7\"\n bg_color_index = b\"\\x00\" # Background 
Color Index\n pixel_aspect_ratio = b\"\\x00\" # No aspect ratio information\n\n # Global Color Table.\n # Since it's a single color, we only need one entry in our table,\n # rest are black.\n # Each color is 3 bytes (RGB).\n color_table = bytes(color) + b\"\\x00\" * (3 * 255)\n\n # Image Descriptor\n image_descriptor = (\n b\"\\x2C\"\n + b\"\\x00\\x00\\x00\\x00\"\n + screen_width\n + screen_height\n + b\"\\x00\"\n )\n\n # Image Data\n lzw_min_code_size = b\"\\x08\" # Set to 8 for no compression\n\n # Image Data Blocks for a single color.\n # Simplest LZW encoding for a single color: clear code, followed\n # by color index, end code.\n image_data_blocks = bytearray(\n [0x02, 0x4C, 0x01, 0x00]\n ) # Compressed data\n\n # Footer\n footer = b\"\\x3B\"\n\n # Combine all parts\n return (\n header\n + screen_width\n + screen_height\n + packed_field\n + bg_color_index\n + pixel_aspect_ratio\n + color_table\n + image_descriptor\n + lzw_min_code_size\n + image_data_blocks\n + footer\n )\n\n @provider\n def image(\n self,\n image_format: Literal[\"png\", \"svg\", \"bmp\", \"gif\"] = \"png\",\n size: Tuple[int, int] = (100, 100),\n color: Tuple[int, int, int] = (0, 0, 255),\n ) -> bytes:\n if image_format not in {\"png\", \"svg\", \"bmp\", \"gif\"}:\n raise ValueError()\n image_func = getattr(self, image_format)\n return image_func(size=size, color=color)\n\n @provider\n def docx(\n self,\n nb_pages: Optional[int] = 1,\n texts: Optional[List[str]] = None,\n metadata: Optional[MetaData] = None,\n ) -> bytes:\n _docx = DocxGenerator(faker=self)\n return _docx.create(nb_pages=nb_pages, texts=texts, metadata=metadata)\n\n @provider\n def pdf_file(\n self,\n nb_pages: int = 1,\n generator: Union[\n Type[TextPdfGenerator], Type[GraphicPdfGenerator]\n ] = GraphicPdfGenerator,\n storage: Optional[BaseStorage] = None,\n basename: Optional[str] = None,\n prefix: Optional[str] = None,\n **kwargs,\n ) -> StringValue:\n if storage is None:\n storage = FileSystemStorage()\n filename = storage.generate_filename(\n extension=\"pdf\",\n prefix=prefix,\n basename=basename,\n )\n metadata = MetaData()\n data = self.pdf(\n nb_pages=nb_pages, generator=generator, metadata=metadata, **kwargs\n )\n storage.write_bytes(filename=filename, data=data)\n file = StringValue(storage.relpath(filename))\n file.data = {\n \"storage\": storage,\n \"filename\": filename,\n \"content\": metadata.content,\n }\n FILE_REGISTRY.add(file)\n return file\n\n def _image_file(\n self,\n extension: str,\n size: Tuple[int, int] = (100, 100),\n color: Tuple[int, int, int] = (0, 0, 255),\n storage: Optional[BaseStorage] = None,\n basename: Optional[str] = None,\n prefix: Optional[str] = None,\n ) -> StringValue:\n if storage is None:\n storage = FileSystemStorage()\n filename = storage.generate_filename(\n extension=extension,\n prefix=prefix,\n basename=basename,\n )\n data = self.png(size=size, color=color)\n storage.write_bytes(filename=filename, data=data)\n file = StringValue(storage.relpath(filename))\n file.data = {\"storage\": storage, \"filename\": filename}\n FILE_REGISTRY.add(file)\n return file\n\n @provider\n def png_file(\n self,\n size: Tuple[int, int] = (100, 100),\n color: Tuple[int, int, int] = (0, 0, 255),\n storage: Optional[BaseStorage] = None,\n basename: Optional[str] = None,\n prefix: Optional[str] = None,\n ) -> StringValue:\n return self._image_file(\n extension=\"png\",\n size=size,\n color=color,\n storage=storage,\n basename=basename,\n prefix=prefix,\n )\n\n @provider\n def svg_file(\n self,\n size: Tuple[int, int] = 
(100, 100),\n color: Tuple[int, int, int] = (0, 0, 255),\n storage: Optional[BaseStorage] = None,\n basename: Optional[str] = None,\n prefix: Optional[str] = None,\n ) -> StringValue:\n return self._image_file(\n extension=\"svg\",\n size=size,\n color=color,\n storage=storage,\n basename=basename,\n prefix=prefix,\n )\n\n @provider\n def bmp_file(\n self,\n size: Tuple[int, int] = (100, 100),\n color: Tuple[int, int, int] = (0, 0, 255),\n storage: Optional[BaseStorage] = None,\n basename: Optional[str] = None,\n prefix: Optional[str] = None,\n ) -> StringValue:\n return self._image_file(\n extension=\"bmp\",\n size=size,\n color=color,\n storage=storage,\n basename=basename,\n prefix=prefix,\n )\n\n @provider\n def gif_file(\n self,\n size: Tuple[int, int] = (100, 100),\n color: Tuple[int, int, int] = (0, 0, 255),\n storage: Optional[BaseStorage] = None,\n basename: Optional[str] = None,\n prefix: Optional[str] = None,\n ) -> StringValue:\n return self._image_file(\n extension=\"gif\",\n size=size,\n color=color,\n storage=storage,\n basename=basename,\n prefix=prefix,\n )\n\n @provider\n def docx_file(\n self,\n nb_pages: int = 1,\n texts: Optional[List[str]] = None,\n storage: Optional[BaseStorage] = None,\n basename: Optional[str] = None,\n prefix: Optional[str] = None,\n ) -> StringValue:\n if storage is None:\n storage = FileSystemStorage()\n filename = storage.generate_filename(\n extension=\"docx\",\n prefix=prefix,\n basename=basename,\n )\n metadata = MetaData()\n data = self.docx(texts=texts, metadata=metadata)\n storage.write_bytes(filename=filename, data=data)\n file = StringValue(storage.relpath(filename))\n file.data = {\n \"storage\": storage,\n \"filename\": filename,\n \"content\": metadata.content,\n }\n FILE_REGISTRY.add(file)\n return file\n\n @provider\n def txt_file(\n self,\n nb_chars: Optional[int] = 200,\n storage: Optional[BaseStorage] = None,\n basename: Optional[str] = None,\n prefix: Optional[str] = None,\n text: Optional[str] = None,\n ) -> StringValue:\n if storage is None:\n storage = FileSystemStorage()\n filename = storage.generate_filename(\n extension=\"txt\",\n prefix=prefix,\n basename=basename,\n )\n if not text:\n if not nb_chars:\n nb_chars = 200\n text = self.text(nb_chars=nb_chars)\n storage.write_text(filename=filename, data=text) # type: ignore\n file = StringValue(storage.relpath(filename))\n file.data = {\n \"storage\": storage,\n \"filename\": filename,\n \"content\": text,\n }\n FILE_REGISTRY.add(file)\n return file\n\n @provider\n def generic_file(\n self,\n content: Union[bytes, str],\n extension: str,\n storage: Optional[BaseStorage] = None,\n basename: Optional[str] = None,\n prefix: Optional[str] = None,\n ) -> StringValue:\n if storage is None:\n storage = FileSystemStorage()\n filename = storage.generate_filename(\n extension=extension,\n prefix=prefix,\n basename=basename,\n )\n\n if isinstance(content, bytes):\n storage.write_bytes(filename, content)\n else:\n storage.write_text(filename, content)\n\n file = StringValue(storage.relpath(filename))\n file.data = {\n \"content\": content,\n \"filename\": filename,\n \"storage\": storage,\n }\n FILE_REGISTRY.add(file)\n return file"
}
] | from fake import Factory, Faker
from data import FIRST_NAMES, LAST_NAMES, WORDS | 7,989 |
__author__ = "Artur Barseghyan <[email protected]>"
__copyright__ = "2023 Artur Barseghyan"
__license__ = "MIT"
__all__ = (
"FACTORY",
"FAKER",
)
class FakerOverrideDefaultData(Faker):
"""Faker class for custom names and words."""
def load_names(self) -> None:
"""Override default first- and last-names dictionaries."""
self._first_names = FIRST_NAMES
self._last_names = LAST_NAMES
def load_words(self) -> None:
"""Override default words dictionary."""
self._words = WORDS
FAKER = FakerOverrideDefaultData(alias="override_default_data")
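# NOTE: `Faker.__init__` calls `load_words()` and `load_names()` (see the snippet above), so
# overriding them here makes FAKER.first_name()/last_name()/word() draw from the custom
# FIRST_NAMES/LAST_NAMES/WORDS lists imported from `data` instead of the built-in
# authorship and Zen-of-Python vocabularies.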
|
__author__ = "Artur Barseghyan <[email protected]>"
__copyright__ = "2023 Artur Barseghyan"
__license__ = "MIT"
__all__ = (
"FACTORY",
"FAKER",
)
class FakerOverrideDefaultData(Faker):
"""Faker class for custom names and words."""
def load_names(self) -> None:
"""Override default first- and last-names dictionaries."""
self._first_names = FIRST_NAMES
self._last_names = LAST_NAMES
def load_words(self) -> None:
"""Override default words dictionary."""
self._words = WORDS
FAKER = FakerOverrideDefaultData(alias="override_default_data") | FACTORY = Factory(FAKER) | 0 | 2023-11-24 21:36:14+00:00 | 12k |
Yifei-Y/Openset-RCNN | openset_rcnn/modeling/roi_heads/osrcnn_roi_heads.py | [
{
"identifier": "OpensetFastRCNNOutputLayers",
"path": "openset_rcnn/modeling/roi_heads/osrcnn_fast_rcnn.py",
"snippet": "class OpensetFastRCNNOutputLayers(nn.Module):\n \"\"\"\n Two linear layers for predicting Fast R-CNN outputs:\n\n 1. proposal-to-detection box regression deltas\n 2. iou\n \"\"\"\n\n @configurable\n def __init__(\n self,\n input_shape: ShapeSpec,\n *,\n box2box_transform,\n num_classes: int,\n test_objectness_score_thresh: float = 0.0,\n test_nms_thresh: float = 0.5,\n test_topk_per_image: int = 100,\n mean_type: str = \"geometric\",\n cls_agnostic_bbox_reg: bool = False,\n box_smooth_l1_beta: float = 0.0,\n box_reg_loss_type: str = \"smooth_l1\",\n iou_smooth_l1_beta: float = 0.0,\n iou_reg_loss_type: str = \"smooth_l1\",\n loss_weight: Union[float, Dict[str, float]] = 1.0,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n input_shape (ShapeSpec): shape of the input feature to this module\n box2box_transform (Box2BoxTransform or Box2BoxTransformRotated):\n num_classes (int): number of foreground classes\n test_objectness_score_thresh (float): threshold to filter predictions results.\n test_nms_thresh (float): NMS threshold for prediction results.\n test_topk_per_image (int): number of top predictions to produce per image.\n mean_type (str): Mean type of integrating centerness and IoU to get objectness.\n cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression\n box_smooth_l1_beta (float): transition point from L1 to L2 loss. Only used if\n `box_reg_loss_type` is \"smooth_l1\"\n box_reg_loss_type (str): Box regression loss type. One of: \"smooth_l1\", \"giou\",\n \"diou\", \"ciou\"\n iou_smooth_l1_beta (float): transition point from L1 to L2 loss. Only used if\n `iou_reg_loss_type` is \"smooth_l1\"\n iou_reg_loss_type (str): IoU regression loss type. Supported losses: \"smooth_l1\"\n loss_weight (float|dict): weights to use for losses. Can be single float for weighting\n all losses, or a dict of individual weightings. 
Valid dict keys are:\n * \"loss_box_reg\": applied to box regression loss\n * \"loss_iou_reg\": applied to iou regression loss\n \"\"\"\n super().__init__()\n if isinstance(input_shape, int): # some backward compatibility\n input_shape = ShapeSpec(channels=input_shape)\n self.num_classes = num_classes\n input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1)\n \n num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes\n box_dim = len(box2box_transform.weights)\n self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim)\n self.iou_pred = nn.Linear(input_size, 1)\n\n nn.init.normal_(self.bbox_pred.weight, std=0.001)\n nn.init.normal_(self.iou_pred.weight, std=0.01)\n for l in [self.bbox_pred, self.iou_pred]:\n nn.init.constant_(l.bias, 0)\n\n self.box2box_transform = box2box_transform\n self.box_smooth_l1_beta = box_smooth_l1_beta\n self.test_objectness_score_thresh = test_objectness_score_thresh\n self.test_nms_thresh = test_nms_thresh\n self.test_topk_per_image = test_topk_per_image\n self.mean_type = mean_type\n self.box_reg_loss_type = box_reg_loss_type\n self.iou_smooth_l1_beta = iou_smooth_l1_beta\n self.iou_reg_loss_type = iou_reg_loss_type\n if isinstance(loss_weight, float):\n loss_weight = {\"loss_box_reg\": loss_weight, \"loss_iou_reg\": loss_weight}\n self.loss_weight = loss_weight\n\n @classmethod\n def from_config(cls, cfg, input_shape):\n return {\n \"input_shape\": input_shape,\n \"box2box_transform\": Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS),\n # fmt: off\n \"num_classes\" : cfg.MODEL.ROI_HEADS.NUM_CLASSES,\n \"cls_agnostic_bbox_reg\" : cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG,\n \"box_smooth_l1_beta\" : cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA,\n \"test_objectness_score_thresh\" : cfg.MODEL.ROI_HEADS.OBJ_SCORE_THRESH_TEST,\n \"test_nms_thresh\" : cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,\n \"test_topk_per_image\" : cfg.TEST.DETECTIONS_PER_IMAGE,\n \"mean_type\" : cfg.MODEL.ROI_HEADS.MEAN_TYPE,\n \"box_reg_loss_type\" : cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE,\n \"iou_smooth_l1_beta\" : cfg.MODEL.ROI_BOX_HEAD.IOU_SMOOTH_L1_BETA,\n \"iou_reg_loss_type\" : cfg.MODEL.ROI_BOX_HEAD.IOU_REG_LOSS_TYPE,\n \"loss_weight\" : {\"loss_box_reg\": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT,\n \"loss_iou\": cfg.MODEL.ROI_BOX_HEAD.IOU_REG_LOSS_WEIGHT},\n # fmt: on\n }\n\n def forward(self, x):\n \"\"\"\n Args:\n x: per-region features of shape (N, ...) for N bounding boxes to predict.\n\n Returns:\n (Tensor, Tensor):\n First tensor: bounding box regression deltas for each box. Shape is shape (N,Kx4),\n or (N,4) for class-agnostic regression.\n\n Second tensor: iou prediction for each box. Shape is (N, 1)\n \"\"\"\n if x.dim() > 2:\n x = torch.flatten(x, start_dim=1)\n proposal_deltas = self.bbox_pred(x)\n pred_iou = self.iou_pred(x).sigmoid()\n return proposal_deltas, pred_iou\n\n def losses(self, predictions, proposals):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were used\n to compute predictions. 
The fields ``proposal_boxes``, ``gt_boxes``,\n ``gt_classes``, ``ious`` are expected.\n\n Returns:\n Dict[str, Tensor]: dict of losses\n \"\"\"\n proposal_deltas, pred_iou = predictions\n # Tensor (#images*num_samples, 4) (#images*num_samples, 1)\n\n # parse classification outputs\n gt_classes = (\n cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)\n )\n gt_iou = (\n cat([p.ious for p in proposals], dim=0) if len(proposals) else torch.empty(0)\n )\n # Tensor: (images * num_samples)\n\n # parse box regression outputs\n if len(proposals):\n proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4\n assert not proposal_boxes.requires_grad, \"Proposals should not require gradients!\"\n # If \"gt_boxes\" does not exist, the proposals must be all negative and\n # should not be included in regression loss computation.\n # Here we just use proposal_boxes as an arbitrary placeholder because its\n # value won't be used in self.box_reg_loss().\n gt_boxes = cat(\n [(p.gt_boxes if p.has(\"gt_boxes\") else p.proposal_boxes).tensor for p in proposals],\n dim=0,\n )\n else:\n proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device) \n\n losses = {\n \"loss_box_reg\": self.box_reg_loss(\n proposal_boxes, gt_boxes, proposal_deltas, gt_classes\n ),\n \"loss_iou\": self.iou_loss(pred_iou, gt_iou, gt_classes)\n }\n return {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}\n\n def box_reg_loss(self, proposal_boxes, gt_boxes, pred_deltas, gt_classes):\n \"\"\"\n Args:\n proposal_boxes/gt_boxes are tensors with the same shape (R, 4 or 5).\n pred_deltas has shape (R, 4 or 5), or (R, num_classes * (4 or 5)).\n gt_classes is a long tensor of shape R, the gt class label of each proposal.\n R shall be the number of proposals.\n \"\"\"\n box_dim = proposal_boxes.shape[1] # 4 or 5\n # Regression loss is only computed for foreground proposals (those matched to a GT)\n fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < self.num_classes))[0]\n if pred_deltas.shape[1] == box_dim: # cls-agnostic regression\n fg_pred_deltas = pred_deltas[fg_inds]\n else:\n fg_pred_deltas = pred_deltas.view(-1, self.num_classes, box_dim)[\n fg_inds, gt_classes[fg_inds]\n ]\n\n loss_box_reg = _dense_box_regression_loss_w_iou(\n [proposal_boxes[fg_inds]],\n self.box2box_transform,\n [fg_pred_deltas.unsqueeze(0)],\n [gt_boxes[fg_inds]],\n ...,\n self.box_reg_loss_type,\n self.box_smooth_l1_beta,\n )\n\n # The reg loss is normalized using the total number of regions (R), not the number\n # of foreground regions even though the box regression loss is only defined on\n # foreground regions. Why? Because doing so gives equal training influence to\n # each foreground example. To see how, consider two different minibatches:\n # (1) Contains a single foreground region\n # (2) Contains 100 foreground regions\n # If we normalize by the number of foreground regions, the single example in\n # minibatch (1) will be given 100 times as much influence as each foreground\n # example in minibatch (2). 
Normalizing by the total number of regions, R,\n # means that the single example in minibatch (1) and each of the 100 examples\n # in minibatch (2) are given equal influence.\n return loss_box_reg / max(gt_classes.numel(), 1.0) # return 0 if empty\n\n def iou_loss(self, pred_iou, gt_iou, gt_classes):\n \"\"\"\n IoU regression loss.\n\n Args: \n pred_iou (Tensor): shape (#images * num_samples, 1), IoU prediction\n gt_iou (list[Tensor]): length #images list, element i is length num_samples Tensor containing the ground truth IoU\n gt_classes (Tensor): length #images * num_samples, the gt class label of each proposal\n \n Returns:\n Tensor: IoU loss\n \"\"\"\n fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < self.num_classes))[0]\n fg_pred_iou = pred_iou.squeeze()[fg_inds]\n fg_gt_iou = gt_iou[fg_inds]\n loss_iou = smooth_l1_loss(fg_pred_iou, fg_gt_iou, beta=self.iou_smooth_l1_beta, reduction='sum')\n \n return loss_iou / max(gt_classes.numel(), 1.0)\n\n def inference(\n self,\n predictions: Tuple[torch.Tensor, torch.Tensor],\n proposals: List[Instances],\n box_features: torch.Tensor\n ):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were\n used to compute predictions. The ``proposal_boxes`` field is expected.\n box_features (Tensor): shape (#images * #proposals, feat_dims), per-region feature vector.\n\n Returns:\n see `find_top_proposals`\n \"\"\"\n boxes = self.predict_boxes(predictions, proposals)\n ious = self.predict_ious(predictions, proposals)\n image_shapes = [x.image_size for x in proposals]\n num_prop_per_image = [len(p) for p in proposals]\n box_features = box_features.split(num_prop_per_image)\n return fast_rcnn_inference(\n boxes,\n ious,\n image_shapes,\n box_features,\n self.test_objectness_score_thresh,\n self.test_nms_thresh,\n self.test_topk_per_image\n )\n\n def predict_boxes(\n self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]\n ):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were\n used to compute predictions. The ``proposal_boxes`` field is expected.\n\n Returns:\n list[Tensor]:\n A list of Tensors of predicted class-specific or class-agnostic boxes\n for each image. 
Element i has shape (Ri, K * B) or (Ri, B), where Ri is\n the number of proposals for image i and B is the box dimension (4 or 5)\n \"\"\"\n if not len(proposals):\n return []\n proposal_deltas, _ = predictions\n num_prop_per_image = [len(p) for p in proposals]\n proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)\n predict_boxes = self.box2box_transform.apply_deltas(\n proposal_deltas,\n proposal_boxes,\n ) # Nx(KxB)\n return predict_boxes.split(num_prop_per_image)\n\n def predict_ious(\n self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]\n ):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were\n used to compute predictions.\n\n Returns:\n list[Tensor]:\n A list of Tensors of predicted IoUs for each image.\n Element i has shape (Ri, 1), where Ri is the number of proposals for image i.\n \"\"\"\n _, ious = predictions\n centerness = cat([p.objectness_logits for p in proposals]).unsqueeze(1)\n if self.mean_type == \"geometric\":\n scores = torch.sqrt(ious * centerness)\n if self.mean_type == \"arithmetic\":\n scores = (ious + centerness) / 2.0\n num_prop_per_image = [len(p) for p in proposals]\n return scores.split(num_prop_per_image)"
},
{
"identifier": "SoftMaxClassifier",
"path": "openset_rcnn/modeling/roi_heads/softmax_classifier.py",
"snippet": "class SoftMaxClassifier(nn.Module):\n \"\"\"\n Softmax classifier with one linear layer to classify the known objects.\n \"\"\"\n @configurable\n def __init__(\n self,\n num_classes: int,\n num_known_classes: int,\n dataset_name: str,\n opendet_benchmark: bool,\n input_size: int,\n known_score_thresh: float,\n known_nms_thresh: float,\n known_topk: int,\n unknown_score_thresh: float,\n unknown_nms_thresh: float,\n unknown_topk: int,\n cls_loss_weight: float\n ):\n \"\"\"\n Args:\n num_classes (int): number of foreground classes.\n num_known_classes (int): number of known foreground classes.\n dataset_name (str): name of training set.\n opendet_benchmark (bool): whether to use OpenDet benchmark.\n input_size (int): dim of input feature vector.\n known_score_thresh (float): threshold to filter known predictions results.\n known_nms_thresh (float): NMS threshold for known prediction results.\n known_topk (int): number of top known predictions to produce per image.\n unknown_score_thresh (float): threshold to filter unknown predictions results.\n unknown_nms_thresh (float): NMS threshold for unknown prediction results.\n unknown_topk (int): number of top unknown predictions to produce per image.\n cls_loss_weight (float): weights to use for classification loss.\n \"\"\"\n super().__init__()\n self.num_classes = num_classes\n self.num_known_classes = num_known_classes\n\n self.cls_score = nn.Linear(input_size, num_known_classes + 1)\n nn.init.normal_(self.cls_score.weight, std=0.01)\n nn.init.constant_(self.cls_score.bias, 0)\n\n self.known_score_thresh = known_score_thresh\n self.known_nms_thresh = known_nms_thresh\n self.known_topk = known_topk\n self.unknown_score_thresh = unknown_score_thresh\n self.unknown_nms_thresh = unknown_nms_thresh\n self.unknown_topk = unknown_topk\n self.cls_loss_weight = cls_loss_weight\n\n self.opendet_benchmark = opendet_benchmark\n\n if self.opendet_benchmark:\n self.id_map = torch.zeros(self.num_classes+1, device='cuda') - 1\n for i in range(self.num_known_classes):\n self.id_map[i] = torch.tensor(i, device='cuda')\n self.id_map[self.num_classes] = torch.tensor(self.num_known_classes, device='cuda')\n\n self.id_map = self.id_map.long()\n else:\n meta = MetadataCatalog.get(dataset_name)\n self.class_id, _ = torch.sort(\n torch.tensor(\n [meta.thing_dataset_id_to_contiguous_id[thing_id] for thing_id in GRASPNET_KNOWN_IDS], \n device='cuda'\n )\n )\n\n self.id_map = torch.zeros(self.num_classes+1, device='cuda') - 1\n for i, v in enumerate(self.class_id):\n self.id_map[v] = torch.tensor(i, device='cuda')\n self.id_map[self.num_classes] = torch.tensor(self.num_known_classes, device='cuda')\n\n self.class_id = self.class_id.long()\n self.id_map = self.id_map.long()\n \n @classmethod\n def from_config(cls, cfg):\n return {\n \"num_classes\" : cfg.MODEL.ROI_HEADS.NUM_CLASSES,\n \"num_known_classes\" : cfg.MODEL.ROI_HEADS.NUM_KNOWN_CLASSES,\n \"dataset_name\" : cfg.DATASETS.TRAIN[0],\n \"opendet_benchmark\" : cfg.OPENDET_BENCHMARK,\n # sm-de\n \"input_size\" : cfg.MODEL.ROI_BOX_HEAD.FC_DIM,\n # # sm-de-rec, sm\n \"known_score_thresh\" : cfg.MODEL.ROI_HEADS.KNOWN_SCORE_THRESH,\n \"known_nms_thresh\" : cfg.MODEL.ROI_HEADS.KNOWN_NMS_THRESH,\n \"known_topk\" : cfg.MODEL.ROI_HEADS.KNOWN_TOPK,\n \"unknown_score_thresh\" : cfg.MODEL.ROI_HEADS.UNKNOWN_SCORE_THRESH,\n \"unknown_nms_thresh\" : cfg.MODEL.ROI_HEADS.UNKNOWN_NMS_THRESH,\n \"unknown_topk\" : cfg.MODEL.ROI_HEADS.UNKNOWN_TOPK,\n \"cls_loss_weight\" : cfg.MODEL.ROI_BOX_HEAD.CLS_LOSS_WEIGHT\n }\n \n def 
loss(self, dml_features, proposals):\n \"\"\"\n Args:\n dml_features (Tensor): feature output from PLN.\n proposals (list[Instances]): proposals that match the features that were used\n to compute predictions. The fields ``gt_classes`` are expected.\n \n Returns:\n Tensor: classification loss\n \"\"\"\n scores = self.cls_score(dml_features)\n\n gt_classes = (\n cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)\n )\n _log_classification_stats(scores, self.id_map[gt_classes])\n\n loss = cross_entropy(scores, self.id_map[gt_classes], reduction=\"mean\")\n\n return self.cls_loss_weight * loss\n \n def inference(self, fg_instances: List[Instances]):\n \"\"\"\n Args:\n fg_instances (list[Instances]): A list of N instances, one for each image in the batch,\n that stores the top most confidence detections including pred_boxes (Boxes), \n pred_classes (Tensor), features (Tensor) and scores (Tensor).\n\n Returns:\n instances: (list[Instances]): A list of N instances, one for each image in the batch,\n that stores the top most confidence detections.\n \"\"\"\n results = []\n for fg_instances_per_image in fg_instances:\n if self.opendet_benchmark:\n known = fg_instances_per_image.pred_classes != 80\n else:\n known = fg_instances_per_image.pred_classes != 1000\n\n known_features = fg_instances_per_image.features[known]\n known_scores = self.cls_score(known_features)\n known_probs = F.softmax(known_scores, dim=-1)\n\n result_k = fast_rcnn_inference_single_image_known(\n fg_instances_per_image.pred_boxes[known].tensor,\n known_probs,\n fg_instances_per_image.image_size,\n self.known_score_thresh,\n self.known_nms_thresh,\n self.known_topk\n )\n if not known.all():\n result_unk = fast_rcnn_inference_single_image_unknown(\n fg_instances_per_image.pred_boxes[~known].tensor,\n fg_instances_per_image.scores[~known],\n fg_instances_per_image.image_size,\n self.unknown_score_thresh,\n self.unknown_nms_thresh,\n self.unknown_topk,\n self.opendet_benchmark\n )\n res = Instances(fg_instances_per_image.image_size)\n res.pred_boxes = Boxes.cat([result_unk.pred_boxes, \n result_k.pred_boxes])\n res.scores = cat([result_unk.scores, result_k.scores])\n if self.opendet_benchmark:\n res.pred_classes = cat([result_unk.pred_classes, result_k.pred_classes])\n else:\n res.pred_classes = cat([result_unk.pred_classes, self.class_id[result_k.pred_classes]])\n else:\n res = Instances(fg_instances_per_image.image_size)\n res.pred_boxes = result_k.pred_boxes\n res.scores = result_k.scores\n if self.opendet_benchmark:\n res.pred_classes = result_k.pred_classes\n else:\n res.pred_classes = self.class_id[result_k.pred_classes]\n \n results.append(res)\n \n return results"
},
{
"identifier": "PLN",
"path": "openset_rcnn/modeling/roi_heads/prototype_learning_network.py",
"snippet": "class PLN(nn.Module):\n \"\"\"\n Prototype Learning Network.\n \"\"\"\n @configurable\n def __init__(\n self,\n num_classes: int,\n num_known_classes: int,\n feature_dim: int,\n embedding_dim: int,\n distance_type: str,\n reps_per_class: int,\n alpha: float,\n beta: float,\n loss_weight: float,\n dataset_name: str,\n iou_threshold: float,\n unk_thr: float,\n opendet_benchmark: bool\n ):\n \"\"\"\n Args:\n num_classes (int): number of foreground classes.\n num_known_classes (int): number of known foreground classes.\n feature_dim (int): dim of RoI feature.\n embedding_dim (int): dim of embedding space in PLN.\n distance_type (str): the distance type used in PLN. Supported type: \"L1\", \"L2\", \"COS\".\n reps_per_class (int): number of representatives per foreground class.\n alpha (float): threshold of intra distance.\n beta (float): threshold of inter distance.\n loss_weight (float): weight to use for PLN loss.\n dataset_name (str): name of training set.\n iou_threshold (float): threshold to select foreground instances.\n unk_thr (float): threshold to differentiate unknown objects.\n opendet_benchmark (bool): whether to use OpenDet benchmark.\n \"\"\"\n super().__init__()\n self.num_classes = num_classes\n self.num_known_classes = num_known_classes\n self.feature_dim = feature_dim\n self.embedding_dim = embedding_dim\n self.distance_type = distance_type\n self.reps_per_class = reps_per_class\n self.alpha = alpha\n self.beta = beta\n self.loss_weight = loss_weight\n self.unk_thr = unk_thr\n self.opendet_benchmark = opendet_benchmark\n\n self.encoder = nn.Linear(self.feature_dim, self.embedding_dim, device='cuda')\n nn.init.normal_(self.encoder.weight, std=0.01)\n nn.init.constant_(self.encoder.bias, 0)\n\n self.decoder = nn.Linear(self.embedding_dim, self.feature_dim, device='cuda')\n nn.init.normal_(self.decoder.weight, std=0.01)\n nn.init.constant_(self.decoder.bias, 0)\n\n self.representatives = nn.parameter.Parameter(\n torch.zeros(self.num_known_classes * self.reps_per_class, self.embedding_dim)\n )\n nn.init.normal_(self.representatives)\n\n if not self.opendet_benchmark:\n meta = MetadataCatalog.get(dataset_name)\n self.class_id, _ = torch.sort(\n torch.tensor(\n [meta.thing_dataset_id_to_contiguous_id[thing_id] for thing_id in GRASPNET_KNOWN_IDS], \n device='cuda'\n )\n )\n\n self.id_map = torch.zeros(self.num_classes+1, device='cuda') - 1\n for i, v in enumerate(self.class_id):\n self.id_map[v] = torch.tensor(i, device='cuda')\n self.id_map[self.num_classes] = torch.tensor(self.num_known_classes, device='cuda')\n\n self.class_id = self.class_id.long()\n self.id_map = self.id_map.long()\n\n self.iou_threshold = iou_threshold\n\n @classmethod\n def from_config(cls, cfg):\n return {\n \"num_classes\": cfg.MODEL.ROI_HEADS.NUM_CLASSES,\n \"num_known_classes\": cfg.MODEL.ROI_HEADS.NUM_KNOWN_CLASSES,\n \"feature_dim\": cfg.MODEL.ROI_BOX_HEAD.FC_DIM,\n \"embedding_dim\": cfg.MODEL.PLN.EMD_DIM,\n \"distance_type\": cfg.MODEL.PLN.DISTANCE_TYPE,\n \"reps_per_class\": cfg.MODEL.PLN.REPS_PER_CLASS,\n \"alpha\": cfg.MODEL.PLN.ALPHA,\n \"beta\": cfg.MODEL.PLN.BETA,\n \"loss_weight\": cfg.MODEL.PLN.LOSS_WEIGHT, \n \"dataset_name\": cfg.DATASETS.TRAIN[0],\n \"iou_threshold\": cfg.MODEL.PLN.IOU_THRESHOLD,\n \"unk_thr\": cfg.MODEL.PLN.UNK_THR,\n \"opendet_benchmark\": cfg.OPENDET_BENCHMARK,\n }\n\n def loss(self, roi_features: torch.Tensor, proposals: List[Instances]):\n \"\"\"\n PLN loss: L = y_ij * max(Dij-alpha,0) + (1-y_ij) * max(beta-Dij,0).\n\n Args:\n roi_features (Tensor): shape 
(#images * num_samples, feature_dim),\n features after ROI Align and FC.\n proposals (list[Instances]): the per-image object proposals with\n their matching ground truth.\n Each has fields \"proposal_boxes\", and \"objectness_logits\",\n \"gt_classes\", \"gt_boxes\".\n \n Returns:\n Tensor: PLN loss.\n \"\"\"\n # Tensor (images * num_samples, embedding_dim)\n emb_features = self.encoder(roi_features)\n new_features = F.normalize(emb_features)\n rec_features = self.decoder(emb_features)\n # Tensor (num_known_classes * reps_per_class, embedding_dim)\n representatives = F.normalize(self.representatives) \n \n ious = (\n cat([p.ious for p in proposals], dim=0) if len(proposals) else torch.empty(0)\n )\n\n gt_classes = (\n cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)\n )\n if not self.opendet_benchmark:\n gt_classes = self.id_map[gt_classes]\n \n fg_inds = nonzero_tuple(\n (gt_classes >= 0) & (gt_classes < self.num_known_classes) & (ious > self.iou_threshold)\n )[0]\n \n new_features = new_features[fg_inds]\n\n # Tensor (num_fg_samples, num_known_classes * reps_per_class)\n if self.distance_type == 'L1':\n dist = torch.cdist(new_features, representatives, p=1.0)\n elif self.distance_type == 'L2':\n dist = torch.cdist(new_features, representatives)\n elif self.distance_type == 'COS':\n dist = 1.0 - torch.mm(new_features, representatives.transpose(0,1))\n \n # Tensor (num_fg_samples, num_known_classes)\n min_dist, _ = torch.min(dist.reshape(-1, self.num_known_classes, self.reps_per_class), dim=2) \n # Tensor (num_fg_samples)\n intra_dist = min_dist[torch.arange(min_dist.shape[0]), gt_classes[fg_inds]] \n\n min_dist[torch.arange(min_dist.shape[0]), gt_classes[fg_inds]] = 1000\n inter_dist, _ = torch.min(min_dist, dim=1)\n\n if self.distance_type == 'L1':\n center_dist = torch.cdist(representatives, representatives, p=1.0)\n elif self.distance_type == 'L2':\n center_dist = torch.cdist(representatives, representatives)\n elif self.distance_type == 'COS':\n center_dist = 1.0 - torch.mm(representatives, representatives.transpose(0,1))\n\n center_dist_clone = center_dist.clone()\n for i in range(self.num_known_classes):\n center_dist_clone[i * self.reps_per_class:(i+1)*self.reps_per_class, i * self.reps_per_class:(i+1)*self.reps_per_class] = 1000\n c_dist, _ = torch.min(center_dist_clone, dim=1)\n\n dml_loss = torch.sum(torch.max(intra_dist-self.alpha, torch.zeros_like(intra_dist))) + \\\n torch.sum(torch.max(self.beta - inter_dist, torch.zeros_like(inter_dist))) + \\\n torch.sum(torch.max(self.beta + self.alpha - c_dist, torch.zeros_like(c_dist)))\n \n return emb_features, rec_features, dml_loss * self.loss_weight / max(gt_classes.numel(), 1.0)\n\n def inference(self, fg_instances: List[Instances]):\n \"\"\"\n Args:\n fg_instances (list[Instances]): A list of N instances, one for each image in the batch,\n that stores the topk most confidence detections including pred_boxes (Boxes), \n ious (Tensor) and features (Tensor).\n\n Returns: \n list[Instances]: add pred_classes to fg_instances, `num_classes+1` for unknown.\n \"\"\"\n representatives = F.normalize(self.representatives)\n\n results = []\n for fg_instances_per_image in fg_instances:\n features_per_image = fg_instances_per_image.features\n emb_features_per_image = self.encoder(features_per_image)\n rec_features_per_image = self.decoder(emb_features_per_image)\n new_features_per_image = F.normalize(emb_features_per_image)\n\n if self.distance_type == 'L1':\n dist = torch.cdist(new_features_per_image, 
representatives, p=1.0)\n elif self.distance_type == 'L2':\n dist = torch.cdist(new_features_per_image, representatives)\n elif self.distance_type == 'COS':\n dist = 1.0 - torch.mm(new_features_per_image, representatives.transpose(0,1))\n\n min_dist, _ = torch.min(dist.reshape(-1, self.num_known_classes, self.reps_per_class), dim=2) \n min_dist, min_index = torch.min(min_dist, dim=1)\n\n unknown = (min_dist > self.unk_thr).nonzero().squeeze()\n if self.opendet_benchmark:\n min_index[unknown] = 80\n else:\n min_index = self.class_id[min_index]\n min_index[unknown] = 1000\n\n fg_instances_per_image.features = rec_features_per_image\n fg_instances_per_image.pred_classes = min_index\n\n results.append(fg_instances_per_image)\n\n return results\n\n def encode(self, roi_features):\n new_features = F.normalize(self.encoder(roi_features))\n return new_features"
}
] | import inspect
import logging
import numpy as np
import torch
from typing import Dict, List, Optional, Tuple
from torch import nn
from detectron2.config import configurable
from detectron2.layers import ShapeSpec
from detectron2.structures import ImageList, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.modeling.roi_heads.roi_heads import ROIHeads, ROI_HEADS_REGISTRY
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.proposal_generator.proposal_utils import add_ground_truth_to_proposals
from detectron2.modeling.roi_heads.box_head import build_box_head
from .osrcnn_fast_rcnn import OpensetFastRCNNOutputLayers
from .softmax_classifier import SoftMaxClassifier
from .prototype_learning_network import PLN | 8,876 | # Copyright (c) Facebook, Inc. and its affiliates.
logger = logging.getLogger(__name__)
@ROI_HEADS_REGISTRY.register()
class OpensetROIHeads(ROIHeads):
"""
Openset RoI Head.
1. RoI Align and FC
2. * bbox regression
* iou prediction
* prototype learning
"""
@configurable
def __init__(
self,
*,
box_in_features: List[str],
box_pooler: ROIPooler,
box_head: nn.Module,
box_predictor: nn.Module,
dml: nn.Module,
softmaxcls: nn.Module,
train_on_pred_boxes: bool = False,
**kwargs,
):
"""
NOTE: this interface is experimental.
Args:
box_in_features (list[str]): list of feature names to use for the box head.
            box_pooler (ROIPooler): pooler to extract region features for box head
box_head (nn.Module): transform features to make box predictions
box_predictor (nn.Module): make box predictions from the feature.
Should have the same interface as :class:`FastRCNNOutputLayers`.
dml: metric learning class.
softmaxcls: softmax classifier class.
train_on_pred_boxes (bool): whether to use proposal boxes or
predicted boxes from the box head to train other heads.
"""
super().__init__(**kwargs)
# keep self.in_features for backward compatibility
self.in_features = self.box_in_features = box_in_features
self.box_pooler = box_pooler
self.box_head = box_head
self.box_predictor = box_predictor
self.train_on_pred_boxes = train_on_pred_boxes
self.dml = dml
self.softmaxcls = softmaxcls
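        # box_predictor (OpensetFastRCNNOutputLayers) regresses boxes and predicts per-box IoU,
        # dml is the prototype learning network (PLN) and softmaxcls classifies the known categories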
@classmethod
def from_config(cls, cfg, input_shape):
ret = super().from_config(cfg)
ret["train_on_pred_boxes"] = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES
# Subclasses that have not been updated to use from_config style construction
# may have overridden _init_*_head methods. In this case, those overridden methods
# will not be classmethods and we need to avoid trying to call them here.
# We test for this with ismethod which only returns True for bound methods of cls.
# Such subclasses will need to handle calling their overridden _init_*_head methods.
if inspect.ismethod(cls._init_box_head):
ret.update(cls._init_box_head(cfg, input_shape))
return ret
@classmethod
def _init_box_head(cls, cfg, input_shape):
# fmt: off
in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
# fmt: on
# If StandardROIHeads is applied on multiple feature maps (as in FPN),
# then we share the same predictors and therefore the channel counts must be the same
in_channels = [input_shape[f].channels for f in in_features]
# Check all channel counts are equal
assert len(set(in_channels)) == 1, in_channels
in_channels = in_channels[0]
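        # ROIPooler crops each proposal from its assigned feature level and resizes it to
        # pooler_resolution x pooler_resolution before it is fed to the box head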
box_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
# Here we split "box head" and "box predictor", which is mainly due to historical reasons.
# They are used together so the "box predictor" layers should be part of the "box head".
# New subclasses of ROIHeads do not need "box predictor"s.
box_head = build_box_head(
cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)
)
box_predictor = OpensetFastRCNNOutputLayers(cfg, box_head.output_shape)
| # Copyright (c) Facebook, Inc. and its affiliates.
logger = logging.getLogger(__name__)
@ROI_HEADS_REGISTRY.register()
class OpensetROIHeads(ROIHeads):
"""
Openset RoI Head.
1. RoI Align and FC
2. * bbox regression
* iou prediction
* prototype learning
"""
@configurable
def __init__(
self,
*,
box_in_features: List[str],
box_pooler: ROIPooler,
box_head: nn.Module,
box_predictor: nn.Module,
dml: nn.Module,
softmaxcls: nn.Module,
train_on_pred_boxes: bool = False,
**kwargs,
):
"""
NOTE: this interface is experimental.
Args:
box_in_features (list[str]): list of feature names to use for the box head.
            box_pooler (ROIPooler): pooler to extract region features for box head
box_head (nn.Module): transform features to make box predictions
box_predictor (nn.Module): make box predictions from the feature.
Should have the same interface as :class:`FastRCNNOutputLayers`.
dml: metric learning class.
softmaxcls: softmax classifier class.
train_on_pred_boxes (bool): whether to use proposal boxes or
predicted boxes from the box head to train other heads.
"""
super().__init__(**kwargs)
# keep self.in_features for backward compatibility
self.in_features = self.box_in_features = box_in_features
self.box_pooler = box_pooler
self.box_head = box_head
self.box_predictor = box_predictor
self.train_on_pred_boxes = train_on_pred_boxes
self.dml = dml
self.softmaxcls = softmaxcls
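        # box_predictor (OpensetFastRCNNOutputLayers) regresses boxes and predicts per-box IoU,
        # dml is the prototype learning network (PLN) and softmaxcls classifies the known categories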
@classmethod
def from_config(cls, cfg, input_shape):
ret = super().from_config(cfg)
ret["train_on_pred_boxes"] = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES
# Subclasses that have not been updated to use from_config style construction
# may have overridden _init_*_head methods. In this case, those overridden methods
# will not be classmethods and we need to avoid trying to call them here.
# We test for this with ismethod which only returns True for bound methods of cls.
# Such subclasses will need to handle calling their overridden _init_*_head methods.
if inspect.ismethod(cls._init_box_head):
ret.update(cls._init_box_head(cfg, input_shape))
return ret
@classmethod
def _init_box_head(cls, cfg, input_shape):
# fmt: off
in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
# fmt: on
# If StandardROIHeads is applied on multiple feature maps (as in FPN),
# then we share the same predictors and therefore the channel counts must be the same
in_channels = [input_shape[f].channels for f in in_features]
# Check all channel counts are equal
assert len(set(in_channels)) == 1, in_channels
in_channels = in_channels[0]
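        # ROIPooler crops each proposal from its assigned feature level and resizes it to
        # pooler_resolution x pooler_resolution before it is fed to the box head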
box_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
# Here we split "box head" and "box predictor", which is mainly due to historical reasons.
# They are used together so the "box predictor" layers should be part of the "box head".
# New subclasses of ROIHeads do not need "box predictor"s.
box_head = build_box_head(
cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)
)
box_predictor = OpensetFastRCNNOutputLayers(cfg, box_head.output_shape)
| dml = PLN(cfg) | 2 | 2023-11-21 01:47:01+00:00 | 12k |
MICLab-Unicamp/medpseg | medpseg/edet/modeling_efficientdet.py | [
{
"identifier": "SelfAttention",
"path": "medpseg/self_attention.py",
"snippet": "class SelfAttention(nn.Module):\n '''\n Spatial attention module, with 1x1 convolutions, idea from\n ASSESSING KNEE OA SEVERITY WITH CNN ATTENTION-BASED END-TO-END ARCHITECTURES\n '''\n def __init__(self, in_ch, dim):\n super().__init__()\n self.first_conv = getattr(nn, f\"Conv{dim}\")(in_ch, in_ch//2, kernel_size=1, padding=0, stride=1, bias=False)\n self.second_conv = getattr(nn, f\"Conv{dim}\")(in_ch//2, in_ch//4, kernel_size=1, padding=0, stride=1, bias=False)\n self.third_conv = getattr(nn, f\"Conv{dim}\")(in_ch//4, 1, kernel_size=1, padding=0, stride=1, bias=False)\n\n def forward(self, x):\n y = self.first_conv(x)\n y = F.leaky_relu(y, inplace=True)\n y = self.second_conv(y)\n y = F.leaky_relu(y, inplace=True)\n self.att = self.third_conv(y).sigmoid()\n return x*self.att"
},
{
"identifier": "MemoryEfficientSwish",
"path": "medpseg/edet/efficientnet/utils.py",
"snippet": "class MemoryEfficientSwish(nn.Module):\n def forward(self, x):\n return SwishImplementation.apply(x)"
},
{
"identifier": "BiFPN",
"path": "medpseg/edet/efficientdet/model.py",
"snippet": "class BiFPN(nn.Module):\n \"\"\"\n modified by Zylo117\n \"\"\"\n\n def __init__(self, num_channels, conv_channels, first_time=False, epsilon=1e-4, onnx_export=False, attention=True):\n \"\"\"\n\n Args:\n num_channels:\n conv_channels:\n first_time: whether the input comes directly from the efficientnet,\n if True, downchannel it first, and downsample P5 to generate P6 then P7\n epsilon: epsilon of fast weighted attention sum of BiFPN, not the BN's epsilon\n onnx_export: if True, use Swish instead of MemoryEfficientSwish\n \"\"\"\n super(BiFPN, self).__init__()\n self.epsilon = epsilon\n # Conv layers\n self.conv6_up = SeparableConvBlock(\n num_channels, onnx_export=onnx_export)\n self.conv5_up = SeparableConvBlock(\n num_channels, onnx_export=onnx_export)\n self.conv4_up = SeparableConvBlock(\n num_channels, onnx_export=onnx_export)\n self.conv3_up = SeparableConvBlock(\n num_channels, onnx_export=onnx_export)\n self.conv4_down = SeparableConvBlock(\n num_channels, onnx_export=onnx_export)\n self.conv5_down = SeparableConvBlock(\n num_channels, onnx_export=onnx_export)\n self.conv6_down = SeparableConvBlock(\n num_channels, onnx_export=onnx_export)\n self.conv7_down = SeparableConvBlock(\n num_channels, onnx_export=onnx_export)\n\n # Feature scaling layers\n self.p6_upsample = nn.Upsample(scale_factor=2, mode='nearest')\n self.p5_upsample = nn.Upsample(scale_factor=2, mode='nearest')\n self.p4_upsample = nn.Upsample(scale_factor=2, mode='nearest')\n self.p3_upsample = nn.Upsample(scale_factor=2, mode='nearest')\n\n self.p4_downsample = MaxPool2dStaticSamePadding(3, 2)\n self.p5_downsample = MaxPool2dStaticSamePadding(3, 2)\n self.p6_downsample = MaxPool2dStaticSamePadding(3, 2)\n self.p7_downsample = MaxPool2dStaticSamePadding(3, 2)\n\n self.swish = MemoryEfficientSwish() if not onnx_export else Swish()\n\n self.first_time = first_time\n if self.first_time:\n self.p5_down_channel = nn.Sequential(\n Conv2dStaticSamePadding(conv_channels[2], num_channels, 1),\n nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),\n )\n self.p4_down_channel = nn.Sequential(\n Conv2dStaticSamePadding(conv_channels[1], num_channels, 1),\n nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),\n )\n self.p3_down_channel = nn.Sequential(\n Conv2dStaticSamePadding(conv_channels[0], num_channels, 1),\n nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),\n )\n\n self.p5_to_p6 = nn.Sequential(\n Conv2dStaticSamePadding(conv_channels[2], num_channels, 1),\n nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),\n MaxPool2dStaticSamePadding(3, 2)\n )\n self.p6_to_p7 = nn.Sequential(\n MaxPool2dStaticSamePadding(3, 2)\n )\n\n self.p4_down_channel_2 = nn.Sequential(\n Conv2dStaticSamePadding(conv_channels[1], num_channels, 1),\n nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),\n )\n self.p5_down_channel_2 = nn.Sequential(\n Conv2dStaticSamePadding(conv_channels[2], num_channels, 1),\n nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),\n )\n\n # Weight\n self.p6_w1 = nn.Parameter(torch.ones(\n 2, dtype=torch.float32), requires_grad=True)\n self.p6_w1_relu = nn.ReLU()\n self.p5_w1 = nn.Parameter(torch.ones(\n 2, dtype=torch.float32), requires_grad=True)\n self.p5_w1_relu = nn.ReLU()\n self.p4_w1 = nn.Parameter(torch.ones(\n 2, dtype=torch.float32), requires_grad=True)\n self.p4_w1_relu = nn.ReLU()\n self.p3_w1 = nn.Parameter(torch.ones(\n 2, dtype=torch.float32), requires_grad=True)\n self.p3_w1_relu = nn.ReLU()\n\n self.p4_w2 = nn.Parameter(torch.ones(\n 3, dtype=torch.float32), 
requires_grad=True)\n self.p4_w2_relu = nn.ReLU()\n self.p5_w2 = nn.Parameter(torch.ones(\n 3, dtype=torch.float32), requires_grad=True)\n self.p5_w2_relu = nn.ReLU()\n self.p6_w2 = nn.Parameter(torch.ones(\n 3, dtype=torch.float32), requires_grad=True)\n self.p6_w2_relu = nn.ReLU()\n self.p7_w2 = nn.Parameter(torch.ones(\n 2, dtype=torch.float32), requires_grad=True)\n self.p7_w2_relu = nn.ReLU()\n\n self.attention = attention\n\n def forward(self, inputs):\n \"\"\"\n illustration of a minimal bifpn unit\n P7_0 -------------------------> P7_2 -------->\n |-------------| ↑\n ↓ |\n P6_0 ---------> P6_1 ---------> P6_2 -------->\n |-------------|--------------↑ ↑\n ↓ |\n P5_0 ---------> P5_1 ---------> P5_2 -------->\n |-------------|--------------↑ ↑\n ↓ |\n P4_0 ---------> P4_1 ---------> P4_2 -------->\n |-------------|--------------↑ ↑\n |--------------↓ |\n P3_0 -------------------------> P3_2 -------->\n \"\"\"\n\n # downsample channels using same-padding conv2d to target phase's if not the same\n # judge: same phase as target,\n # if same, pass;\n # elif earlier phase, downsample to target phase's by pooling\n # elif later phase, upsample to target phase's by nearest interpolation\n if self.attention:\n p3_out, p4_out, p5_out, p6_out, p7_out = self._forward_fast_attention(\n inputs)\n else:\n p3_out, p4_out, p5_out, p6_out, p7_out = self._forward(inputs)\n\n return p3_out, p4_out, p5_out, p6_out, p7_out\n\n def _forward_fast_attention(self, inputs):\n if self.first_time:\n p3, p4, p5 = inputs\n\n p6_in = self.p5_to_p6(p5)\n p7_in = self.p6_to_p7(p6_in)\n\n p3_in = self.p3_down_channel(p3)\n p4_in = self.p4_down_channel(p4)\n p5_in = self.p5_down_channel(p5)\n\n else:\n # P3_0, P4_0, P5_0, P6_0 and P7_0\n p3_in, p4_in, p5_in, p6_in, p7_in = inputs\n\n # P7_0 to P7_2\n\n # Weights for P6_0 and P7_0 to P6_1\n p6_w1 = self.p6_w1_relu(self.p6_w1)\n weight = p6_w1 / (torch.sum(p6_w1, dim=0) + self.epsilon)\n # Connections for P6_0 and P7_0 to P6_1 respectively\n p6_up = self.conv6_up(self.swish(weight[0] * p6_in + weight[1] * self.p6_upsample(p7_in)))\n\n # Weights for P5_0 and P6_1 to P5_1\n p5_w1 = self.p5_w1_relu(self.p5_w1)\n weight = p5_w1 / (torch.sum(p5_w1, dim=0) + self.epsilon)\n # Connections for P5_0 and P6_0 to P5_1 respectively\n p5_up = self.conv5_up(self.swish(weight[0] * p5_in + weight[1] * self.p5_upsample(p6_up)))\n\n # Weights for P4_0 and P5_1 to P4_1\n p4_w1 = self.p4_w1_relu(self.p4_w1)\n weight = p4_w1 / (torch.sum(p4_w1, dim=0) + self.epsilon)\n # Connections for P4_0 and P5_0 to P4_1 respectively\n p4_up = self.conv4_up(self.swish(weight[0] * p4_in + weight[1] * self.p4_upsample(p5_up)))\n\n # Weights for P3_0 and P4_1 to P3_2\n p3_w1 = self.p3_w1_relu(self.p3_w1)\n weight = p3_w1 / (torch.sum(p3_w1, dim=0) + self.epsilon)\n # Connections for P3_0 and P4_1 to P3_2 respectively\n p3_out = self.conv3_up(self.swish(weight[0] * p3_in + weight[1] * self.p3_upsample(p4_up)))\n\n if self.first_time:\n p4_in = self.p4_down_channel_2(p4)\n p5_in = self.p5_down_channel_2(p5)\n\n # Weights for P4_0, P4_1 and P3_2 to P4_2\n p4_w2 = self.p4_w2_relu(self.p4_w2)\n weight = p4_w2 / (torch.sum(p4_w2, dim=0) + self.epsilon)\n # Connections for P4_0, P4_1 and P3_2 to P4_2 respectively\n p4_out = self.conv4_down(self.swish(weight[0] * p4_in + weight[1] * p4_up + weight[2] * self.p4_downsample(p3_out)))\n\n # Weights for P5_0, P5_1 and P4_2 to P5_2\n p5_w2 = self.p5_w2_relu(self.p5_w2)\n weight = p5_w2 / (torch.sum(p5_w2, dim=0) + self.epsilon)\n # Connections for P5_0, P5_1 and P4_2 to 
P5_2 respectively\n p5_out = self.conv5_down(\n self.swish(weight[0] * p5_in + weight[1] * p5_up + weight[2] * self.p5_downsample(p4_out)))\n\n # Weights for P6_0, P6_1 and P5_2 to P6_2\n p6_w2 = self.p6_w2_relu(self.p6_w2)\n weight = p6_w2 / (torch.sum(p6_w2, dim=0) + self.epsilon)\n # Connections for P6_0, P6_1 and P5_2 to P6_2 respectively\n p6_out = self.conv6_down(\n self.swish(weight[0] * p6_in + weight[1] * p6_up + weight[2] * self.p6_downsample(p5_out)))\n\n # Weights for P7_0 and P6_2 to P7_2\n p7_w2 = self.p7_w2_relu(self.p7_w2)\n weight = p7_w2 / (torch.sum(p7_w2, dim=0) + self.epsilon)\n # Connections for P7_0 and P6_2 to P7_2\n p7_out = self.conv7_down(self.swish(\n weight[0] * p7_in + weight[1] * self.p7_downsample(p6_out)))\n\n return p3_out, p4_out, p5_out, p6_out, p7_out\n\n def _forward(self, inputs):\n if self.first_time:\n p3, p4, p5 = inputs\n\n p6_in = self.p5_to_p6(p5)\n p7_in = self.p6_to_p7(p6_in)\n\n p3_in = self.p3_down_channel(p3)\n p4_in = self.p4_down_channel(p4)\n p5_in = self.p5_down_channel(p5)\n\n else:\n # P3_0, P4_0, P5_0, P6_0 and P7_0\n p3_in, p4_in, p5_in, p6_in, p7_in = inputs\n\n # P7_0 to P7_2\n\n # Connections for P6_0 and P7_0 to P6_1 respectively\n p6_up = self.conv6_up(self.swish(p6_in + self.p6_upsample(p7_in)))\n\n # Connections for P5_0 and P6_1 to P5_1 respectively\n p5_up = self.conv5_up(self.swish(p5_in + self.p5_upsample(p6_up)))\n\n # Connections for P4_0 and P5_1 to P4_1 respectively\n p4_up = self.conv4_up(self.swish(p4_in + self.p4_upsample(p5_up)))\n\n # Connections for P3_0 and P4_1 to P3_2 respectively\n p3_out = self.conv3_up(self.swish(p3_in + self.p3_upsample(p4_up)))\n\n if self.first_time:\n p4_in = self.p4_down_channel_2(p4)\n p5_in = self.p5_down_channel_2(p5)\n\n # Connections for P4_0, P4_1 and P3_2 to P4_2 respectively\n p4_out = self.conv4_down(\n self.swish(p4_in + p4_up + self.p4_downsample(p3_out)))\n\n # Connections for P5_0, P5_1 and P4_2 to P5_2 respectively\n p5_out = self.conv5_down(\n self.swish(p5_in + p5_up + self.p5_downsample(p4_out)))\n\n # Connections for P6_0, P6_1 and P5_2 to P6_2 respectively\n p6_out = self.conv6_down(\n self.swish(p6_in + p6_up + self.p6_downsample(p5_out)))\n\n # Connections for P7_0 and P6_2 to P7_2\n p7_out = self.conv7_down(self.swish(\n p7_in + self.p7_downsample(p6_out)))\n\n return p3_out, p4_out, p5_out, p6_out, p7_out"
},
{
"identifier": "EfficientNet",
"path": "medpseg/edet/efficientdet/model.py",
"snippet": "class EfficientNet(nn.Module):\n \"\"\"\n modified by Zylo117\n \"\"\"\n\n def __init__(self, compound_coef, load_weights=True):\n super(EfficientNet, self).__init__()\n if load_weights:\n model = EffNet.from_pretrained(f'efficientnet-b{compound_coef}')\n else:\n model = EffNet.from_name(f'efficientnet-b{compound_coef}')\n del model._conv_head\n del model._bn1\n del model._avg_pooling\n del model._dropout\n del model._fc\n self.model = model\n\n def forward(self, x):\n x = self.model._conv_stem(x)\n x = self.model._bn0(x)\n x = self.model._swish(x)\n feature_maps = []\n\n # TODO: temporarily storing extra tensor last_x and del it later might not be a good idea,\n # try recording stride changing when creating efficientnet,\n # and then apply it here.\n last_x = None\n for idx, block in enumerate(self.model._blocks):\n drop_connect_rate = self.model._global_params.drop_connect_rate\n if drop_connect_rate:\n drop_connect_rate *= float(idx) / len(self.model._blocks)\n \n if len(feature_maps) < 3:\n # avoid unnecessary forwards\n x = block(x, drop_connect_rate=drop_connect_rate)\n\n if block._depthwise_conv.stride == [2, 2]:\n feature_maps.append(last_x)\n elif idx == len(self.model._blocks) - 1:\n feature_maps.append(x)\n last_x = x\n\n return feature_maps"
},
{
"identifier": "SegmentationClasssificationHead",
"path": "medpseg/edet/efficientdet/model.py",
"snippet": "class SegmentationClasssificationHead(nn.Module):\n '''\n DLPT v3.5 changes removed some arguments\n '''\n def __init__(self, in_channels, num_classes, num_layers, onnx_export=False, squeeze=False, deep_supervision=False, **kwargs):\n super().__init__()\n self.num_classes = num_classes\n self.num_layers = num_layers\n self.squeeze = squeeze # squeeze channels before header, use when in_channels is too large\n \n # Squeezing channels changes\n if self.squeeze:\n self.internal_channels = [2**i for i in range(num_layers+1)]\n self.conv_list = nn.ModuleList([SeparableConvBlock(in_channels//self.internal_channels[i], in_channels//self.internal_channels[i+1], norm=False, activation=False) for i in range(num_layers)])\n self.bn_list = nn.ModuleList([nn.BatchNorm2d(in_channels//self.internal_channels[i+1], momentum=0.01, eps=1e-3) for i in range(num_layers)])\n self.header = SeparableConvBlock(in_channels//self.internal_channels[num_layers], num_classes, norm=False, activation=False)\n else:\n self.internal_channels = None\n self.conv_list = nn.ModuleList([SeparableConvBlock(in_channels, in_channels, norm=False, activation=False) for i in range(num_layers)])\n self.bn_list = nn.ModuleList([nn.BatchNorm2d(in_channels, momentum=0.01, eps=1e-3) for i in range(num_layers)])\n self.header = SeparableConvBlock(in_channels, num_classes, norm=False, activation=False)\n\n # Swishes are swishes\n self.swish = MemoryEfficientSwish() if not onnx_export else Swish()\n\n self.deep_supervision = deep_supervision\n if self.deep_supervision:\n self.ds_seg_heads: nn.ModuleList = nn.ModuleList([SegmentationClasssificationHead(in_channels=in_channels, \n num_classes=num_classes, \n num_layers=num_layers, \n onnx_export=onnx_export, \n squeeze=squeeze, \n deep_supervision=False) for _ in range(4)])\n\n def forward(self, feat, return_tensor=False):\n # Main head\n if torch.is_tensor(feat):\n main_feat = feat\n else:\n main_feat = feat[0]\n\n for i, bn, conv in zip(range(self.num_layers), self.bn_list, self.conv_list):\n main_feat = conv(main_feat)\n main_feat = bn(main_feat)\n main_feat = self.swish(main_feat)\n main_feat = self.header(main_feat)\n\n return_dict = {\"main\": main_feat} \n if self.deep_supervision:\n assert isinstance(feat, list)\n for i in range(1, 5):\n return_dict[f\"main{i}\"] = self.ds_seg_heads[i-1](feat[i])[\"main\"]\n\n if return_tensor:\n assert not self.deep_supervision, \"Deep supervision should not attempt to return a single tensor\"\n return return_dict[\"main\"]\n else:\n return return_dict"
},
{
"identifier": "CirculatoryBranch",
"path": "medpseg/edet/efficientdet/model.py",
"snippet": "class CirculatoryBranch(nn.Module):\n '''\n This is intented to be used to optimize vessels and airways at the same time.\n '''\n def __init__(self, bifpn, bifpn_channels, in_channels, num_classes, num_layers, squeeze, feature_adapters, expand_bifpn, expand_conv, deep_supervision, self_attention):\n super().__init__()\n self.bifpn = bifpn\n self.feature_adapters = feature_adapters\n self.expand_bifpn = expand_bifpn\n self.expand_conv = expand_conv\n self.deep_supervision = deep_supervision\n self.attention = self_attention\n self.airway_head = SegmentationClasssificationHead(in_channels=in_channels,\n num_classes=num_classes, \n num_layers=num_layers,\n squeeze=squeeze\n )\n self.vessel_head = SegmentationClasssificationHead(in_channels=in_channels,\n num_classes=num_classes, \n num_layers=num_layers,\n squeeze=squeeze\n )\n if self.attention:\n self.attention_modules: nn.ModuleList = nn.ModuleList([SelfAttention(bifpn_channels, dim='2d') for _ in range(5)])\n if self.deep_supervision:\n self.ds_airway_heads: nn.ModuleList = nn.ModuleList([SegmentationClasssificationHead(in_channels=in_channels,\n num_classes=num_classes, \n num_layers=num_layers,\n squeeze=squeeze\n ) for _ in range(4)])\n self.ds_vessel_heads: nn.ModuleList = nn.ModuleList([SegmentationClasssificationHead(in_channels=in_channels,\n num_classes=num_classes, \n num_layers=num_layers,\n squeeze=squeeze\n ) for _ in range(4)])\n \n def forward(self, backbone_features):\n '''\n Replicates the original medseg forward actually\n '''\n feat_map = self.bifpn(backbone_features)\n \n # Apply attention gates in BiFPN outputs\n if self.attention:\n assert len(feat_map) == len(self.attention_modules)\n feat_map = [attention_module(x) for x, attention_module in zip(feat_map, self.attention_modules)]\n \n if self.feature_adapters is not None:\n if self.deep_supervision:\n raise RuntimeError(\"Can't do deep supervision with feature adapters\")\n feat_map = self.feature_adapters(feat_map)\n else:\n if self.deep_supervision:\n # Apply expand bifpn on all feat_maps and still keep the list for deep supervision\n new_feat_map = []\n for feat in feat_map:\n if self.expand_bifpn is not None and self.expand_bifpn != True and self.expand_bifpn != \"no\":\n new_feat_map.append(self.expand_conv(feat))\n feat_map = new_feat_map\n else:\n feat_map = feat_map[0] # higher resolution feature is first\n\n if self.expand_bifpn is not None and self.expand_bifpn != True and self.expand_bifpn != \"no\":\n feat_map = self.expand_conv(feat_map)\n \n \n if self.deep_supervision:\n assert isinstance(feat_map, list)\n return_dict = {\"atm\": self.airway_head(feat_map[0], return_tensor=True), \"vessel\": self.vessel_head(feat_map[0], return_tensor=True)}\n for i in range(1, 5):\n return_dict[f\"atm{i}\"] = self.ds_airway_heads[i-1](feat_map[i], return_tensor=True)\n return_dict[f\"vessel{i}\"] = self.ds_vessel_heads[i-1](feat_map[i], return_tensor=True)\n else:\n return_dict = {\"atm\": self.airway_head(feat_map, return_tensor=True), \"vessel\": self.vessel_head(feat_map, return_tensor=True)}\n\n return return_dict"
}
] | from typing import List, Optional, Tuple, Union
from collections import OrderedDict
from torch import nn
from medpseg.self_attention import SelfAttention
from medpseg.edet.efficientnet.utils import MemoryEfficientSwish
from medpseg.edet.efficientdet.model import BiFPN, EfficientNet, SegmentationClasssificationHead, CirculatoryBranch
import torch | 8,881 | elif key == "seg_exponential_stride_compression":
self.feature_adapters = nn.ModuleList([nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=11, padding=5, stride=128, dilation=6, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=9, padding=4, stride=64, dilation=5, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=7, padding=3, stride=32, dilation=4, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=5, padding=2, stride=16, dilation=3, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=3, padding=1, stride=8, dilation=2, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False))])
self.upsampler = nn.UpsamplingBilinear2d(scale_factor=2)
self.pooling = nn.AdaptiveAvgPool2d(1)
elif key == "nonlinear_esc": # Save this for future embbedding building for transformers
# Reduced stride progression, trusting average pooling, makes network work with 128x128 inputs minimum
self.feature_adapters = nn.ModuleList([nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=11, padding=5, stride=64, dilation=4, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False),
nn.LeakyReLU()),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=9, padding=4, stride=32, dilation=3, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False),
nn.LeakyReLU()),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=7, padding=3, stride=16, dilation=3, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False),
nn.LeakyReLU()),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=5, padding=2, stride=8, dilation=2, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False),
nn.LeakyReLU()),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=3, padding=1, stride=4, dilation=2, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False),
nn.LeakyReLU())])
self.upsampler = nn.UpsamplingBilinear2d(scale_factor=2)
self.pooling = nn.AdaptiveAvgPool2d(1)
else:
raise ValueError(f"Unsupported feature adapter {key}. Use one of {FeatureFusion.SUPPORTED_STRATS}")
self.latent_space = None
def get_latent_space(self):
# Save this for future transformer involvement
B, C, _, _ = self.latent_space.shape
return self.latent_space.reshape(B, C)
def forward(self, in_features: List[torch.Tensor]) -> Optional[torch.Tensor]:
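        # in_features holds the five BiFPN outputs, highest spatial resolution first;
        # each adapter resizes or compresses its level before fusion according to self.key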
out_features = None
for feature_adapter, in_feature in zip(self.feature_adapters, in_features):
if out_features is None: # first thing
out_features = feature_adapter(in_feature)
elif self.key == "upsample_cat":
out_features = torch.cat([out_features, feature_adapter(in_feature)], dim=1) # upsample cat concatenates in channel dimension
else:
out_features += feature_adapter(in_feature)
if self.key in ["nonlinear_esc", "seg_exponential_stride_compression"]:
self.latent_space = self.pooling(out_features)
return self.upsampler(in_features[0]) * self.latent_space # latent space weights channel contributions
else:
return out_features
class EfficientDetForSemanticSegmentation(nn.Module):
def __init__(self,
load_weights:bool = True,
num_classes: int = 2,
compound_coef: int = 4,
repeat: int = 3,
expand_bifpn: Union[bool, str] = False,
backbone: str = "effnet",
circulatory_branch: bool = False,
bifpn_channels: int = 128,
squeeze:bool = False,
deep_supervision: bool = False,
self_attention: bool = False,
soft_circulatory: bool = False,
**kwargs): # dump for old variables
'''
        load_weights: whether to load pretrained weights for the backbone
        num_classes: number of classes for the primary downstream segmentation task
        compound_coef: which EfficientNet variant to base the architecture on; only 4 is supported
        repeat: how many conv blocks in the segmentation head
expand_bifpn: how to expand the bifpn features. Upsample is best
backbone: efficientnet or convnext as backbone
        num_classes_aux: number of classes for a secondary segmentation task; if None, the second output is not initialized (legacy argument, absorbed by **kwargs)
'''
super().__init__()
for k, v in kwargs.items():
print(f"WARNING: MEDSeg Argument {k}={v} being ignored")
self.compound_coef = compound_coef
self.backbone_compound_coef = [0, 1, 2, 3, 4, 5, 6, 7]
self.input_sizes = [512, 640, 768, 896, 1024, 1280, 1280, 1536]
self.num_classes = num_classes
self.expand_bifpn = expand_bifpn
self.backbone = backbone
self.self_attention = self_attention
self.deep_supervision = deep_supervision
if self.self_attention:
self.attention_modules: nn.ModuleList = nn.ModuleList([SelfAttention(bifpn_channels, dim='2d') for _ in range(5)])
if self.expand_bifpn == "upsample_cat":
self.upsample_cat_scaling = 5
else:
self.upsample_cat_scaling = 1 # scale expected input of segmentation heads
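        # upsample_cat concatenates the five BiFPN levels along channels, so head input channels grow 5x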
        # Build the expansion convolution / feature fusion module required by the chosen expand_bifpn strategy
feature_fusion = self.set_expand_conv()
conv_channel_coef = {
# the channels of P2/P3/P4.
0: [16, 24, 40],
4: [24, 32, 56],
6: [32, 40, 72],
7: [32, 48, 80],
-1: [96, 192, 384]
}
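        # the -1 entry holds the backbone channel counts used when ConvNeXt replaces EfficientNet (see below)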
if self.backbone == "convnext":
print("Changing compound coeff of BiFPN due to convnext backbone")
compound_coef = -1
print(f"Convnext upsample scale {self.convnext_upsample_scale}")
| '''
Copyright (c) Diedre Carmo, Medical Imaging Computing Lab (MICLab, https://miclab.fee.unicamp.br/).
https://github.com/MICLab-Unicamp/medpseg
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Original Author: Zylo117
Modified by Israel, adopted by Diedre as an initial EfficientDet implementation and extended to MEDPSeg related implementations
April 2023: removed code not directly related to MEDSeg and extra deprecations
'''
class FeatureFusion(nn.Module):
'''
Feature fusion module that makes use of all BiFPN features for segmentation instead of only
upsampling the highest spatial resolution.
upsample_sum: upsamples and sums all features
(ESC) exponential_stride_compression: increases kernel size and dilation and exponentially increases the stride to compress features, from B, C, x, y into a B, C, x/256, y/256 array that can be linearized easily with reshape. Minimum input size 256x256.
seg_exponential_stride_compression: use values derived from ESC to weight high resolution features
'''
SUPPORTED_STRATS = ["cat", "upsample_sum", "upsample_cat", "exponential_stride_compression", "seg_exponential_stride_compression", "nonlinear_esc"]
def __init__(self, in_c: int, out_c: int, key: Union[bool, str]):
super().__init__()
print(f"SELECTING FEATURE ADAPTER: {key}")
self.key = key
if key == "cat":
# Concatenate features without over upsampling (results in features /2 the spatial resolution of the input)
self.feature_adapters = nn.ModuleList([nn.Identity(),
nn.UpsamplingBilinear2d(scale_factor=2),
nn.UpsamplingBilinear2d(scale_factor=4),
nn.UpsamplingBilinear2d(scale_factor=8),
nn.UpsamplingBilinear2d(scale_factor=16)])
elif key == "upsample_sum" or key == "upsample_cat":
self.feature_adapters = nn.ModuleList([nn.UpsamplingBilinear2d(scale_factor=2),
nn.UpsamplingBilinear2d(scale_factor=4),
nn.UpsamplingBilinear2d(scale_factor=8),
nn.UpsamplingBilinear2d(scale_factor=16),
nn.UpsamplingBilinear2d(scale_factor=32)])
elif key == "exponential_stride_compression":
self.feature_adapters = nn.ModuleList([nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=11, padding=5, stride=128, dilation=6, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=9, padding=4, stride=64, dilation=5, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=7, padding=3, stride=32, dilation=4, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=5, padding=2, stride=16, dilation=3, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=3, padding=1, stride=8, dilation=2, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False))])
elif key == "seg_exponential_stride_compression":
self.feature_adapters = nn.ModuleList([nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=11, padding=5, stride=128, dilation=6, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=9, padding=4, stride=64, dilation=5, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=7, padding=3, stride=32, dilation=4, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=5, padding=2, stride=16, dilation=3, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=3, padding=1, stride=8, dilation=2, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False))])
self.upsampler = nn.UpsamplingBilinear2d(scale_factor=2)
self.pooling = nn.AdaptiveAvgPool2d(1)
elif key == "nonlinear_esc": # Save this for future embbedding building for transformers
# Reduced stride progression, trusting average pooling, makes network work with 128x128 inputs minimum
self.feature_adapters = nn.ModuleList([nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=11, padding=5, stride=64, dilation=4, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False),
nn.LeakyReLU()),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=9, padding=4, stride=32, dilation=3, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False),
nn.LeakyReLU()),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=7, padding=3, stride=16, dilation=3, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False),
nn.LeakyReLU()),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=5, padding=2, stride=8, dilation=2, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False),
nn.LeakyReLU()),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=3, padding=1, stride=4, dilation=2, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False),
nn.LeakyReLU())])
self.upsampler = nn.UpsamplingBilinear2d(scale_factor=2)
self.pooling = nn.AdaptiveAvgPool2d(1)
else:
raise ValueError(f"Unsupported feature adapter {key}. Use one of {FeatureFusion.SUPPORTED_STRATS}")
self.latent_space = None
def get_latent_space(self):
# Save this for future transformer involvement
B, C, _, _ = self.latent_space.shape
return self.latent_space.reshape(B, C)
def forward(self, in_features: List[torch.Tensor]) -> Optional[torch.Tensor]:
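        # in_features holds the five BiFPN outputs, highest spatial resolution first;
        # each adapter resizes or compresses its level before fusion according to self.key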
out_features = None
for feature_adapter, in_feature in zip(self.feature_adapters, in_features):
if out_features is None: # first thing
out_features = feature_adapter(in_feature)
elif self.key == "upsample_cat":
out_features = torch.cat([out_features, feature_adapter(in_feature)], dim=1) # upsample cat concatenates in channel dimension
else:
out_features += feature_adapter(in_feature)
if self.key in ["nonlinear_esc", "seg_exponential_stride_compression"]:
self.latent_space = self.pooling(out_features)
return self.upsampler(in_features[0]) * self.latent_space # latent space weights channel contributions
else:
return out_features
class EfficientDetForSemanticSegmentation(nn.Module):
def __init__(self,
load_weights: bool = True,
num_classes: int = 2,
compound_coef: int = 4,
repeat: int = 3,
expand_bifpn: Union[bool, str] = False,
backbone: str = "effnet",
circulatory_branch: bool = False,
bifpn_channels: int = 128,
squeeze: bool = False,
deep_supervision: bool = False,
self_attention: bool = False,
soft_circulatory: bool = False,
**kwargs): # catch-all for deprecated/unused arguments
'''
load_weights: whether to load pre-trained weights for the backbone
num_classes: number of classes for the primary downstream segmentation task
compound_coef: which EfficientNet variant to base the architecture on; only 4 is supported
repeat: number of conv blocks in the segmentation head
expand_bifpn: how to expand the BiFPN features; "upsample" works best
backbone: use EfficientNet or ConvNeXt as the backbone
num_classes_aux: number of classes for the secondary segmentation task; if None, the second output is not initialized
'''
super().__init__()
for k, v in kwargs.items():
print(f"WARNING: MEDSeg Argument {k}={v} being ignored")
self.compound_coef = compound_coef
self.backbone_compound_coef = [0, 1, 2, 3, 4, 5, 6, 7]
self.input_sizes = [512, 640, 768, 896, 1024, 1280, 1280, 1536]
self.num_classes = num_classes
self.expand_bifpn = expand_bifpn
self.backbone = backbone
self.self_attention = self_attention
self.deep_supervision = deep_supervision
if self.self_attention:
self.attention_modules: nn.ModuleList = nn.ModuleList([SelfAttention(bifpn_channels, dim='2d') for _ in range(5)])
if self.expand_bifpn == "upsample_cat":
self.upsample_cat_scaling = 5
else:
self.upsample_cat_scaling = 1 # scale expected input of segmentation heads
# Check whether expand_bifpn requires a feature fusion module and configure it
feature_fusion = self.set_expand_conv()
conv_channel_coef = {
# the channels of P2/P3/P4.
0: [16, 24, 40],
4: [24, 32, 56],
6: [32, 40, 72],
7: [32, 48, 80],
-1: [96, 192, 384]
}
if self.backbone == "convnext":
print("Changing compound coeff of BiFPN due to convnext backbone")
compound_coef = -1
print(f"Convnext upsample scale {self.convnext_upsample_scale}")
| self.bifpn = nn.Sequential(*[BiFPN(bifpn_channels, | 2 | 2023-11-21 20:03:33+00:00 | 12k |
amikey/Fooocus | fooocus_extras/facexlib/detection/retinaface.py | [
{
"identifier": "get_reference_facial_points",
"path": "fooocus_extras/facexlib/detection/align_trans.py",
"snippet": "def get_reference_facial_points(output_size=None, inner_padding_factor=0.0, outer_padding=(0, 0), default_square=False):\n \"\"\"\n Function:\n ----------\n get reference 5 key points according to crop settings:\n 0. Set default crop_size:\n if default_square:\n crop_size = (112, 112)\n else:\n crop_size = (96, 112)\n 1. Pad the crop_size by inner_padding_factor in each side;\n 2. Resize crop_size into (output_size - outer_padding*2),\n pad into output_size with outer_padding;\n 3. Output reference_5point;\n Parameters:\n ----------\n @output_size: (w, h) or None\n size of aligned face image\n @inner_padding_factor: (w_factor, h_factor)\n padding factor for inner (w, h)\n @outer_padding: (w_pad, h_pad)\n each row is a pair of coordinates (x, y)\n @default_square: True or False\n if True:\n default crop_size = (112, 112)\n else:\n default crop_size = (96, 112);\n !!! make sure, if output_size is not None:\n (output_size - outer_padding)\n = some_scale * (default crop_size * (1.0 +\n inner_padding_factor))\n Returns:\n ----------\n @reference_5point: 5x2 np.array\n each row is a pair of transformed coordinates (x, y)\n \"\"\"\n\n tmp_5pts = np.array(REFERENCE_FACIAL_POINTS)\n tmp_crop_size = np.array(DEFAULT_CROP_SIZE)\n\n # 0) make the inner region a square\n if default_square:\n size_diff = max(tmp_crop_size) - tmp_crop_size\n tmp_5pts += size_diff / 2\n tmp_crop_size += size_diff\n\n if (output_size and output_size[0] == tmp_crop_size[0] and output_size[1] == tmp_crop_size[1]):\n\n return tmp_5pts\n\n if (inner_padding_factor == 0 and outer_padding == (0, 0)):\n if output_size is None:\n return tmp_5pts\n else:\n raise FaceWarpException('No paddings to do, output_size must be None or {}'.format(tmp_crop_size))\n\n # check output size\n if not (0 <= inner_padding_factor <= 1.0):\n raise FaceWarpException('Not (0 <= inner_padding_factor <= 1.0)')\n\n if ((inner_padding_factor > 0 or outer_padding[0] > 0 or outer_padding[1] > 0) and output_size is None):\n output_size = tmp_crop_size * \\\n (1 + inner_padding_factor * 2).astype(np.int32)\n output_size += np.array(outer_padding)\n if not (outer_padding[0] < output_size[0] and outer_padding[1] < output_size[1]):\n raise FaceWarpException('Not (outer_padding[0] < output_size[0] and outer_padding[1] < output_size[1])')\n\n # 1) pad the inner region according inner_padding_factor\n if inner_padding_factor > 0:\n size_diff = tmp_crop_size * inner_padding_factor * 2\n tmp_5pts += size_diff / 2\n tmp_crop_size += np.round(size_diff).astype(np.int32)\n\n # 2) resize the padded inner region\n size_bf_outer_pad = np.array(output_size) - np.array(outer_padding) * 2\n\n if size_bf_outer_pad[0] * tmp_crop_size[1] != size_bf_outer_pad[1] * tmp_crop_size[0]:\n raise FaceWarpException('Must have (output_size - outer_padding)'\n '= some_scale * (crop_size * (1.0 + inner_padding_factor)')\n\n scale_factor = size_bf_outer_pad[0].astype(np.float32) / tmp_crop_size[0]\n tmp_5pts = tmp_5pts * scale_factor\n # size_diff = tmp_crop_size * (scale_factor - min(scale_factor))\n # tmp_5pts = tmp_5pts + size_diff / 2\n tmp_crop_size = size_bf_outer_pad\n\n # 3) add outer_padding to make output_size\n reference_5point = tmp_5pts + np.array(outer_padding)\n tmp_crop_size = output_size\n\n return reference_5point"
},
{
"identifier": "warp_and_crop_face",
"path": "fooocus_extras/facexlib/detection/align_trans.py",
"snippet": "def warp_and_crop_face(src_img, facial_pts, reference_pts=None, crop_size=(96, 112), align_type='smilarity'):\n \"\"\"\n Function:\n ----------\n apply affine transform 'trans' to uv\n Parameters:\n ----------\n @src_img: 3x3 np.array\n input image\n @facial_pts: could be\n 1)a list of K coordinates (x,y)\n or\n 2) Kx2 or 2xK np.array\n each row or col is a pair of coordinates (x, y)\n @reference_pts: could be\n 1) a list of K coordinates (x,y)\n or\n 2) Kx2 or 2xK np.array\n each row or col is a pair of coordinates (x, y)\n or\n 3) None\n if None, use default reference facial points\n @crop_size: (w, h)\n output face image size\n @align_type: transform type, could be one of\n 1) 'similarity': use similarity transform\n 2) 'cv2_affine': use the first 3 points to do affine transform,\n by calling cv2.getAffineTransform()\n 3) 'affine': use all points to do affine transform\n Returns:\n ----------\n @face_img: output face image with size (w, h) = @crop_size\n \"\"\"\n\n if reference_pts is None:\n if crop_size[0] == 96 and crop_size[1] == 112:\n reference_pts = REFERENCE_FACIAL_POINTS\n else:\n default_square = False\n inner_padding_factor = 0\n outer_padding = (0, 0)\n output_size = crop_size\n\n reference_pts = get_reference_facial_points(output_size, inner_padding_factor, outer_padding,\n default_square)\n\n ref_pts = np.float32(reference_pts)\n ref_pts_shp = ref_pts.shape\n if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2:\n raise FaceWarpException('reference_pts.shape must be (K,2) or (2,K) and K>2')\n\n if ref_pts_shp[0] == 2:\n ref_pts = ref_pts.T\n\n src_pts = np.float32(facial_pts)\n src_pts_shp = src_pts.shape\n if max(src_pts_shp) < 3 or min(src_pts_shp) != 2:\n raise FaceWarpException('facial_pts.shape must be (K,2) or (2,K) and K>2')\n\n if src_pts_shp[0] == 2:\n src_pts = src_pts.T\n\n if src_pts.shape != ref_pts.shape:\n raise FaceWarpException('facial_pts and reference_pts must have the same shape')\n\n if align_type == 'cv2_affine':\n tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3])\n elif align_type == 'affine':\n tfm = get_affine_transform_matrix(src_pts, ref_pts)\n else:\n tfm = get_similarity_transform_for_cv2(src_pts, ref_pts)\n\n face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]))\n\n return face_img"
},
{
"identifier": "FPN",
"path": "fooocus_extras/facexlib/detection/retinaface_net.py",
"snippet": "class FPN(nn.Module):\n\n def __init__(self, in_channels_list, out_channels):\n super(FPN, self).__init__()\n leaky = 0\n if (out_channels <= 64):\n leaky = 0.1\n self.output1 = conv_bn1X1(in_channels_list[0], out_channels, stride=1, leaky=leaky)\n self.output2 = conv_bn1X1(in_channels_list[1], out_channels, stride=1, leaky=leaky)\n self.output3 = conv_bn1X1(in_channels_list[2], out_channels, stride=1, leaky=leaky)\n\n self.merge1 = conv_bn(out_channels, out_channels, leaky=leaky)\n self.merge2 = conv_bn(out_channels, out_channels, leaky=leaky)\n\n def forward(self, input):\n # names = list(input.keys())\n # input = list(input.values())\n\n output1 = self.output1(input[0])\n output2 = self.output2(input[1])\n output3 = self.output3(input[2])\n\n up3 = F.interpolate(output3, size=[output2.size(2), output2.size(3)], mode='nearest')\n output2 = output2 + up3\n output2 = self.merge2(output2)\n\n up2 = F.interpolate(output2, size=[output1.size(2), output1.size(3)], mode='nearest')\n output1 = output1 + up2\n output1 = self.merge1(output1)\n\n out = [output1, output2, output3]\n return out"
},
{
"identifier": "SSH",
"path": "fooocus_extras/facexlib/detection/retinaface_net.py",
"snippet": "class SSH(nn.Module):\n\n def __init__(self, in_channel, out_channel):\n super(SSH, self).__init__()\n assert out_channel % 4 == 0\n leaky = 0\n if (out_channel <= 64):\n leaky = 0.1\n self.conv3X3 = conv_bn_no_relu(in_channel, out_channel // 2, stride=1)\n\n self.conv5X5_1 = conv_bn(in_channel, out_channel // 4, stride=1, leaky=leaky)\n self.conv5X5_2 = conv_bn_no_relu(out_channel // 4, out_channel // 4, stride=1)\n\n self.conv7X7_2 = conv_bn(out_channel // 4, out_channel // 4, stride=1, leaky=leaky)\n self.conv7x7_3 = conv_bn_no_relu(out_channel // 4, out_channel // 4, stride=1)\n\n def forward(self, input):\n conv3X3 = self.conv3X3(input)\n\n conv5X5_1 = self.conv5X5_1(input)\n conv5X5 = self.conv5X5_2(conv5X5_1)\n\n conv7X7_2 = self.conv7X7_2(conv5X5_1)\n conv7X7 = self.conv7x7_3(conv7X7_2)\n\n out = torch.cat([conv3X3, conv5X5, conv7X7], dim=1)\n out = F.relu(out)\n return out"
},
{
"identifier": "MobileNetV1",
"path": "fooocus_extras/facexlib/detection/retinaface_net.py",
"snippet": "class MobileNetV1(nn.Module):\n\n def __init__(self):\n super(MobileNetV1, self).__init__()\n self.stage1 = nn.Sequential(\n conv_bn(3, 8, 2, leaky=0.1), # 3\n conv_dw(8, 16, 1), # 7\n conv_dw(16, 32, 2), # 11\n conv_dw(32, 32, 1), # 19\n conv_dw(32, 64, 2), # 27\n conv_dw(64, 64, 1), # 43\n )\n self.stage2 = nn.Sequential(\n conv_dw(64, 128, 2), # 43 + 16 = 59\n conv_dw(128, 128, 1), # 59 + 32 = 91\n conv_dw(128, 128, 1), # 91 + 32 = 123\n conv_dw(128, 128, 1), # 123 + 32 = 155\n conv_dw(128, 128, 1), # 155 + 32 = 187\n conv_dw(128, 128, 1), # 187 + 32 = 219\n )\n self.stage3 = nn.Sequential(\n conv_dw(128, 256, 2), # 219 +3 2 = 241\n conv_dw(256, 256, 1), # 241 + 64 = 301\n )\n self.avg = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(256, 1000)\n\n def forward(self, x):\n x = self.stage1(x)\n x = self.stage2(x)\n x = self.stage3(x)\n x = self.avg(x)\n # x = self.model(x)\n x = x.view(-1, 256)\n x = self.fc(x)\n return x"
},
{
"identifier": "make_bbox_head",
"path": "fooocus_extras/facexlib/detection/retinaface_net.py",
"snippet": "def make_bbox_head(fpn_num=3, inchannels=64, anchor_num=2):\n bboxhead = nn.ModuleList()\n for i in range(fpn_num):\n bboxhead.append(BboxHead(inchannels, anchor_num))\n return bboxhead"
},
{
"identifier": "make_class_head",
"path": "fooocus_extras/facexlib/detection/retinaface_net.py",
"snippet": "def make_class_head(fpn_num=3, inchannels=64, anchor_num=2):\n classhead = nn.ModuleList()\n for i in range(fpn_num):\n classhead.append(ClassHead(inchannels, anchor_num))\n return classhead"
},
{
"identifier": "make_landmark_head",
"path": "fooocus_extras/facexlib/detection/retinaface_net.py",
"snippet": "def make_landmark_head(fpn_num=3, inchannels=64, anchor_num=2):\n landmarkhead = nn.ModuleList()\n for i in range(fpn_num):\n landmarkhead.append(LandmarkHead(inchannels, anchor_num))\n return landmarkhead"
},
{
"identifier": "PriorBox",
"path": "fooocus_extras/facexlib/detection/retinaface_utils.py",
"snippet": "class PriorBox(object):\n\n def __init__(self, cfg, image_size=None, phase='train'):\n super(PriorBox, self).__init__()\n self.min_sizes = cfg['min_sizes']\n self.steps = cfg['steps']\n self.clip = cfg['clip']\n self.image_size = image_size\n self.feature_maps = [[ceil(self.image_size[0] / step), ceil(self.image_size[1] / step)] for step in self.steps]\n self.name = 's'\n\n def forward(self):\n anchors = []\n for k, f in enumerate(self.feature_maps):\n min_sizes = self.min_sizes[k]\n for i, j in product(range(f[0]), range(f[1])):\n for min_size in min_sizes:\n s_kx = min_size / self.image_size[1]\n s_ky = min_size / self.image_size[0]\n dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]]\n dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]]\n for cy, cx in product(dense_cy, dense_cx):\n anchors += [cx, cy, s_kx, s_ky]\n\n # back to torch land\n output = torch.Tensor(anchors).view(-1, 4)\n if self.clip:\n output.clamp_(max=1, min=0)\n return output"
},
{
"identifier": "batched_decode",
"path": "fooocus_extras/facexlib/detection/retinaface_utils.py",
"snippet": "def batched_decode(b_loc, priors, variances):\n \"\"\"Decode locations from predictions using priors to undo\n the encoding we did for offset regression at train time.\n Args:\n b_loc (tensor): location predictions for loc layers,\n Shape: [num_batches,num_priors,4]\n priors (tensor): Prior boxes in center-offset form.\n Shape: [1,num_priors,4].\n variances: (list[float]) Variances of priorboxes\n Return:\n decoded bounding box predictions\n \"\"\"\n boxes = (\n priors[:, :, :2] + b_loc[:, :, :2] * variances[0] * priors[:, :, 2:],\n priors[:, :, 2:] * torch.exp(b_loc[:, :, 2:] * variances[1]),\n )\n boxes = torch.cat(boxes, dim=2)\n\n boxes[:, :, :2] -= boxes[:, :, 2:] / 2\n boxes[:, :, 2:] += boxes[:, :, :2]\n return boxes"
},
{
"identifier": "batched_decode_landm",
"path": "fooocus_extras/facexlib/detection/retinaface_utils.py",
"snippet": "def batched_decode_landm(pre, priors, variances):\n \"\"\"Decode landm from predictions using priors to undo\n the encoding we did for offset regression at train time.\n Args:\n pre (tensor): landm predictions for loc layers,\n Shape: [num_batches,num_priors,10]\n priors (tensor): Prior boxes in center-offset form.\n Shape: [1,num_priors,4].\n variances: (list[float]) Variances of priorboxes\n Return:\n decoded landm predictions\n \"\"\"\n landms = (\n priors[:, :, :2] + pre[:, :, :2] * variances[0] * priors[:, :, 2:],\n priors[:, :, :2] + pre[:, :, 2:4] * variances[0] * priors[:, :, 2:],\n priors[:, :, :2] + pre[:, :, 4:6] * variances[0] * priors[:, :, 2:],\n priors[:, :, :2] + pre[:, :, 6:8] * variances[0] * priors[:, :, 2:],\n priors[:, :, :2] + pre[:, :, 8:10] * variances[0] * priors[:, :, 2:],\n )\n landms = torch.cat(landms, dim=2)\n return landms"
},
{
"identifier": "decode",
"path": "fooocus_extras/facexlib/detection/retinaface_utils.py",
"snippet": "def decode(loc, priors, variances):\n \"\"\"Decode locations from predictions using priors to undo\n the encoding we did for offset regression at train time.\n Args:\n loc (tensor): location predictions for loc layers,\n Shape: [num_priors,4]\n priors (tensor): Prior boxes in center-offset form.\n Shape: [num_priors,4].\n variances: (list[float]) Variances of priorboxes\n Return:\n decoded bounding box predictions\n \"\"\"\n\n boxes = torch.cat((priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],\n priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)\n boxes[:, :2] -= boxes[:, 2:] / 2\n boxes[:, 2:] += boxes[:, :2]\n return boxes"
},
{
"identifier": "decode_landm",
"path": "fooocus_extras/facexlib/detection/retinaface_utils.py",
"snippet": "def decode_landm(pre, priors, variances):\n \"\"\"Decode landm from predictions using priors to undo\n the encoding we did for offset regression at train time.\n Args:\n pre (tensor): landm predictions for loc layers,\n Shape: [num_priors,10]\n priors (tensor): Prior boxes in center-offset form.\n Shape: [num_priors,4].\n variances: (list[float]) Variances of priorboxes\n Return:\n decoded landm predictions\n \"\"\"\n tmp = (\n priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],\n priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],\n priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],\n priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],\n priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:],\n )\n landms = torch.cat(tmp, dim=1)\n return landms"
},
{
"identifier": "py_cpu_nms",
"path": "fooocus_extras/facexlib/detection/retinaface_utils.py",
"snippet": "def py_cpu_nms(dets, thresh):\n \"\"\"Pure Python NMS baseline.\"\"\"\n keep = torchvision.ops.nms(\n boxes=torch.Tensor(dets[:, :4]),\n scores=torch.Tensor(dets[:, 4]),\n iou_threshold=thresh,\n )\n\n return list(keep)"
}
] | import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from PIL import Image
from torchvision.models._utils import IntermediateLayerGetter as IntermediateLayerGetter
from fooocus_extras.facexlib.detection.align_trans import get_reference_facial_points, warp_and_crop_face
from fooocus_extras.facexlib.detection.retinaface_net import FPN, SSH, MobileNetV1, make_bbox_head, make_class_head, make_landmark_head
from fooocus_extras.facexlib.detection.retinaface_utils import (PriorBox, batched_decode, batched_decode_landm, decode, decode_landm,
py_cpu_nms) | 7,929 | boxes = boxes * self.scale / self.resize
boxes = boxes.cpu().numpy()
scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
landmarks = decode_landm(landmarks.squeeze(0), priors, self.cfg['variance'])
landmarks = landmarks * self.scale1 / self.resize
landmarks = landmarks.cpu().numpy()
# ignore low scores
inds = np.where(scores > conf_threshold)[0]
boxes, landmarks, scores = boxes[inds], landmarks[inds], scores[inds]
# sort
order = scores.argsort()[::-1]
boxes, landmarks, scores = boxes[order], landmarks[order], scores[order]
# do NMS
bounding_boxes = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
keep = py_cpu_nms(bounding_boxes, nms_threshold)
bounding_boxes, landmarks = bounding_boxes[keep, :], landmarks[keep]
# self.t['forward_pass'].toc()
# print(self.t['forward_pass'].average_time)
# import sys
# sys.stdout.flush()
return np.concatenate((bounding_boxes, landmarks), axis=1)
def __align_multi(self, image, boxes, landmarks, limit=None):
if len(boxes) < 1:
return [], []
if limit:
boxes = boxes[:limit]
landmarks = landmarks[:limit]
faces = []
for landmark in landmarks:
facial5points = [[landmark[2 * j], landmark[2 * j + 1]] for j in range(5)]
warped_face = warp_and_crop_face(np.array(image), facial5points, self.reference, crop_size=(112, 112))
faces.append(warped_face)
return np.concatenate((boxes, landmarks), axis=1), faces
def align_multi(self, img, conf_threshold=0.8, limit=None):
rlt = self.detect_faces(img, conf_threshold=conf_threshold)
boxes, landmarks = rlt[:, 0:5], rlt[:, 5:]
return self.__align_multi(img, boxes, landmarks, limit)
# batched detection
def batched_transform(self, frames, use_origin_size):
"""
Arguments:
frames: a list of PIL.Image, or torch.Tensor(shape=[n, h, w, c],
type=np.float32, BGR format).
use_origin_size: whether to use origin size.
"""
from_PIL = True if isinstance(frames[0], Image.Image) else False
# convert to opencv format
if from_PIL:
frames = [cv2.cvtColor(np.asarray(frame), cv2.COLOR_RGB2BGR) for frame in frames]
frames = np.asarray(frames, dtype=np.float32)
# testing scale
im_size_min = np.min(frames[0].shape[0:2])
im_size_max = np.max(frames[0].shape[0:2])
resize = float(self.target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size
if np.round(resize * im_size_max) > self.max_size:
resize = float(self.max_size) / float(im_size_max)
resize = 1 if use_origin_size else resize
# resize
if resize != 1:
if not from_PIL:
frames = F.interpolate(frames, scale_factor=resize)
else:
frames = [
cv2.resize(frame, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
for frame in frames
]
# convert to torch.tensor format
if not from_PIL:
frames = frames.transpose(1, 2).transpose(1, 3).contiguous()
else:
frames = frames.transpose((0, 3, 1, 2))
frames = torch.from_numpy(frames)
return frames, resize
def batched_detect_faces(self, frames, conf_threshold=0.8, nms_threshold=0.4, use_origin_size=True):
"""
Arguments:
frames: a list of PIL.Image, or np.array(shape=[n, h, w, c],
type=np.uint8, BGR format).
conf_threshold: confidence threshold.
nms_threshold: nms threshold.
use_origin_size: whether to use origin size.
Returns:
final_bounding_boxes: list of np.array ([n_boxes, 5],
type=np.float32).
final_landmarks: list of np.array ([n_boxes, 10], type=np.float32).
"""
# self.t['forward_pass'].tic()
frames, self.resize = self.batched_transform(frames, use_origin_size)
frames = frames.to(self.device)
frames = frames - self.mean_tensor
b_loc, b_conf, b_landmarks, priors = self.__detect_faces(frames)
final_bounding_boxes, final_landmarks = [], []
# decode
priors = priors.unsqueeze(0)
|
def generate_config(network_name):
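# Build the RetinaFace config dict for the requested backbone ('mobile0.25' or 'resnet50')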
cfg_mnet = {
'name': 'mobilenet0.25',
'min_sizes': [[16, 32], [64, 128], [256, 512]],
'steps': [8, 16, 32],
'variance': [0.1, 0.2],
'clip': False,
'loc_weight': 2.0,
'gpu_train': True,
'batch_size': 32,
'ngpu': 1,
'epoch': 250,
'decay1': 190,
'decay2': 220,
'image_size': 640,
'return_layers': {
'stage1': 1,
'stage2': 2,
'stage3': 3
},
'in_channel': 32,
'out_channel': 64
}
cfg_re50 = {
'name': 'Resnet50',
'min_sizes': [[16, 32], [64, 128], [256, 512]],
'steps': [8, 16, 32],
'variance': [0.1, 0.2],
'clip': False,
'loc_weight': 2.0,
'gpu_train': True,
'batch_size': 24,
'ngpu': 4,
'epoch': 100,
'decay1': 70,
'decay2': 90,
'image_size': 840,
'return_layers': {
'layer2': 1,
'layer3': 2,
'layer4': 3
},
'in_channel': 256,
'out_channel': 256
}
if network_name == 'mobile0.25':
return cfg_mnet
elif network_name == 'resnet50':
return cfg_re50
else:
raise NotImplementedError(f'network_name={network_name}')
class RetinaFace(nn.Module):
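# RetinaFace detector: backbone (MobileNet 0.25 or ResNet-50) + FPN + SSH context modules with class, bbox and 5-point landmark heads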
def __init__(self, network_name='resnet50', half=False, phase='test', device=None):
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device is None else device
super(RetinaFace, self).__init__()
self.half_inference = half
cfg = generate_config(network_name)
self.backbone = cfg['name']
self.model_name = f'retinaface_{network_name}'
self.cfg = cfg
self.phase = phase
self.target_size, self.max_size = 1600, 2150
self.resize, self.scale, self.scale1 = 1., None, None
self.mean_tensor = torch.tensor([[[[104.]], [[117.]], [[123.]]]], device=self.device)
self.reference = get_reference_facial_points(default_square=True)
# Build network.
backbone = None
if cfg['name'] == 'mobilenet0.25':
backbone = MobileNetV1()
self.body = IntermediateLayerGetter(backbone, cfg['return_layers'])
elif cfg['name'] == 'Resnet50':
backbone = models.resnet50(weights=None)
self.body = IntermediateLayerGetter(backbone, cfg['return_layers'])
in_channels_stage2 = cfg['in_channel']
in_channels_list = [
in_channels_stage2 * 2,
in_channels_stage2 * 4,
in_channels_stage2 * 8,
]
out_channels = cfg['out_channel']
self.fpn = FPN(in_channels_list, out_channels)
self.ssh1 = SSH(out_channels, out_channels)
self.ssh2 = SSH(out_channels, out_channels)
self.ssh3 = SSH(out_channels, out_channels)
self.ClassHead = make_class_head(fpn_num=3, inchannels=cfg['out_channel'])
self.BboxHead = make_bbox_head(fpn_num=3, inchannels=cfg['out_channel'])
self.LandmarkHead = make_landmark_head(fpn_num=3, inchannels=cfg['out_channel'])
self.to(self.device)
self.eval()
if self.half_inference:
self.half()
def forward(self, inputs):
out = self.body(inputs)
if self.backbone == 'mobilenet0.25' or self.backbone == 'Resnet50':
out = list(out.values())
# FPN
fpn = self.fpn(out)
# SSH
feature1 = self.ssh1(fpn[0])
feature2 = self.ssh2(fpn[1])
feature3 = self.ssh3(fpn[2])
features = [feature1, feature2, feature3]
bbox_regressions = torch.cat([self.BboxHead[i](feature) for i, feature in enumerate(features)], dim=1)
classifications = torch.cat([self.ClassHead[i](feature) for i, feature in enumerate(features)], dim=1)
tmp = [self.LandmarkHead[i](feature) for i, feature in enumerate(features)]
ldm_regressions = (torch.cat(tmp, dim=1))
if self.phase == 'train':
output = (bbox_regressions, classifications, ldm_regressions)
else:
output = (bbox_regressions, F.softmax(classifications, dim=-1), ldm_regressions)
return output
def __detect_faces(self, inputs):
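# Run a forward pass and generate the prior anchor boxes for the current input resolution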
# get scale
height, width = inputs.shape[2:]
self.scale = torch.tensor([width, height, width, height], dtype=torch.float32, device=self.device)
tmp = [width, height, width, height, width, height, width, height, width, height]
self.scale1 = torch.tensor(tmp, dtype=torch.float32, device=self.device)
# forward
inputs = inputs.to(self.device)
if self.half_inference:
inputs = inputs.half()
loc, conf, landmarks = self(inputs)
# get priorbox
priorbox = PriorBox(self.cfg, image_size=inputs.shape[2:])
priors = priorbox.forward().to(self.device)
return loc, conf, landmarks, priors
# single image detection
def transform(self, image, use_origin_size):
# convert to opencv format
if isinstance(image, Image.Image):
image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
image = image.astype(np.float32)
# testing scale
im_size_min = np.min(image.shape[0:2])
im_size_max = np.max(image.shape[0:2])
resize = float(self.target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size
if np.round(resize * im_size_max) > self.max_size:
resize = float(self.max_size) / float(im_size_max)
resize = 1 if use_origin_size else resize
# resize
if resize != 1:
image = cv2.resize(image, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
# convert to torch.tensor format
# image -= (104, 117, 123)
image = image.transpose(2, 0, 1)
image = torch.from_numpy(image).unsqueeze(0)
return image, resize
def detect_faces(
self,
image,
conf_threshold=0.8,
nms_threshold=0.4,
use_origin_size=True,
):
image, self.resize = self.transform(image, use_origin_size)
image = image.to(self.device)
if self.half_inference:
image = image.half()
image = image - self.mean_tensor
loc, conf, landmarks, priors = self.__detect_faces(image)
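# decode box and landmark predictions relative to the priors, then rescale them back to the original image size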
boxes = decode(loc.data.squeeze(0), priors.data, self.cfg['variance'])
boxes = boxes * self.scale / self.resize
boxes = boxes.cpu().numpy()
scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
landmarks = decode_landm(landmarks.squeeze(0), priors, self.cfg['variance'])
landmarks = landmarks * self.scale1 / self.resize
landmarks = landmarks.cpu().numpy()
# ignore low scores
inds = np.where(scores > conf_threshold)[0]
boxes, landmarks, scores = boxes[inds], landmarks[inds], scores[inds]
# sort
order = scores.argsort()[::-1]
boxes, landmarks, scores = boxes[order], landmarks[order], scores[order]
# do NMS
bounding_boxes = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
keep = py_cpu_nms(bounding_boxes, nms_threshold)
bounding_boxes, landmarks = bounding_boxes[keep, :], landmarks[keep]
# self.t['forward_pass'].toc()
# print(self.t['forward_pass'].average_time)
# import sys
# sys.stdout.flush()
return np.concatenate((bounding_boxes, landmarks), axis=1)
def __align_multi(self, image, boxes, landmarks, limit=None):
if len(boxes) < 1:
return [], []
if limit:
boxes = boxes[:limit]
landmarks = landmarks[:limit]
faces = []
for landmark in landmarks:
facial5points = [[landmark[2 * j], landmark[2 * j + 1]] for j in range(5)]
warped_face = warp_and_crop_face(np.array(image), facial5points, self.reference, crop_size=(112, 112))
faces.append(warped_face)
return np.concatenate((boxes, landmarks), axis=1), faces
def align_multi(self, img, conf_threshold=0.8, limit=None):
rlt = self.detect_faces(img, conf_threshold=conf_threshold)
boxes, landmarks = rlt[:, 0:5], rlt[:, 5:]
return self.__align_multi(img, boxes, landmarks, limit)
# batched detection
def batched_transform(self, frames, use_origin_size):
"""
Arguments:
frames: a list of PIL.Image, or torch.Tensor(shape=[n, h, w, c],
type=np.float32, BGR format).
use_origin_size: whether to use origin size.
"""
from_PIL = True if isinstance(frames[0], Image.Image) else False
# convert to opencv format
if from_PIL:
frames = [cv2.cvtColor(np.asarray(frame), cv2.COLOR_RGB2BGR) for frame in frames]
frames = np.asarray(frames, dtype=np.float32)
# testing scale
im_size_min = np.min(frames[0].shape[0:2])
im_size_max = np.max(frames[0].shape[0:2])
resize = float(self.target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size
if np.round(resize * im_size_max) > self.max_size:
resize = float(self.max_size) / float(im_size_max)
resize = 1 if use_origin_size else resize
# resize
if resize != 1:
if not from_PIL:
frames = F.interpolate(frames, scale_factor=resize)
else:
frames = [
cv2.resize(frame, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
for frame in frames
]
# convert to torch.tensor format
if not from_PIL:
frames = frames.transpose(1, 2).transpose(1, 3).contiguous()
else:
frames = frames.transpose((0, 3, 1, 2))
frames = torch.from_numpy(frames)
return frames, resize
def batched_detect_faces(self, frames, conf_threshold=0.8, nms_threshold=0.4, use_origin_size=True):
"""
Arguments:
frames: a list of PIL.Image, or np.array(shape=[n, h, w, c],
type=np.uint8, BGR format).
conf_threshold: confidence threshold.
nms_threshold: nms threshold.
use_origin_size: whether to use origin size.
Returns:
final_bounding_boxes: list of np.array ([n_boxes, 5],
type=np.float32).
final_landmarks: list of np.array ([n_boxes, 10], type=np.float32).
"""
# self.t['forward_pass'].tic()
frames, self.resize = self.batched_transform(frames, use_origin_size)
frames = frames.to(self.device)
frames = frames - self.mean_tensor
b_loc, b_conf, b_landmarks, priors = self.__detect_faces(frames)
final_bounding_boxes, final_landmarks = [], []
# decode
priors = priors.unsqueeze(0) | b_loc = batched_decode(b_loc, priors, self.cfg['variance']) * self.scale / self.resize | 9 | 2023-11-25 00:42:32+00:00 | 12k |
DLYuanGod/TinyGPT-V | eval_vqa.py | [
{
"identifier": "OKVQAEvalData",
"path": "minigpt4/datasets/datasets/vqa_datasets.py",
"snippet": "class OKVQAEvalData(torch.utils.data.Dataset):\n def __init__(self, loaded_data, vis_processor, root_path):\n self.loaded_data = loaded_data\n self.root_path = root_path\n self.vis_processor = vis_processor\n\n def __len__(self):\n return len(self.loaded_data)\n \n def __getitem__(self, idx):\n data = self.loaded_data[idx]\n img_id = data['image_id']\n question = data['question']\n question_id = data['question_id']\n img_file = '{:0>12}.jpg'.format(img_id)\n image_path = os.path.join(self.root_path, img_file)\n image = Image.open(image_path).convert('RGB')\n image = self.vis_processor(image)\n question = f\"[vqa] Based on the image, respond to this question with a short answer: {question}\"\n return image, question, question_id, img_id"
},
{
"identifier": "VizWizEvalData",
"path": "minigpt4/datasets/datasets/vqa_datasets.py",
"snippet": "class VizWizEvalData(torch.utils.data.Dataset):\n def __init__(self, loaded_data, vis_processor, root_path):\n self.loaded_data = loaded_data\n self.root_path = root_path\n self.vis_processor = vis_processor\n\n def __len__(self):\n return len(self.loaded_data)\n \n def __getitem__(self, idx):\n data = self.loaded_data[idx]\n img_id = data['image']\n question = data['question']\n answers = data['answers']\n answers = '_'.join([answer['answer'] for answer in answers])\n image_path = os.path.join(self.root_path, img_id)\n image = Image.open(image_path).convert('RGB')\n image = self.vis_processor(image)\n question = f\"[vqa] The question is '{question}' Based on the image, answer the question with a single word or phrase. and reply 'unanswerable' when the provided information is insufficient\"\n return image, question, answers"
},
{
"identifier": "IconQAEvalData",
"path": "minigpt4/datasets/datasets/vqa_datasets.py",
"snippet": "class IconQAEvalData(torch.utils.data.Dataset):\n def __init__(self, loaded_data, vis_processor, root_path):\n self.loaded_data = loaded_data\n self.root_path = root_path\n self.vis_processor = vis_processor\n\n def __len__(self):\n return len(self.loaded_data)\n \n def __getitem__(self, idx):\n data = self.loaded_data[str(idx)]\n \n image_id=data[\"split\"]+\"/\"+data[\"ques_type\"]+\"/\"+str(idx)\n question = data['question']\n image_path = os.path.join(self.root_path, image_id, 'image.png')\n image = Image.open(image_path).convert('RGB')\n image = self.vis_processor(image).half().cuda()\n candidates = '_'.join(data.get('choices',\"\"))\n answer = data['answer']\n question = f\"[vqa] Based on the image, respond to this question with a short answer: {question}\"\n return image, question, candidates, answer"
},
{
"identifier": "GQAEvalData",
"path": "minigpt4/datasets/datasets/vqa_datasets.py",
"snippet": "class GQAEvalData(torch.utils.data.Dataset):\n def __init__(self, loaded_data, vis_processor, root_path):\n self.loaded_data = loaded_data\n self.root_path = root_path\n self.vis_processor = vis_processor\n\n def __len__(self):\n return len(self.loaded_data)\n \n def __getitem__(self, idx):\n ann = self.loaded_data[idx]\n image_id = ann[\"image\"]\n image_path = os.path.join(self.root_path, f\"{image_id}\")\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n question = ann[\"question\"]\n question = f\"[vqa] Based on the image, respond to this question with a short answer: {question}\"\n labels = ann[\"answer\"]\n\n return image, question, labels"
},
{
"identifier": "VSREvalData",
"path": "minigpt4/datasets/datasets/vqa_datasets.py",
"snippet": "class VSREvalData(torch.utils.data.Dataset):\n def __init__(self, loaded_data, vis_processor, root_path):\n self.loaded_data = loaded_data\n self.root_path = root_path\n self.vis_processor = vis_processor\n\n def __len__(self):\n return len(self.loaded_data)\n \n def __getitem__(self, idx):\n ann = self.loaded_data[idx]\n image_path = os.path.join(self.root_path, ann[\"image\"])\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n question = ann[\"caption\"]\n question = f'[vqa] Based on the image, is this statement true or false? {question}'\n labels = 'true' if ann[\"label\"] == 1 else 'false'\n\n return image, question, labels"
},
{
"identifier": "HMEvalData",
"path": "minigpt4/datasets/datasets/vqa_datasets.py",
"snippet": "class HMEvalData(torch.utils.data.Dataset):\n def __init__(self, loaded_data, vis_processor, root_path):\n self.loaded_data = loaded_data\n self.root_path = root_path\n self.vis_processor = vis_processor\n\n def __len__(self):\n return len(self.loaded_data)\n \n def __getitem__(self, idx):\n ann = self.loaded_data[idx]\n image_id = ann[\"img\"]\n image_path = os.path.join(self.root_path, f\"{image_id}\")\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n question = ann[\"text\"]\n question = f\"This is an image writting '{question}'. Is this image hateful? Answer yes or no. Answer:\"\n labels = ann[\"label\"]\n\n return image, question, labels"
},
{
"identifier": "VQA",
"path": "minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaTools/vqa.py",
"snippet": "class VQA:\n def __init__(self, annotation_file=None, question_file=None):\n \"\"\"\n Constructor of VQA helper class for reading and visualizing questions and answers.\n :param annotation_file (str): location of VQA annotation file\n :return:\n \"\"\"\n # load dataset\n self.dataset = {}\n self.questions = {}\n self.qa = {}\n self.qqa = {}\n self.imgToQA = {}\n if not annotation_file == None and not question_file == None:\n # print 'loading VQA annotations and questions into memory...'\n time_t = datetime.datetime.utcnow()\n dataset = json.load(open(annotation_file, 'r'))\n questions = json.load(open(question_file, 'r'))\n # print datetime.datetime.utcnow() - time_t\n self.dataset = dataset\n self.questions = questions\n self.createIndex()\n\n def createIndex(self):\n imgToQA = {ann['image_id']: [] for ann in self.dataset['annotations']}\n qa = {ann['question_id']: [] for ann in self.dataset['annotations']}\n qqa = {ann['question_id']: [] for ann in self.dataset['annotations']}\n for ann in self.dataset['annotations']:\n imgToQA[ann['image_id']] += [ann]\n qa[ann['question_id']] = ann\n for ques in self.questions['questions']:\n qqa[ques['question_id']] = ques\n # print 'index created!'\n\n # create class members\n self.qa = qa\n self.qqa = qqa\n self.imgToQA = imgToQA\n\n def info(self):\n \"\"\"\n Print information about the VQA annotation file.\n :return:\n \"\"\"\n\n # for key, value in self.datset['info'].items():\n # \tprint '%s: %s'%(key, value)\n\n def getQuesIds(self, imgIds=[], quesTypes=[], ansTypes=[]):\n \"\"\"\n Get question ids that satisfy given filter conditions. default skips that filter\n :param \timgIds (int array) : get question ids for given imgs\n quesTypes (str array) : get question ids for given question types\n ansTypes (str array) : get question ids for given answer types\n :return: ids (int array) : integer array of question ids\n \"\"\"\n imgIds = imgIds if type(imgIds) == list else [imgIds]\n quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]\n ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]\n\n if len(imgIds) == len(quesTypes) == len(ansTypes) == 0:\n anns = self.dataset['annotations']\n else:\n if not len(imgIds) == 0:\n anns = sum([self.imgToQA[imgId] for imgId in imgIds if imgId in self.imgToQA], [])\n else:\n anns = self.dataset['annotations']\n anns = anns if len(quesTypes) == 0 else [ann for ann in anns if ann['question_type'] in quesTypes]\n anns = anns if len(ansTypes) == 0 else [ann for ann in anns if ann['answer_type'] in ansTypes]\n ids = [ann['question_id'] for ann in anns]\n return ids\n\n def getImgIds(self, quesIds=[], quesTypes=[], ansTypes=[]):\n \"\"\"\n Get image ids that satisfy given filter conditions. 
default skips that filter\n :param quesIds (int array) : get image ids for given question ids\n quesTypes (str array) : get image ids for given question types\n ansTypes (str array) : get image ids for given answer types\n :return: ids (int array) : integer array of image ids\n \"\"\"\n quesIds = quesIds if type(quesIds) == list else [quesIds]\n quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]\n ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]\n\n if len(quesIds) == len(quesTypes) == len(ansTypes) == 0:\n anns = self.dataset['annotations']\n else:\n if not len(quesIds) == 0:\n anns = sum([self.qa[quesId] for quesId in quesIds if quesId in self.qa], [])\n else:\n anns = self.dataset['annotations']\n anns = anns if len(quesTypes) == 0 else [ann for ann in anns if ann['question_type'] in quesTypes]\n anns = anns if len(ansTypes) == 0 else [ann for ann in anns if ann['answer_type'] in ansTypes]\n ids = [ann['image_id'] for ann in anns]\n return ids\n\n def loadQA(self, ids=[]):\n \"\"\"\n Load questions and answers with the specified question ids.\n :param ids (int array) : integer ids specifying question ids\n :return: qa (object array) : loaded qa objects\n \"\"\"\n if type(ids) == list:\n return [self.qa[id] for id in ids]\n elif type(ids) == int:\n return [self.qa[ids]]\n\n def showQA(self, anns):\n \"\"\"\n Display the specified annotations.\n :param anns (array of object): annotations to display\n :return: None\n \"\"\"\n if len(anns) == 0:\n return 0\n for ann in anns:\n quesId = ann['question_id']\n print(\"Question: %s\" % (self.qqa[quesId]['question']))\n for ans in ann['answers']:\n print(\"Answer %d: %s\" % (ans['answer_id'], ans['answer']))\n\n def loadRes(self, resFile, quesFile):\n \"\"\"\n Load result file and return a result object.\n :param resFile (str) : file name of result file\n :return: res (obj) : result api object\n \"\"\"\n res = VQA()\n res.questions = json.load(open(quesFile))\n res.dataset['info'] = copy.deepcopy(self.questions['info'])\n res.dataset['task_type'] = copy.deepcopy(self.questions['task_type'])\n res.dataset['data_type'] = copy.deepcopy(self.questions['data_type'])\n res.dataset['data_subtype'] = copy.deepcopy(self.questions['data_subtype'])\n res.dataset['license'] = copy.deepcopy(self.questions['license'])\n\n # print 'Loading and preparing results... '\n time_t = datetime.datetime.utcnow()\n anns = json.load(open(resFile))\n assert type(anns) == list, 'results is not an array of objects'\n annsQuesIds = [ann['question_id'] for ann in anns]\n assert set(annsQuesIds) == set(self.getQuesIds()), \\\n 'Results do not correspond to current VQA set. Either the results do not have predictions for all question ids in annotation file or there is atleast one question id that does not belong to the question ids in the annotation file.'\n for ann in anns:\n quesId = ann['question_id']\n if res.dataset['task_type'] == 'Multiple Choice':\n assert ann['answer'] in self.qqa[quesId][\n 'multiple_choices'], 'predicted answer is not one of the multiple choices'\n qaAnn = self.qa[quesId]\n ann['image_id'] = qaAnn['image_id']\n ann['question_type'] = qaAnn['question_type']\n ann['answer_type'] = qaAnn['answer_type']\n # print 'DONE (t=%0.2fs)'%((datetime.datetime.utcnow() - time_t).total_seconds())\n\n res.dataset['annotations'] = anns\n res.createIndex()\n return res"
},
{
"identifier": "VQAEval",
"path": "minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/vqaEval.py",
"snippet": "class VQAEval:\n\tdef __init__(self, vqa, vqaRes, n=2):\n\t\tself.n \t\t\t = n\n\t\tself.accuracy = {}\n\t\tself.evalQA = {}\n\t\tself.evalQuesType = {}\n\t\tself.evalAnsType = {}\n\t\tself.vqa \t\t = vqa\n\t\tself.vqaRes = vqaRes\n\t\tself.params\t\t = {'question_id': vqa.getQuesIds()}\n\t\tself.contractions = {\"aint\": \"ain't\", \"arent\": \"aren't\", \"cant\": \"can't\", \"couldve\": \"could've\", \"couldnt\": \"couldn't\", \\\n\t\t\t\t\t\t\t \"couldn'tve\": \"couldn't've\", \"couldnt've\": \"couldn't've\", \"didnt\": \"didn't\", \"doesnt\": \"doesn't\", \"dont\": \"don't\", \"hadnt\": \"hadn't\", \\\n\t\t\t\t\t\t\t \"hadnt've\": \"hadn't've\", \"hadn'tve\": \"hadn't've\", \"hasnt\": \"hasn't\", \"havent\": \"haven't\", \"hed\": \"he'd\", \"hed've\": \"he'd've\", \\\n\t\t\t\t\t\t\t \"he'dve\": \"he'd've\", \"hes\": \"he's\", \"howd\": \"how'd\", \"howll\": \"how'll\", \"hows\": \"how's\", \"Id've\": \"I'd've\", \"I'dve\": \"I'd've\", \\\n\t\t\t\t\t\t\t \"Im\": \"I'm\", \"Ive\": \"I've\", \"isnt\": \"isn't\", \"itd\": \"it'd\", \"itd've\": \"it'd've\", \"it'dve\": \"it'd've\", \"itll\": \"it'll\", \"let's\": \"let's\", \\\n\t\t\t\t\t\t\t \"maam\": \"ma'am\", \"mightnt\": \"mightn't\", \"mightnt've\": \"mightn't've\", \"mightn'tve\": \"mightn't've\", \"mightve\": \"might've\", \\\n\t\t\t\t\t\t\t \"mustnt\": \"mustn't\", \"mustve\": \"must've\", \"neednt\": \"needn't\", \"notve\": \"not've\", \"oclock\": \"o'clock\", \"oughtnt\": \"oughtn't\", \\\n\t\t\t\t\t\t\t \"ow's'at\": \"'ow's'at\", \"'ows'at\": \"'ow's'at\", \"'ow'sat\": \"'ow's'at\", \"shant\": \"shan't\", \"shed've\": \"she'd've\", \"she'dve\": \"she'd've\", \\\n\t\t\t\t\t\t\t \"she's\": \"she's\", \"shouldve\": \"should've\", \"shouldnt\": \"shouldn't\", \"shouldnt've\": \"shouldn't've\", \"shouldn'tve\": \"shouldn't've\", \\\n\t\t\t\t\t\t\t \"somebody'd\": \"somebodyd\", \"somebodyd've\": \"somebody'd've\", \"somebody'dve\": \"somebody'd've\", \"somebodyll\": \"somebody'll\", \\\n\t\t\t\t\t\t\t \"somebodys\": \"somebody's\", \"someoned\": \"someone'd\", \"someoned've\": \"someone'd've\", \"someone'dve\": \"someone'd've\", \\\n\t\t\t\t\t\t\t \"someonell\": \"someone'll\", \"someones\": \"someone's\", \"somethingd\": \"something'd\", \"somethingd've\": \"something'd've\", \\\n\t\t\t\t\t\t\t \"something'dve\": \"something'd've\", \"somethingll\": \"something'll\", \"thats\": \"that's\", \"thered\": \"there'd\", \"thered've\": \"there'd've\", \\\n\t\t\t\t\t\t\t \"there'dve\": \"there'd've\", \"therere\": \"there're\", \"theres\": \"there's\", \"theyd\": \"they'd\", \"theyd've\": \"they'd've\", \\\n\t\t\t\t\t\t\t \"they'dve\": \"they'd've\", \"theyll\": \"they'll\", \"theyre\": \"they're\", \"theyve\": \"they've\", \"twas\": \"'twas\", \"wasnt\": \"wasn't\", \\\n\t\t\t\t\t\t\t \"wed've\": \"we'd've\", \"we'dve\": \"we'd've\", \"weve\": \"we've\", \"werent\": \"weren't\", \"whatll\": \"what'll\", \"whatre\": \"what're\", \\\n\t\t\t\t\t\t\t \"whats\": \"what's\", \"whatve\": \"what've\", \"whens\": \"when's\", \"whered\": \"where'd\", \"wheres\": \"where's\", \"whereve\": \"where've\", \\\n\t\t\t\t\t\t\t \"whod\": \"who'd\", \"whod've\": \"who'd've\", \"who'dve\": \"who'd've\", \"wholl\": \"who'll\", \"whos\": \"who's\", \"whove\": \"who've\", \"whyll\": \"why'll\", \\\n\t\t\t\t\t\t\t \"whyre\": \"why're\", \"whys\": \"why's\", \"wont\": \"won't\", \"wouldve\": \"would've\", \"wouldnt\": \"wouldn't\", \"wouldnt've\": \"wouldn't've\", \\\n\t\t\t\t\t\t\t \"wouldn'tve\": \"wouldn't've\", \"yall\": \"y'all\", \"yall'll\": 
\"y'all'll\", \"y'allll\": \"y'all'll\", \"yall'd've\": \"y'all'd've\", \\\n\t\t\t\t\t\t\t \"y'alld've\": \"y'all'd've\", \"y'all'dve\": \"y'all'd've\", \"youd\": \"you'd\", \"youd've\": \"you'd've\", \"you'dve\": \"you'd've\", \\\n\t\t\t\t\t\t\t \"youll\": \"you'll\", \"youre\": \"you're\", \"youve\": \"you've\"}\n\t\tself.manualMap = { 'none': '0',\n\t\t\t\t\t\t\t 'zero': '0',\n\t\t\t\t\t\t\t 'one': '1',\n\t\t\t\t\t\t\t 'two': '2',\n\t\t\t\t\t\t\t 'three': '3',\n\t\t\t\t\t\t\t 'four': '4',\n\t\t\t\t\t\t\t 'five': '5',\n\t\t\t\t\t\t\t 'six': '6',\n\t\t\t\t\t\t\t 'seven': '7',\n\t\t\t\t\t\t\t 'eight': '8',\n\t\t\t\t\t\t\t 'nine': '9',\n\t\t\t\t\t\t\t 'ten': '10'\n\t\t\t\t\t\t\t}\n\t\tself.articles = ['a',\n\t\t\t\t\t\t\t 'an',\n\t\t\t\t\t\t\t 'the'\n\t\t\t\t\t\t\t]\n\n\n\t\tself.periodStrip = re.compile(\"(?!<=\\d)(\\.)(?!\\d)\")\n\t\tself.commaStrip = re.compile(\"(\\d)(\\,)(\\d)\")\n\t\tself.punct = [';', r\"/\", '[', ']', '\"', '{', '}',\n\t\t\t\t\t\t\t '(', ')', '=', '+', '\\\\', '_', '-',\n\t\t\t\t\t\t\t '>', '<', '@', '`', ',', '?', '!']\n\n\n\tdef evaluate(self, quesIds=None):\n\t\tif quesIds == None:\n\t\t\tquesIds = [quesId for quesId in self.params['question_id']]\n\t\tgts = {}\n\t\tres = {}\n\t\tfor quesId in quesIds:\n\t\t\tgts[quesId] = self.vqa.qa[quesId]\n\t\t\tres[quesId] = self.vqaRes.qa[quesId]\n\n\t\t# =================================================\n\t\t# Compute accuracy\n\t\t# =================================================\n\t\taccQA = []\n\t\taccQuesType = {}\n\t\taccAnsType = {}\n\t\t# print \"computing accuracy\"\n\t\tstep = 0\n\t\tfor quesId in quesIds:\n\t\t\tfor ansDic in gts[quesId]['answers']:\n\t\t\t\tansDic['answer'] = ansDic['answer'].replace('\\n', ' ')\n\t\t\t\tansDic['answer'] = ansDic['answer'].replace('\\t', ' ')\n\t\t\t\tansDic['answer'] = ansDic['answer'].strip()\n\t\t\tresAns = res[quesId]['answer']\n\t\t\tresAns = resAns.replace('\\n', ' ')\n\t\t\tresAns = resAns.replace('\\t', ' ')\n\t\t\tresAns = resAns.strip()\n\t\t\tgtAcc = []\n\t\t\tgtAnswers = [ans['answer'] for ans in gts[quesId]['answers']]\n\n\t\t\tif len(set(gtAnswers)) > 1:\n\t\t\t\tfor ansDic in gts[quesId]['answers']:\n\t\t\t\t\tansDic['answer'] = self.processPunctuation(ansDic['answer'])\n\t\t\t\t\tansDic['answer'] = self.processDigitArticle(ansDic['answer'])\n\t\t\t\tresAns = self.processPunctuation(resAns)\n\t\t\t\tresAns = self.processDigitArticle(resAns)\n\n\t\t\tfor gtAnsDatum in gts[quesId]['answers']:\n\t\t\t\totherGTAns = [item for item in gts[quesId]['answers'] if item!=gtAnsDatum]\n\t\t\t\tmatchingAns = [item for item in otherGTAns if item['answer'].lower()==resAns.lower()]\n\t\t\t\tacc = min(1, float(len(matchingAns))/3)\n\t\t\t\tgtAcc.append(acc)\n\t\t\tquesType = gts[quesId]['question_type']\n\t\t\tansType = gts[quesId]['answer_type']\n\t\t\tavgGTAcc = float(sum(gtAcc))/len(gtAcc)\n\t\t\taccQA.append(avgGTAcc)\n\t\t\tif quesType not in accQuesType:\n\t\t\t\taccQuesType[quesType] = []\n\t\t\taccQuesType[quesType].append(avgGTAcc)\n\t\t\tif ansType not in accAnsType:\n\t\t\t\taccAnsType[ansType] = []\n\t\t\taccAnsType[ansType].append(avgGTAcc)\n\t\t\tself.setEvalQA(quesId, avgGTAcc)\n\t\t\tself.setEvalQuesType(quesId, quesType, avgGTAcc)\n\t\t\tself.setEvalAnsType(quesId, ansType, avgGTAcc)\n\t\t\tif step%100 == 0:\n\t\t\t\tself.updateProgress(step/float(len(quesIds)))\n\t\t\tstep = step + 1\n\n\t\tself.setAccuracy(accQA, accQuesType, accAnsType)\n\t\t# print \"Done computing accuracy\"\n\n\tdef processPunctuation(self, inText):\n\t\toutText = inText\n\t\tfor p in 
self.punct:\n\t\t\tif (p + ' ' in inText or ' ' + p in inText) or (re.search(self.commaStrip, inText) != None):\n\t\t\t\toutText = outText.replace(p, '')\n\t\t\telse:\n\t\t\t\toutText = outText.replace(p, ' ')\n\t\toutText = self.periodStrip.sub(\"\",\n\t\t\t\t\t\t\t\t\t outText,\n\t\t\t\t\t\t\t\t\t re.UNICODE)\n\t\treturn outText\n\n\tdef processDigitArticle(self, inText):\n\t\toutText = []\n\t\ttempText = inText.lower().split()\n\t\tfor word in tempText:\n\t\t\tword = self.manualMap.setdefault(word, word)\n\t\t\tif word not in self.articles:\n\t\t\t\toutText.append(word)\n\t\t\telse:\n\t\t\t\tpass\n\t\tfor wordId, word in enumerate(outText):\n\t\t\tif word in self.contractions:\n\t\t\t\toutText[wordId] = self.contractions[word]\n\t\toutText = ' '.join(outText)\n\t\treturn outText\n\n\tdef setAccuracy(self, accQA, accQuesType, accAnsType):\n\t\tself.accuracy['overall'] = round(100*float(sum(accQA))/len(accQA), self.n)\n\t\tself.accuracy['perQuestionType'] = {quesType: round(100*float(sum(accQuesType[quesType]))/len(accQuesType[quesType]), self.n) for quesType in accQuesType}\n\t\tself.accuracy['perAnswerType'] = {ansType: round(100*float(sum(accAnsType[ansType]))/len(accAnsType[ansType]), self.n) for ansType in accAnsType}\n\n\tdef setEvalQA(self, quesId, acc):\n\t\tself.evalQA[quesId] = round(100*acc, self.n)\n\n\tdef setEvalQuesType(self, quesId, quesType, acc):\n\t\tif quesType not in self.evalQuesType:\n\t\t\tself.evalQuesType[quesType] = {}\n\t\tself.evalQuesType[quesType][quesId] = round(100*acc, self.n)\n\n\tdef setEvalAnsType(self, quesId, ansType, acc):\n\t\tif ansType not in self.evalAnsType:\n\t\t\tself.evalAnsType[ansType] = {}\n\t\tself.evalAnsType[ansType][quesId] = round(100*acc, self.n)\n\n\tdef updateProgress(self, progress):\n\t\tbarLength = 20\n\t\tstatus = \"\"\n\t\tif isinstance(progress, int):\n\t\t\tprogress = float(progress)\n\t\tif not isinstance(progress, float):\n\t\t\tprogress = 0\n\t\t\tstatus = \"error: progress var must be float\\r\\n\"\n\t\tif progress < 0:\n\t\t\tprogress = 0\n\t\t\tstatus = \"Halt...\\r\\n\"\n\t\tif progress >= 1:\n\t\t\tprogress = 1\n\t\t\tstatus = \"Done...\\r\\n\"\n\t\tblock = int(round(barLength*progress))\n\t\ttext = \"\\rFinshed Percent: [{0}] {1}% {2}\".format( \"#\"*block + \"-\"*(barLength-block), int(progress*100), status)\n\t\tsys.stdout.write(text)\n\t\tsys.stdout.flush()"
},
{
"identifier": "prepare_texts",
"path": "minigpt4/common/eval_utils.py",
"snippet": "def prepare_texts(texts, conv_temp):\n convs = [conv_temp.copy() for _ in range(len(texts))]\n [conv.append_message(\n conv.roles[0], '<Img><ImageHere></Img> {}'.format(text)) for conv, text in zip(convs, texts)]\n [conv.append_message(conv.roles[1], None) for conv in convs]\n texts = [conv.get_prompt() for conv in convs]\n return texts"
},
{
"identifier": "init_model",
"path": "minigpt4/common/eval_utils.py",
"snippet": "def init_model(args):\n print('Initialization Model')\n cfg = Config(args)\n # cfg.model_cfg.ckpt = args.ckpt\n # cfg.model_cfg.lora_r = args.lora_r\n # cfg.model_cfg.lora_alpha = args.lora_alpha\n\n model_config = cfg.model_cfg\n model_cls = registry.get_model_class(model_config.arch)\n model = model_cls.from_config(model_config).to('cuda:0')\n\n# import pudb; pudb.set_trace()\n key = list(cfg.datasets_cfg.keys())[0]\n vis_processor_cfg = cfg.datasets_cfg.get(key).vis_processor.train\n vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg)\n print('Initialization Finished')\n return model, vis_processor"
},
{
"identifier": "eval_parser",
"path": "minigpt4/common/eval_utils.py",
"snippet": "def eval_parser():\n parser = argparse.ArgumentParser(description=\"Demo\")\n parser.add_argument(\"--cfg-path\", required=True, help=\"path to configuration file.\")\n parser.add_argument(\"--name\", type=str, default='A2', help=\"evaluation name\")\n parser.add_argument(\"--ckpt\", type=str, help=\"path to configuration file.\")\n parser.add_argument(\"--eval_opt\", type=str, default='all', help=\"path to configuration file.\")\n parser.add_argument(\"--max_new_tokens\", type=int, default=10, help=\"max number of generated tokens\")\n parser.add_argument(\"--batch_size\", type=int, default=32)\n parser.add_argument(\"--lora_r\", type=int, default=64, help=\"lora rank of the model\")\n parser.add_argument(\"--lora_alpha\", type=int, default=16, help=\"lora alpha\")\n parser.add_argument(\n \"--options\",\n nargs=\"+\",\n help=\"override some settings in the used config, the key-value pair \"\n \"in xxx=yyy format will be merged into config file (deprecate), \"\n \"change to --cfg-options instead.\",\n )\n return parser"
},
{
"identifier": "CONV_VISION_minigptv2",
"path": "minigpt4/conversation/conversation.py",
"snippet": "class SeparatorStyle(Enum):\nclass Conversation:\nclass StoppingCriteriaSub(StoppingCriteria):\nclass Chat:\n SINGLE = auto()\n TWO = auto()\n def get_prompt(self):\n def append_message(self, role, message):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):\n def __init__(self, stops=[], encounters=1):\n def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor):\n def __init__(self, model, vis_processor, device='cuda:0', stopping_criteria=None):\n def ask(self, text, conv):\n def answer_prepare(self, conv, img_list, max_new_tokens=300, num_beams=1, min_length=1, top_p=0.9,\n repetition_penalty=1.05, length_penalty=1, temperature=1.0, max_length=2000):\n def answer(self, conv, img_list, **kargs):\n def stream_answer(self, conv, img_list, **kargs):\n def model_generate(self, *args, **kwargs):\n def encode_img(self, img_list):\n def upload_img(self, image, conv, img_list):"
},
{
"identifier": "Config",
"path": "minigpt4/common/config.py",
"snippet": "class Config:\n def __init__(self, args):\n self.config = {}\n\n self.args = args\n\n # Register the config and configuration for setup\n registry.register(\"configuration\", self)\n\n user_config = self._build_opt_list(self.args.options)\n\n config = OmegaConf.load(self.args.cfg_path)\n\n runner_config = self.build_runner_config(config)\n model_config = self.build_model_config(config, **user_config)\n dataset_config = self.build_dataset_config(config)\n evaluation_dataset_config = self.build_evaluation_dataset_config(config)\n\n # Validate the user-provided runner configuration\n # model and dataset configuration are supposed to be validated by the respective classes\n # [TODO] validate the model/dataset configuration\n # self._validate_runner_config(runner_config)\n\n # Override the default configuration with user options.\n self.config = OmegaConf.merge(\n runner_config, model_config, dataset_config,evaluation_dataset_config, user_config\n )\n\n def _validate_runner_config(self, runner_config):\n \"\"\"\n This method validates the configuration, such that\n 1) all the user specified options are valid;\n 2) no type mismatches between the user specified options and the config.\n \"\"\"\n runner_config_validator = create_runner_config_validator()\n runner_config_validator.validate(runner_config)\n\n def _build_opt_list(self, opts):\n opts_dot_list = self._convert_to_dot_list(opts)\n return OmegaConf.from_dotlist(opts_dot_list)\n\n @staticmethod\n def build_model_config(config, **kwargs):\n model = config.get(\"model\", None)\n assert model is not None, \"Missing model configuration file.\"\n\n model_cls = registry.get_model_class(model.arch)\n assert model_cls is not None, f\"Model '{model.arch}' has not been registered.\"\n\n model_type = kwargs.get(\"model.model_type\", None)\n if not model_type:\n model_type = model.get(\"model_type\", None)\n # else use the model type selected by user.\n\n assert model_type is not None, \"Missing model_type.\"\n\n model_config_path = model_cls.default_config_path(model_type=model_type)\n\n model_config = OmegaConf.create()\n # hierarchy override, customized config > default config\n model_config = OmegaConf.merge(\n model_config,\n OmegaConf.load(model_config_path),\n {\"model\": config[\"model\"]},\n )\n\n return model_config\n\n @staticmethod\n def build_runner_config(config):\n return {\"run\": config.run}\n\n @staticmethod\n def build_dataset_config(config):\n datasets = config.get(\"datasets\", None)\n if datasets is None:\n raise KeyError(\n \"Expecting 'datasets' as the root key for dataset configuration.\"\n )\n\n dataset_config = OmegaConf.create()\n\n for dataset_name in datasets:\n builder_cls = registry.get_builder_class(dataset_name)\n\n dataset_config_type = datasets[dataset_name].get(\"type\", \"default\")\n dataset_config_path = builder_cls.default_config_path(\n type=dataset_config_type\n )\n\n # hierarchy override, customized config > default config\n dataset_config = OmegaConf.merge(\n dataset_config,\n OmegaConf.load(dataset_config_path),\n {\"datasets\": {dataset_name: config[\"datasets\"][dataset_name]}},\n )\n\n return dataset_config\n\n\n @staticmethod\n def build_evaluation_dataset_config(config):\n datasets = config.get(\"evaluation_datasets\", None)\n # if datasets is None:\n # raise KeyError(\n # \"Expecting 'datasets' as the root key for dataset configuration.\"\n # )\n\n dataset_config = OmegaConf.create()\n\n if datasets is not None:\n for dataset_name in datasets:\n builder_cls = 
registry.get_builder_class(dataset_name)\n\n # hierarchy override, customized config > default config\n dataset_config = OmegaConf.merge(\n dataset_config,\n {\"evaluation_datasets\": {dataset_name: config[\"evaluation_datasets\"][dataset_name]}},\n )\n\n return dataset_config\n\n def _convert_to_dot_list(self, opts):\n if opts is None:\n opts = []\n\n if len(opts) == 0:\n return opts\n\n has_equal = opts[0].find(\"=\") != -1\n\n if has_equal:\n return opts\n\n return [(opt + \"=\" + value) for opt, value in zip(opts[0::2], opts[1::2])]\n\n def get_config(self):\n return self.config\n\n @property\n def run_cfg(self):\n return self.config.run\n\n @property\n def datasets_cfg(self):\n return self.config.datasets\n\n @property\n def evaluation_datasets_cfg(self):\n return self.config.evaluation_datasets\n\n @property\n def model_cfg(self):\n return self.config.model\n\n def pretty_print(self):\n logging.info(\"\\n===== Running Parameters =====\")\n logging.info(self._convert_node_to_json(self.config.run))\n\n logging.info(\"\\n====== Dataset Attributes ======\")\n datasets = self.config.datasets\n\n for dataset in datasets:\n if dataset in self.config.datasets:\n logging.info(f\"\\n======== {dataset} =======\")\n dataset_config = self.config.datasets[dataset]\n logging.info(self._convert_node_to_json(dataset_config))\n else:\n logging.warning(f\"No dataset named '{dataset}' in config. Skipping\")\n\n logging.info(f\"\\n====== Model Attributes ======\")\n logging.info(self._convert_node_to_json(self.config.model))\n\n def _convert_node_to_json(self, node):\n container = OmegaConf.to_container(node, resolve=True)\n return json.dumps(container, indent=4, sort_keys=True)\n\n def to_dict(self):\n return OmegaConf.to_container(self.config)"
}
] | import os
import re
import json
import argparse
import numpy as np
import torch
from collections import defaultdict
from PIL import Image
from tqdm import tqdm
from torch.utils.data import DataLoader
from datasets import load_dataset
from minigpt4.datasets.datasets.vqa_datasets import OKVQAEvalData,VizWizEvalData,IconQAEvalData,GQAEvalData,VSREvalData,HMEvalData
from minigpt4.common.vqa_tools.VQA.PythonHelperTools.vqaTools.vqa import VQA
from minigpt4.common.vqa_tools.VQA.PythonEvaluationTools.vqaEvaluation.vqaEval import VQAEval
from minigpt4.common.eval_utils import prepare_texts, init_model, eval_parser
from minigpt4.conversation.conversation import CONV_VISION_minigptv2
from minigpt4.common.config import Config | 8,987 |
def list_of_str(arg):
return list(map(str, arg.split(',')))
parser = eval_parser()
parser.add_argument("--dataset", type=list_of_str, default='refcoco', help="dataset to evaluate")
args = parser.parse_args()
cfg = Config(args)
model, vis_processor = init_model(args)
conv_temp = CONV_VISION_minigptv2.copy()
conv_temp.system = ""
model.eval()
save_path = cfg.run_cfg.save_path
if 'okvqa' in args.dataset:
eval_file_path = cfg.evaluation_datasets_cfg["okvqa"]["eval_file_path"]
img_path = cfg.evaluation_datasets_cfg["okvqa"]["img_path"]
batch_size = cfg.evaluation_datasets_cfg["okvqa"]["batch_size"]
max_new_tokens = cfg.evaluation_datasets_cfg["okvqa"]["max_new_tokens"]
    evaluation_annotation_path = os.path.join(eval_file_path, "okvqa_test_split.json")
    with open(evaluation_annotation_path) as f:
ok_vqa_test_split = json.load(f)
|
def list_of_str(arg):
return list(map(str, arg.split(',')))
parser = eval_parser()
parser.add_argument("--dataset", type=list_of_str, default='refcoco', help="dataset to evaluate")
args = parser.parse_args()
cfg = Config(args)
model, vis_processor = init_model(args)
conv_temp = CONV_VISION_minigptv2.copy()
conv_temp.system = ""
model.eval()
save_path = cfg.run_cfg.save_path
if 'okvqa' in args.dataset:
eval_file_path = cfg.evaluation_datasets_cfg["okvqa"]["eval_file_path"]
img_path = cfg.evaluation_datasets_cfg["okvqa"]["img_path"]
batch_size = cfg.evaluation_datasets_cfg["okvqa"]["batch_size"]
max_new_tokens = cfg.evaluation_datasets_cfg["okvqa"]["max_new_tokens"]
    evaluation_annotation_path = os.path.join(eval_file_path, "okvqa_test_split.json")
    with open(evaluation_annotation_path) as f:
ok_vqa_test_split = json.load(f)
| data = OKVQAEvalData(ok_vqa_test_split, vis_processor, img_path) | 0 | 2023-12-28 05:47:18+00:00 | 12k |
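The gold next line for this row constructs `OKVQAEvalData`; below is a minimal sketch of how the OK-VQA branch plausibly continues, using only the imports already listed for this row. The per-batch field layout, the `model.generate` keyword arguments, and the prediction file name are assumptions for illustration, not code taken from the dataset.

```python
# Sketch only: hypothetical continuation past the gold next line.
data = OKVQAEvalData(ok_vqa_test_split, vis_processor, img_path)
eval_dataloader = DataLoader(data, batch_size=batch_size, shuffle=False)

predictions = []
for images, questions, question_ids, img_ids in tqdm(eval_dataloader):
    # prepare_texts is assumed to wrap each question in the minigptv2 conversation template
    texts = prepare_texts(questions, conv_temp)
    answers = model.generate(images, texts, max_new_tokens=max_new_tokens, do_sample=False)
    for answer, question_id in zip(answers, question_ids):
        predictions.append({"answer": answer.lower().strip(), "question_id": int(question_id)})

os.makedirs(save_path, exist_ok=True)
with open(os.path.join(save_path, "okvqa_predictions.json"), "w") as f:
    json.dump(predictions, f)
```

From here, the `VQA`/`VQAEval` tools imported above could score `predictions` against the annotation files under `eval_file_path`.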
ali-vilab/dreamtalk | inference_for_demo_video.py | [
{
"identifier": "get_cfg_defaults",
"path": "configs/default.py",
"snippet": "def get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n return _C.clone()"
},
{
"identifier": "DiffusionNet",
"path": "core/networks/diffusion_net.py",
"snippet": "class DiffusionNet(Module):\n def __init__(self, cfg, net, var_sched: VarianceSchedule):\n super().__init__()\n self.cfg = cfg\n self.net = net\n self.var_sched = var_sched\n self.face3d_latent_type = self.cfg.TRAIN.FACE3D_LATENT.TYPE\n self.predict_what = self.cfg.DIFFUSION.PREDICT_WHAT\n\n if self.cfg.CF_GUIDANCE.TRAINING:\n null_style_clip = torch.zeros(\n self.cfg.DATASET.STYLE_MAX_LEN, self.cfg.DATASET.FACE3D_DIM\n )\n self.register_buffer(\"null_style_clip\", null_style_clip)\n\n null_pad_mask = torch.tensor([False] * self.cfg.DATASET.STYLE_MAX_LEN)\n self.register_buffer(\"null_pad_mask\", null_pad_mask)\n\n def _face3d_to_latent(self, face3d):\n latent = None\n if self.face3d_latent_type == \"face3d\":\n latent = face3d\n elif self.face3d_latent_type == \"normalized_face3d\":\n latent = face3d_raw_to_norm(\n face3d, exp_min=self.exp_min, exp_max=self.exp_max\n )\n else:\n raise ValueError(f\"Invalid face3d latent type: {self.face3d_latent_type}\")\n return latent\n\n def _latent_to_face3d(self, latent):\n face3d = None\n if self.face3d_latent_type == \"face3d\":\n face3d = latent\n elif self.face3d_latent_type == \"normalized_face3d\":\n latent = torch.clamp(latent, min=-1, max=1)\n face3d = face3d_norm_to_raw(\n latent, exp_min=self.exp_min, exp_max=self.exp_max\n )\n else:\n raise ValueError(f\"Invalid face3d latent type: {self.face3d_latent_type}\")\n return face3d\n\n def ddim_sample(\n self,\n audio,\n style_clip,\n style_pad_mask,\n output_dim,\n flexibility=0.0,\n ret_traj=False,\n use_cf_guidance=False,\n cfg_scale=2.0,\n ddim_num_step=50,\n ready_style_code=None,\n ):\n \"\"\"\n\n Args:\n audio (_type_): (B, L, W) or (B, L, W, C)\n style_clip (_type_): (B, L_clipmax, C_face3d)\n style_pad_mask : (B, L_clipmax)\n pose_dim (_type_): int\n flexibility (float, optional): _description_. Defaults to 0.0.\n ret_traj (bool, optional): _description_. 
Defaults to False.\n\n\n Returns:\n _type_: (B, L, C_face)\n \"\"\"\n if self.predict_what != \"x0\":\n raise NotImplementedError(self.predict_what)\n\n if ready_style_code is not None and use_cf_guidance:\n raise NotImplementedError(\"not implement cfg for ready style code\")\n\n c = self.var_sched.num_steps // ddim_num_step\n time_steps = torch.tensor(\n np.asarray(list(range(0, self.var_sched.num_steps, c))) + 1\n )\n assert len(time_steps) == ddim_num_step\n prev_time_steps = torch.cat((torch.tensor([0]), time_steps[:-1]))\n\n batch_size, output_len = audio.shape[:2]\n # batch_size = context.size(0)\n context = {\n \"audio\": audio,\n \"style_clip\": style_clip,\n \"style_pad_mask\": style_pad_mask,\n \"ready_style_code\": ready_style_code,\n }\n if use_cf_guidance:\n uncond_style_clip = self.null_style_clip.unsqueeze(0).repeat(\n batch_size, 1, 1\n )\n uncond_pad_mask = self.null_pad_mask.unsqueeze(0).repeat(batch_size, 1)\n\n context_double = {\n \"audio\": torch.cat([audio] * 2, dim=0),\n \"style_clip\": torch.cat([style_clip, uncond_style_clip], dim=0),\n \"style_pad_mask\": torch.cat([style_pad_mask, uncond_pad_mask], dim=0),\n \"ready_style_code\": None\n if ready_style_code is None\n else torch.cat(\n [\n ready_style_code,\n self.net.style_encoder(uncond_style_clip, uncond_pad_mask),\n ],\n dim=0,\n ),\n }\n\n x_t = torch.randn([batch_size, output_len, output_dim]).to(audio.device)\n\n for idx in list(range(ddim_num_step))[::-1]:\n t = time_steps[idx]\n t_prev = prev_time_steps[idx]\n ddim_alpha = self.var_sched.alpha_bars[t]\n ddim_alpha_prev = self.var_sched.alpha_bars[t_prev]\n\n t_tensor = torch.tensor([t] * batch_size).to(audio.device).float()\n if use_cf_guidance:\n x_t_double = torch.cat([x_t] * 2, dim=0)\n t_tensor_double = torch.cat([t_tensor] * 2, dim=0)\n cond_output, uncond_output = self.net(\n x_t_double, t=t_tensor_double, **context_double\n ).chunk(2)\n diff_output = uncond_output + cfg_scale * (cond_output - uncond_output)\n else:\n diff_output = self.net(x_t, t=t_tensor, **context)\n\n pred_x0 = diff_output\n eps = (x_t - torch.sqrt(ddim_alpha) * pred_x0) / torch.sqrt(1 - ddim_alpha)\n c1 = torch.sqrt(ddim_alpha_prev)\n c2 = torch.sqrt(1 - ddim_alpha_prev)\n\n x_t = c1 * pred_x0 + c2 * eps\n\n latent_output = x_t\n face3d_output = self._latent_to_face3d(latent_output)\n return face3d_output\n\n def sample(\n self,\n audio,\n style_clip,\n style_pad_mask,\n output_dim,\n flexibility=0.0,\n ret_traj=False,\n use_cf_guidance=False,\n cfg_scale=2.0,\n sample_method=\"ddpm\",\n ddim_num_step=50,\n ready_style_code=None,\n ):\n # sample_method = kwargs[\"sample_method\"]\n if sample_method == \"ddpm\":\n if ready_style_code is not None:\n raise NotImplementedError(\"ready style code in ddpm\")\n return self.ddpm_sample(\n audio,\n style_clip,\n style_pad_mask,\n output_dim,\n flexibility=flexibility,\n ret_traj=ret_traj,\n use_cf_guidance=use_cf_guidance,\n cfg_scale=cfg_scale,\n )\n elif sample_method == \"ddim\":\n return self.ddim_sample(\n audio,\n style_clip,\n style_pad_mask,\n output_dim,\n flexibility=flexibility,\n ret_traj=ret_traj,\n use_cf_guidance=use_cf_guidance,\n cfg_scale=cfg_scale,\n ddim_num_step=ddim_num_step,\n ready_style_code=ready_style_code,\n )\n\n def ddpm_sample(\n self,\n audio,\n style_clip,\n style_pad_mask,\n output_dim,\n flexibility=0.0,\n ret_traj=False,\n use_cf_guidance=False,\n cfg_scale=2.0,\n ):\n \"\"\"\n\n Args:\n audio (_type_): (B, L, W) or (B, L, W, C)\n style_clip (_type_): (B, L_clipmax, C_face3d)\n style_pad_mask : (B, 
L_clipmax)\n pose_dim (_type_): int\n flexibility (float, optional): _description_. Defaults to 0.0.\n ret_traj (bool, optional): _description_. Defaults to False.\n\n\n Returns:\n _type_: (B, L, C_face)\n \"\"\"\n batch_size, output_len = audio.shape[:2]\n # batch_size = context.size(0)\n context = {\n \"audio\": audio,\n \"style_clip\": style_clip,\n \"style_pad_mask\": style_pad_mask,\n }\n if use_cf_guidance:\n uncond_style_clip = self.null_style_clip.unsqueeze(0).repeat(\n batch_size, 1, 1\n )\n uncond_pad_mask = self.null_pad_mask.unsqueeze(0).repeat(batch_size, 1)\n context_double = {\n \"audio\": torch.cat([audio] * 2, dim=0),\n \"style_clip\": torch.cat([style_clip, uncond_style_clip], dim=0),\n \"style_pad_mask\": torch.cat([style_pad_mask, uncond_pad_mask], dim=0),\n }\n\n x_T = torch.randn([batch_size, output_len, output_dim]).to(audio.device)\n traj = {self.var_sched.num_steps: x_T}\n for t in range(self.var_sched.num_steps, 0, -1):\n alpha = self.var_sched.alphas[t]\n alpha_bar = self.var_sched.alpha_bars[t]\n alpha_bar_prev = self.var_sched.alpha_bars[t - 1]\n sigma = self.var_sched.get_sigmas(t, flexibility)\n\n z = torch.randn_like(x_T) if t > 1 else torch.zeros_like(x_T)\n x_t = traj[t]\n t_tensor = torch.tensor([t] * batch_size).to(audio.device).float()\n if use_cf_guidance:\n x_t_double = torch.cat([x_t] * 2, dim=0)\n t_tensor_double = torch.cat([t_tensor] * 2, dim=0)\n cond_output, uncond_output = self.net(\n x_t_double, t=t_tensor_double, **context_double\n ).chunk(2)\n diff_output = uncond_output + cfg_scale * (cond_output - uncond_output)\n else:\n diff_output = self.net(x_t, t=t_tensor, **context)\n\n if self.predict_what == \"noise\":\n c0 = 1.0 / torch.sqrt(alpha)\n c1 = (1 - alpha) / torch.sqrt(1 - alpha_bar)\n x_next = c0 * (x_t - c1 * diff_output) + sigma * z\n elif self.predict_what == \"x0\":\n d0 = torch.sqrt(alpha) * (1 - alpha_bar_prev) / (1 - alpha_bar)\n d1 = torch.sqrt(alpha_bar_prev) * (1 - alpha) / (1 - alpha_bar)\n x_next = d0 * x_t + d1 * diff_output + sigma * z\n traj[t - 1] = x_next.detach()\n traj[t] = traj[t].cpu()\n if not ret_traj:\n del traj[t]\n\n if ret_traj:\n raise NotImplementedError\n return traj\n else:\n latent_output = traj[0]\n face3d_output = self._latent_to_face3d(latent_output)\n return face3d_output"
},
{
"identifier": "NoisePredictor",
"path": "core/networks/diffusion_util.py",
"snippet": "class NoisePredictor(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n\n content_encoder_class = get_network(cfg.CONTENT_ENCODER_TYPE)\n self.content_encoder = content_encoder_class(**cfg.CONTENT_ENCODER)\n\n style_encoder_class = get_network(cfg.STYLE_ENCODER_TYPE)\n cfg.defrost()\n cfg.STYLE_ENCODER.input_dim = cfg.DATASET.FACE3D_DIM\n cfg.freeze()\n self.style_encoder = style_encoder_class(**cfg.STYLE_ENCODER)\n\n decoder_class = get_network(cfg.DECODER_TYPE)\n cfg.defrost()\n cfg.DECODER.output_dim = cfg.DATASET.FACE3D_DIM\n cfg.freeze()\n self.decoder = decoder_class(**cfg.DECODER)\n\n self.content_xt_to_decoder_input_wo_time = nn.Sequential(\n nn.Linear(cfg.D_MODEL + cfg.DATASET.FACE3D_DIM, cfg.D_MODEL),\n nn.ReLU(),\n nn.Linear(cfg.D_MODEL, cfg.D_MODEL),\n nn.ReLU(),\n nn.Linear(cfg.D_MODEL, cfg.D_MODEL),\n )\n\n self.time_sinusoidal_dim = cfg.D_MODEL\n self.time_embed_net = nn.Sequential(\n nn.Linear(cfg.D_MODEL, cfg.D_MODEL),\n nn.SiLU(),\n nn.Linear(cfg.D_MODEL, cfg.D_MODEL),\n )\n\n def forward(self, x_t, t, audio, style_clip, style_pad_mask, ready_style_code=None):\n \"\"\"_summary_\n\n Args:\n x_t (_type_): (B, L, C_face)\n t (_type_): (B,) dtype:float32\n audio (_type_): (B, L, W)\n style_clip (_type_): (B, L_clipmax, C_face3d)\n style_pad_mask : (B, L_clipmax)\n ready_style_code: (B, C_model)\n Returns:\n e_theta : (B, L, C_face)\n \"\"\"\n W = audio.shape[2]\n content = self.content_encoder(audio)\n # (B, L, W, C_model)\n x_t_expand = x_t.unsqueeze(2).repeat(1, 1, W, 1)\n # (B, L, C_face) -> (B, L, W, C_face)\n content_xt_concat = torch.cat((content, x_t_expand), dim=3)\n # (B, L, W, C_model+C_face)\n decoder_input_without_time = self.content_xt_to_decoder_input_wo_time(\n content_xt_concat\n )\n # (B, L, W, C_model)\n\n time_sinusoidal = sinusoidal_embedding(t, self.time_sinusoidal_dim)\n # (B, C_embed)\n time_embedding = self.time_embed_net(time_sinusoidal)\n # (B, C_model)\n B, C = time_embedding.shape\n time_embed_expand = time_embedding.view(B, 1, 1, C)\n decoder_input = decoder_input_without_time + time_embed_expand\n # (B, L, W, C_model)\n\n if ready_style_code is not None:\n style_code = ready_style_code\n else:\n style_code = self.style_encoder(style_clip, style_pad_mask)\n # (B, C_model)\n\n e_theta = self.decoder(decoder_input, style_code)\n # (B, L, C_face)\n return e_theta"
},
{
"identifier": "VarianceSchedule",
"path": "core/networks/diffusion_util.py",
"snippet": "class VarianceSchedule(Module):\n def __init__(self, num_steps, beta_1, beta_T, mode=\"linear\"):\n super().__init__()\n assert mode in (\"linear\",)\n self.num_steps = num_steps\n self.beta_1 = beta_1\n self.beta_T = beta_T\n self.mode = mode\n\n if mode == \"linear\":\n betas = torch.linspace(beta_1, beta_T, steps=num_steps)\n\n betas = torch.cat([torch.zeros([1]), betas], dim=0) # Padding\n\n alphas = 1 - betas\n log_alphas = torch.log(alphas)\n for i in range(1, log_alphas.size(0)): # 1 to T\n log_alphas[i] += log_alphas[i - 1]\n alpha_bars = log_alphas.exp()\n\n sigmas_flex = torch.sqrt(betas)\n sigmas_inflex = torch.zeros_like(sigmas_flex)\n for i in range(1, sigmas_flex.size(0)):\n sigmas_inflex[i] = ((1 - alpha_bars[i - 1]) / (1 - alpha_bars[i])) * betas[\n i\n ]\n sigmas_inflex = torch.sqrt(sigmas_inflex)\n\n self.register_buffer(\"betas\", betas)\n self.register_buffer(\"alphas\", alphas)\n self.register_buffer(\"alpha_bars\", alpha_bars)\n self.register_buffer(\"sigmas_flex\", sigmas_flex)\n self.register_buffer(\"sigmas_inflex\", sigmas_inflex)\n\n def uniform_sample_t(self, batch_size):\n ts = np.random.choice(np.arange(1, self.num_steps + 1), batch_size)\n return ts.tolist()\n\n def get_sigmas(self, t, flexibility):\n assert 0 <= flexibility and flexibility <= 1\n sigmas = self.sigmas_flex[t] * flexibility + self.sigmas_inflex[t] * (\n 1 - flexibility\n )\n return sigmas"
},
{
"identifier": "crop_src_image",
"path": "core/utils.py",
"snippet": "def crop_src_image(src_img, save_img, increase_ratio, detector=None):\n if detector is None:\n detector = dlib.get_frontal_face_detector()\n\n img = cv2.imread(src_img)\n faces = detector(img, 0)\n h, width, _ = img.shape\n if len(faces) > 0:\n bbox = [faces[0].left(), faces[0].top(), faces[0].right(), faces[0].bottom()]\n l = bbox[3] - bbox[1]\n bbox[1] = bbox[1] - l * 0.1\n bbox[3] = bbox[3] - l * 0.1\n bbox[1] = max(0, bbox[1])\n bbox[3] = min(h, bbox[3])\n bbox = compute_aspect_preserved_bbox(\n tuple(bbox), increase_ratio, img.shape[0], img.shape[1]\n )\n img = img[bbox[1] : bbox[3], bbox[0] : bbox[2]]\n img = cv2.resize(img, (256, 256))\n cv2.imwrite(save_img, img)\n else:\n raise ValueError(\"No face detected in the input image\")\n # img = cv2.resize(img, (256, 256))\n # cv2.imwrite(save_img, img)"
},
{
"identifier": "get_pose_params",
"path": "core/utils.py",
"snippet": "def get_pose_params(mat_path):\n \"\"\"Get pose parameters from mat file\n\n Args:\n mat_path (str): path of mat file\n\n Returns:\n pose_params (numpy.ndarray): shape (L_video, 9), angle, translation, crop paramters\n \"\"\"\n mat_dict = loadmat(mat_path)\n\n np_3dmm = mat_dict[\"coeff\"]\n angles = np_3dmm[:, 224:227]\n translations = np_3dmm[:, 254:257]\n\n np_trans_params = mat_dict[\"transform_params\"]\n crop = np_trans_params[:, -3:]\n\n pose_params = np.concatenate((angles, translations, crop), axis=1)\n\n return pose_params"
},
{
"identifier": "get_video_style_clip",
"path": "core/utils.py",
"snippet": "def get_video_style_clip(\n video_name,\n video_root_dir,\n style_max_len,\n start_idx=\"random\",\n dtype=torch.float32,\n return_start_idx=False,\n):\n video_path = os.path.join(video_root_dir, video_name)\n if video_path[-3:] == \"mat\":\n face3d_all = loadmat(video_path)[\"coeff\"]\n face3d_exp = face3d_all[:, 80:144] # expression 3DMM range\n elif video_path[-3:] == \"txt\":\n face3d_exp = np.loadtxt(video_path)\n else:\n raise ValueError(\"Invalid 3DMM file extension\")\n\n face3d_exp = torch.tensor(face3d_exp, dtype=dtype)\n\n length = face3d_exp.shape[0]\n if length >= style_max_len:\n clip_num_frames = style_max_len\n if start_idx == \"random\":\n clip_start_idx = np.random.randint(low=0, high=length - clip_num_frames + 1)\n elif start_idx == \"middle\":\n clip_start_idx = (length - clip_num_frames + 1) // 2\n elif isinstance(start_idx, int):\n clip_start_idx = start_idx\n else:\n raise ValueError(f\"Invalid start_idx {start_idx}\")\n\n face3d_clip = face3d_exp[clip_start_idx : clip_start_idx + clip_num_frames]\n pad_mask = torch.tensor([False] * style_max_len)\n else:\n clip_start_idx = None\n padding = torch.zeros(style_max_len - length, face3d_exp.shape[1])\n face3d_clip = torch.cat((face3d_exp, padding), dim=0)\n pad_mask = torch.tensor([False] * length + [True] * (style_max_len - length))\n\n if return_start_idx:\n return face3d_clip, pad_mask, clip_start_idx\n else:\n return face3d_clip, pad_mask"
},
{
"identifier": "get_wav2vec_audio_window",
"path": "core/utils.py",
"snippet": "def get_wav2vec_audio_window(audio_feat, start_idx, num_frames, win_size):\n \"\"\"\n\n Args:\n audio_feat (np.ndarray): (N, 1024)\n start_idx (_type_): _description_\n num_frames (_type_): _description_\n \"\"\"\n center_idx_list = [2 * idx for idx in range(start_idx, start_idx + num_frames)]\n audio_window_list = []\n padding = np.zeros(audio_feat.shape[1], dtype=np.float32)\n for center_idx in center_idx_list:\n cur_audio_window = []\n for i in range(center_idx - win_size, center_idx + win_size + 1):\n if i < 0:\n cur_audio_window.append(padding)\n elif i >= len(audio_feat):\n cur_audio_window.append(padding)\n else:\n cur_audio_window.append(audio_feat[i])\n cur_audio_win_array = np.stack(cur_audio_window, axis=0)\n audio_window_list.append(cur_audio_win_array)\n\n audio_window_array = np.stack(audio_window_list, axis=0)\n return audio_window_array"
},
{
"identifier": "get_netG",
"path": "generators/utils.py",
"snippet": "@torch.no_grad()\ndef get_netG(checkpoint_path, device):\n import yaml\n\n from generators.face_model import FaceGenerator\n\n with open(\"generators/renderer_conf.yaml\", \"r\") as f:\n renderer_config = yaml.load(f, Loader=yaml.FullLoader)\n\n renderer = FaceGenerator(**renderer_config).to(device)\n\n checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)\n renderer.load_state_dict(checkpoint[\"net_G_ema\"], strict=False)\n\n renderer.eval()\n\n return renderer"
},
{
"identifier": "render_video",
"path": "generators/utils.py",
"snippet": "@torch.no_grad()\ndef render_video(\n net_G,\n src_img_path,\n exp_path,\n wav_path,\n output_path,\n device,\n silent=False,\n semantic_radius=13,\n fps=30,\n split_size=16,\n no_move=False,\n):\n \"\"\"\n exp: (N, 73)\n \"\"\"\n target_exp_seq = np.load(exp_path)\n if target_exp_seq.shape[1] == 257:\n exp_coeff = target_exp_seq[:, 80:144]\n angle_trans_crop = np.array(\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9370641, 126.84911, 129.03864],\n dtype=np.float32,\n )\n target_exp_seq = np.concatenate(\n [exp_coeff, angle_trans_crop[None, ...].repeat(exp_coeff.shape[0], axis=0)],\n axis=1,\n )\n # (L, 73)\n elif target_exp_seq.shape[1] == 73:\n if no_move:\n target_exp_seq[:, 64:] = np.array(\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9370641, 126.84911, 129.03864],\n dtype=np.float32,\n )\n else:\n raise NotImplementedError\n\n frame = cv2.imread(src_img_path)\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n src_img_raw = Image.fromarray(frame)\n image_transform = transforms.Compose(\n [\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),\n ]\n )\n src_img = image_transform(src_img_raw)\n\n target_win_exps = []\n for frame_idx in range(len(target_exp_seq)):\n win_indices = obtain_seq_index(\n frame_idx, target_exp_seq.shape[0], semantic_radius\n )\n win_exp = torch.tensor(target_exp_seq[win_indices]).permute(1, 0)\n # (73, 27)\n target_win_exps.append(win_exp)\n\n target_exp_concat = torch.stack(target_win_exps, dim=0)\n target_splited_exps = torch.split(target_exp_concat, split_size, dim=0)\n output_imgs = []\n for win_exp in target_splited_exps:\n win_exp = win_exp.to(device)\n cur_src_img = src_img.expand(win_exp.shape[0], -1, -1, -1).to(device)\n output_dict = net_G(cur_src_img, win_exp)\n output_imgs.append(output_dict[\"fake_image\"].cpu().clamp_(-1, 1))\n\n output_imgs = torch.cat(output_imgs, 0)\n transformed_imgs = ((output_imgs + 1) / 2 * 255).to(torch.uint8).permute(0, 2, 3, 1)\n\n if silent:\n torchvision.io.write_video(output_path, transformed_imgs.cpu(), fps)\n else:\n silent_video_path = f\"{output_path}-silent.mp4\"\n torchvision.io.write_video(silent_video_path, transformed_imgs.cpu(), fps)\n os.system(\n f\"ffmpeg -loglevel quiet -y -i {silent_video_path} -i {wav_path} -shortest {output_path}\"\n )\n os.remove(silent_video_path)"
}
] | import argparse
import json
import os
import shutil
import subprocess
import numpy as np
import torch
import torchaudio
from scipy.io import loadmat
from transformers import Wav2Vec2Processor
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2Model
from configs.default import get_cfg_defaults
from core.networks.diffusion_net import DiffusionNet
from core.networks.diffusion_util import NoisePredictor, VarianceSchedule
from core.utils import (
crop_src_image,
get_pose_params,
get_video_style_clip,
get_wav2vec_audio_window,
)
from generators.utils import get_netG, render_video | 7,797 | style_clip_path, "", style_max_len=256, start_idx=0
)
style_clip = style_clip_raw.unsqueeze(0).to(device)
style_pad_mask = (
style_pad_mask_raw.unsqueeze(0).to(device)
if style_pad_mask_raw is not None
else None
)
gen_exp_stack = diff_net.sample(
audio,
style_clip,
style_pad_mask,
output_dim=cfg.DATASET.FACE3D_DIM,
use_cf_guidance=cfg.CF_GUIDANCE.INFERENCE,
cfg_scale=cfg.CF_GUIDANCE.SCALE,
sample_method=sample_method,
ddim_num_step=ddim_num_step,
)
gen_exp = gen_exp_stack[0].cpu().numpy()
    pose = get_pose_params(pose_path)
    # (L, 9): angles, translations, crop parameters
    if len(pose) >= len(gen_exp):
        selected_pose = pose[: len(gen_exp)]
    else:
        # pose is a numpy array (see get_pose_params), so pad it with numpy ops;
        # the torch-style .unsqueeze()/.repeat() calls would fail on an ndarray.
        selected_pose = np.tile(pose[-1][None], (len(gen_exp), 1))
        selected_pose[: len(pose)] = pose
gen_exp_pose = np.concatenate((gen_exp, selected_pose), axis=1)
np.save(output_path, gen_exp_pose)
return output_path
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="inference for demo")
parser.add_argument("--wav_path", type=str, default="", help="path for wav")
parser.add_argument("--image_path", type=str, default="", help="path for image")
parser.add_argument("--disable_img_crop", dest="img_crop", action="store_false")
parser.set_defaults(img_crop=True)
parser.add_argument(
"--style_clip_path", type=str, default="", help="path for style_clip_mat"
)
parser.add_argument("--pose_path", type=str, default="", help="path for pose")
parser.add_argument(
"--max_gen_len",
type=int,
default=1000,
help="The maximum length (seconds) limitation for generating videos",
)
parser.add_argument(
"--cfg_scale",
type=float,
default=1.0,
help="The scale of classifier-free guidance",
)
parser.add_argument(
"--output_name",
type=str,
default="test",
)
parser.add_argument(
"--device",
type=str,
default="cuda",
)
args = parser.parse_args()
if args.device == "cuda" and not torch.cuda.is_available():
print("CUDA is not available, set --device=cpu to use CPU.")
exit(1)
device = torch.device(args.device)
cfg = get_cfg_defaults()
cfg.CF_GUIDANCE.SCALE = args.cfg_scale
cfg.freeze()
tmp_dir = f"tmp/{args.output_name}"
os.makedirs(tmp_dir, exist_ok=True)
# get audio in 16000Hz
wav_16k_path = os.path.join(tmp_dir, f"{args.output_name}_16K.wav")
command = f"ffmpeg -y -i {args.wav_path} -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 {wav_16k_path}"
subprocess.run(command.split())
# get wav2vec feat from audio
wav2vec_processor = Wav2Vec2Processor.from_pretrained(
"jonatasgrosman/wav2vec2-large-xlsr-53-english"
)
wav2vec_model = (
Wav2Vec2Model.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
.eval()
.to(device)
)
speech_array, sampling_rate = torchaudio.load(wav_16k_path)
audio_data = speech_array.squeeze().numpy()
inputs = wav2vec_processor(
audio_data, sampling_rate=16_000, return_tensors="pt", padding=True
)
with torch.no_grad():
audio_embedding = wav2vec_model(
inputs.input_values.to(device), return_dict=False
)[0]
audio_feat_path = os.path.join(tmp_dir, f"{args.output_name}_wav2vec.npy")
np.save(audio_feat_path, audio_embedding[0].cpu().numpy())
# get src image
src_img_path = os.path.join(tmp_dir, "src_img.png")
if args.img_crop:
|
@torch.no_grad()
def get_diff_net(cfg, device):
diff_net = DiffusionNet(
cfg=cfg,
net=NoisePredictor(cfg),
var_sched=VarianceSchedule(
num_steps=cfg.DIFFUSION.SCHEDULE.NUM_STEPS,
beta_1=cfg.DIFFUSION.SCHEDULE.BETA_1,
beta_T=cfg.DIFFUSION.SCHEDULE.BETA_T,
mode=cfg.DIFFUSION.SCHEDULE.MODE,
),
)
checkpoint = torch.load(cfg.INFERENCE.CHECKPOINT, map_location=device)
model_state_dict = checkpoint["model_state_dict"]
diff_net_dict = {
k[9:]: v for k, v in model_state_dict.items() if k[:9] == "diff_net."
}
diff_net.load_state_dict(diff_net_dict, strict=True)
diff_net.eval()
return diff_net
@torch.no_grad()
def get_audio_feat(wav_path, output_name, wav2vec_model):
    # NOTE: unused placeholder in this script; `audio_feat_path` is not defined in this
    # scope, and the main block below extracts the wav2vec features inline instead.
    pass
@torch.no_grad()
def inference_one_video(
cfg,
audio_path,
style_clip_path,
pose_path,
output_path,
diff_net,
device,
max_audio_len=None,
sample_method="ddim",
ddim_num_step=10,
):
    audio_raw = np.load(audio_path)
if max_audio_len is not None:
audio_raw = audio_raw[: max_audio_len * 50]
gen_num_frames = len(audio_raw) // 2
audio_win_array = get_wav2vec_audio_window(
audio_raw,
start_idx=0,
num_frames=gen_num_frames,
win_size=cfg.WIN_SIZE,
)
audio_win = torch.tensor(audio_win_array).to(device)
audio = audio_win.unsqueeze(0)
# the second parameter is "" because of bad interface design...
style_clip_raw, style_pad_mask_raw = get_video_style_clip(
style_clip_path, "", style_max_len=256, start_idx=0
)
style_clip = style_clip_raw.unsqueeze(0).to(device)
style_pad_mask = (
style_pad_mask_raw.unsqueeze(0).to(device)
if style_pad_mask_raw is not None
else None
)
gen_exp_stack = diff_net.sample(
audio,
style_clip,
style_pad_mask,
output_dim=cfg.DATASET.FACE3D_DIM,
use_cf_guidance=cfg.CF_GUIDANCE.INFERENCE,
cfg_scale=cfg.CF_GUIDANCE.SCALE,
sample_method=sample_method,
ddim_num_step=ddim_num_step,
)
gen_exp = gen_exp_stack[0].cpu().numpy()
    pose = get_pose_params(pose_path)
    # (L, 9): angles, translations, crop parameters
    if len(pose) >= len(gen_exp):
        selected_pose = pose[: len(gen_exp)]
    else:
        # pose is a numpy array (see get_pose_params), so pad it with numpy ops;
        # the torch-style .unsqueeze()/.repeat() calls would fail on an ndarray.
        selected_pose = np.tile(pose[-1][None], (len(gen_exp), 1))
        selected_pose[: len(pose)] = pose
gen_exp_pose = np.concatenate((gen_exp, selected_pose), axis=1)
np.save(output_path, gen_exp_pose)
return output_path
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="inference for demo")
parser.add_argument("--wav_path", type=str, default="", help="path for wav")
parser.add_argument("--image_path", type=str, default="", help="path for image")
parser.add_argument("--disable_img_crop", dest="img_crop", action="store_false")
parser.set_defaults(img_crop=True)
parser.add_argument(
"--style_clip_path", type=str, default="", help="path for style_clip_mat"
)
parser.add_argument("--pose_path", type=str, default="", help="path for pose")
parser.add_argument(
"--max_gen_len",
type=int,
default=1000,
help="The maximum length (seconds) limitation for generating videos",
)
parser.add_argument(
"--cfg_scale",
type=float,
default=1.0,
help="The scale of classifier-free guidance",
)
parser.add_argument(
"--output_name",
type=str,
default="test",
)
parser.add_argument(
"--device",
type=str,
default="cuda",
)
args = parser.parse_args()
if args.device == "cuda" and not torch.cuda.is_available():
print("CUDA is not available, set --device=cpu to use CPU.")
exit(1)
device = torch.device(args.device)
cfg = get_cfg_defaults()
cfg.CF_GUIDANCE.SCALE = args.cfg_scale
cfg.freeze()
tmp_dir = f"tmp/{args.output_name}"
os.makedirs(tmp_dir, exist_ok=True)
# get audio in 16000Hz
wav_16k_path = os.path.join(tmp_dir, f"{args.output_name}_16K.wav")
command = f"ffmpeg -y -i {args.wav_path} -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 {wav_16k_path}"
subprocess.run(command.split())
# get wav2vec feat from audio
wav2vec_processor = Wav2Vec2Processor.from_pretrained(
"jonatasgrosman/wav2vec2-large-xlsr-53-english"
)
wav2vec_model = (
Wav2Vec2Model.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
.eval()
.to(device)
)
speech_array, sampling_rate = torchaudio.load(wav_16k_path)
audio_data = speech_array.squeeze().numpy()
inputs = wav2vec_processor(
audio_data, sampling_rate=16_000, return_tensors="pt", padding=True
)
with torch.no_grad():
audio_embedding = wav2vec_model(
inputs.input_values.to(device), return_dict=False
)[0]
audio_feat_path = os.path.join(tmp_dir, f"{args.output_name}_wav2vec.npy")
np.save(audio_feat_path, audio_embedding[0].cpu().numpy())
# get src image
src_img_path = os.path.join(tmp_dir, "src_img.png")
if args.img_crop: | crop_src_image(args.image_path, src_img_path, 0.4) | 4 | 2023-12-28 05:39:31+00:00 | 12k |
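The gold next line for this row crops the source image; below is a rough sketch of how the rest of the demo pipeline could be assembled from the helpers listed in this row's context (`get_diff_net`, `inference_one_video`, `get_netG`, `render_video`). The renderer checkpoint path, output locations, frame rate, and the `shutil.copy` fallback are assumptions for illustration, not code from the DreamTalk repository; only the `crop_src_image(..., 0.4)` call mirrors the gold next line.

```python
# Sketch only: wiring the helpers shown in this row's context. Paths and kwargs are assumed.
if args.img_crop:
    crop_src_image(args.image_path, src_img_path, increase_ratio=0.4)
else:
    shutil.copy(args.image_path, src_img_path)

with torch.no_grad():
    # 1) sample expression coefficients from the diffusion net, then append pose params
    diff_net = get_diff_net(cfg, device).to(device)
    face_motion_path = os.path.join(tmp_dir, f"{args.output_name}_facemotion.npy")
    inference_one_video(
        cfg,
        audio_feat_path,
        args.style_clip_path,
        args.pose_path,
        face_motion_path,
        diff_net,
        device,
        max_audio_len=args.max_gen_len,
    )
    # 2) render the (expression + pose) sequence into a talking-head video with audio
    renderer = get_netG("checkpoints/renderer.pt", device)  # assumed checkpoint location
    render_video(
        renderer,
        src_img_path,
        face_motion_path,
        wav_16k_path,
        os.path.join(tmp_dir, f"{args.output_name}.mp4"),  # assumed output location
        device,
        fps=25,  # assumed frame rate
    )
```

Keeping coefficient generation (`inference_one_video`) separate from rendering (`render_video`) lets the diffusion sampling and the image-space renderer be tested independently.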
jiawei-ren/dreamgaussian4d | diffusers/src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py | [
{
"identifier": "ConfigMixin",
"path": "diffusers/src/diffusers/configuration_utils.py",
"snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [`~ConfigMixin.save_config`] methods for loading, downloading, and\n saving classes that inherit from [`ConfigMixin`].\n\n Class attributes:\n - **config_name** (`str`) -- A filename under which the config should stored when calling\n [`~ConfigMixin.save_config`] (should be overridden by parent class).\n - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be\n overridden by subclass).\n - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).\n - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the `init` function\n should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by\n subclass).\n \"\"\"\n\n config_name = None\n ignore_for_config = []\n has_compatibles = False\n\n _deprecated_kwargs = []\n\n def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"The only reason we overwrite `getattr` here is to gracefully deprecate accessing\n config attributes directly. See https://github.com/huggingface/diffusers/pull/3129\n\n Tihs funtion is mostly copied from PyTorch's __getattr__ overwrite:\n https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module\n \"\"\"\n\n is_in_config = \"_internal_dict\" in self.__dict__ and hasattr(self.__dict__[\"_internal_dict\"], name)\n is_attribute = name in self.__dict__\n\n if is_in_config and not is_attribute:\n deprecation_message = f\"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'scheduler.config.{name}'.\"\n deprecate(\"direct config name access\", \"1.0.0\", deprecation_message, standard_warn=False)\n return self._internal_dict[name]\n\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{name}'\")\n\n def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a configuration object to the directory specified in `save_directory` so that it can be reloaded using the\n [`~ConfigMixin.from_config`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file is saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face Hub after saving it. 
You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n # If we save using the predefined names, we can load using `from_config`\n output_config_file = os.path.join(save_directory, self.config_name)\n\n self.to_json_file(output_config_file)\n logger.info(f\"Configuration saved in {output_config_file}\")\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n private = kwargs.pop(\"private\", False)\n create_pr = kwargs.pop(\"create_pr\", False)\n token = kwargs.pop(\"token\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id\n\n self._upload_folder(\n save_directory,\n repo_id,\n token=token,\n commit_message=commit_message,\n create_pr=create_pr,\n )\n\n @classmethod\n def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):\n r\"\"\"\n Instantiate a Python class from a config dictionary.\n\n Parameters:\n config (`Dict[str, Any]`):\n A config dictionary from which the Python class is instantiated. Make sure to only load configuration\n files of compatible classes.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it is loaded) and initiate the Python class.\n `**kwargs` are passed directly to the underlying scheduler/model's `__init__` method and eventually\n overwrite the same named arguments in `config`.\n\n Returns:\n [`ModelMixin`] or [`SchedulerMixin`]:\n A model or scheduler object instantiated from a config dictionary.\n\n Examples:\n\n ```python\n >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler\n\n >>> # Download scheduler from huggingface.co and cache.\n >>> scheduler = DDPMScheduler.from_pretrained(\"google/ddpm-cifar10-32\")\n\n >>> # Instantiate DDIM scheduler class with same config as DDPM\n >>> scheduler = DDIMScheduler.from_config(scheduler.config)\n\n >>> # Instantiate PNDM scheduler class with same config as DDPM\n >>> scheduler = PNDMScheduler.from_config(scheduler.config)\n ```\n \"\"\"\n # <===== TO BE REMOVED WITH DEPRECATION\n # TODO(Patrick) - make sure to remove the following lines when config==\"model_path\" is deprecated\n if \"pretrained_model_name_or_path\" in kwargs:\n config = kwargs.pop(\"pretrained_model_name_or_path\")\n\n if config is None:\n raise ValueError(\"Please make sure to provide a config as the first positional argument.\")\n # ======>\n\n if not isinstance(config, dict):\n deprecation_message = \"It is deprecated to pass a pretrained model name or path to `from_config`.\"\n if \"Scheduler\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead.\"\n \" Otherwise, please make sure to pass a configuration dictionary instead. 
This functionality will\"\n \" be removed in v1.0.0.\"\n )\n elif \"Model\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a model, please use {cls}.load_config(...) followed by\"\n f\" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary\"\n \" instead. This functionality will be removed in v1.0.0.\"\n )\n deprecate(\"config-passed-as-path\", \"1.0.0\", deprecation_message, standard_warn=False)\n config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs)\n\n init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs)\n\n # Allow dtype to be specified on initialization\n if \"dtype\" in unused_kwargs:\n init_dict[\"dtype\"] = unused_kwargs.pop(\"dtype\")\n\n # add possible deprecated kwargs\n for deprecated_kwarg in cls._deprecated_kwargs:\n if deprecated_kwarg in unused_kwargs:\n init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg)\n\n # Return model and optionally state and/or unused_kwargs\n model = cls(**init_dict)\n\n # make sure to also save config parameters that might be used for compatible classes\n model.register_to_config(**hidden_dict)\n\n # add hidden kwargs of compatible classes to unused_kwargs\n unused_kwargs = {**unused_kwargs, **hidden_dict}\n\n if return_unused_kwargs:\n return (model, unused_kwargs)\n else:\n return model\n\n @classmethod\n def get_config_dict(cls, *args, **kwargs):\n deprecation_message = (\n f\" The function get_config_dict is deprecated. Please use {cls}.load_config instead. This function will be\"\n \" removed in version v1.0.0\"\n )\n deprecate(\"get_config_dict\", \"1.0.0\", deprecation_message, standard_warn=False)\n return cls.load_config(*args, **kwargs)\n\n @classmethod\n def load_config(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n return_unused_kwargs=False,\n return_commit_hash=False,\n **kwargs,\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n r\"\"\"\n Load a model or scheduler configuration.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing model weights saved with\n [`~ConfigMixin.save_config`].\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only (`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. 
If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n return_unused_kwargs (`bool`, *optional*, defaults to `False):\n Whether unused keyword arguments of the config are returned.\n return_commit_hash (`bool`, *optional*, defaults to `False):\n Whether the `commit_hash` of the loaded configuration are returned.\n\n Returns:\n `dict`:\n A dictionary of all the parameters stored in a JSON configuration file.\n\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n revision = kwargs.pop(\"revision\", None)\n _ = kwargs.pop(\"mirror\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n user_agent = kwargs.pop(\"user_agent\", {})\n\n user_agent = {**user_agent, \"file_type\": \"config\"}\n user_agent = http_user_agent(user_agent)\n\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n\n if cls.config_name is None:\n raise ValueError(\n \"`self.config_name` is not defined. Note that one should not load a config from \"\n \"`ConfigMixin`. 
Please make sure to define `config_name` in a class inheriting from `ConfigMixin`\"\n )\n\n if os.path.isfile(pretrained_model_name_or_path):\n config_file = pretrained_model_name_or_path\n elif os.path.isdir(pretrained_model_name_or_path):\n if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)):\n # Load from a PyTorch checkpoint\n config_file = os.path.join(pretrained_model_name_or_path, cls.config_name)\n elif subfolder is not None and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n ):\n config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n else:\n raise EnvironmentError(\n f\"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}.\"\n )\n else:\n try:\n # Load from URL or cache if already cached\n config_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=cls.config_name,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision,\n )\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier\"\n \" listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a\"\n \" token having permission to this repo with `use_auth_token` or log in with `huggingface-cli\"\n \" login`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for\"\n \" this model name. Check the model page at\"\n f\" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n \"There was a specific connection error when trying to load\"\n f\" {pretrained_model_name_or_path}:\\n{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it\"\n f\" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a\"\n f\" directory containing a {cls.config_name} file.\\nCheckout your internet connection or see how to\"\n \" run the library in offline mode at\"\n \" 'https://huggingface.co/docs/diffusers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
\"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a {cls.config_name} file\"\n )\n\n try:\n # Load config dict\n config_dict = cls._dict_from_json_file(config_file)\n\n commit_hash = extract_commit_hash(config_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise EnvironmentError(f\"It looks like the config file at '{config_file}' is not a valid JSON file.\")\n\n if not (return_unused_kwargs or return_commit_hash):\n return config_dict\n\n outputs = (config_dict,)\n\n if return_unused_kwargs:\n outputs += (kwargs,)\n\n if return_commit_hash:\n outputs += (commit_hash,)\n\n return outputs\n\n @staticmethod\n def _get_init_keys(cls):\n return set(dict(inspect.signature(cls.__init__).parameters).keys())\n\n @classmethod\n def extract_init_dict(cls, config_dict, **kwargs):\n # Skip keys that were not present in the original config, so default __init__ values were used\n used_defaults = config_dict.get(\"_use_default_values\", [])\n config_dict = {k: v for k, v in config_dict.items() if k not in used_defaults and k != \"_use_default_values\"}\n\n # 0. Copy origin config dict\n original_dict = dict(config_dict.items())\n\n # 1. Retrieve expected config attributes from __init__ signature\n expected_keys = cls._get_init_keys(cls)\n expected_keys.remove(\"self\")\n # remove general kwargs if present in dict\n if \"kwargs\" in expected_keys:\n expected_keys.remove(\"kwargs\")\n # remove flax internal keys\n if hasattr(cls, \"_flax_internal_args\"):\n for arg in cls._flax_internal_args:\n expected_keys.remove(arg)\n\n # 2. Remove attributes that cannot be expected from expected config attributes\n # remove keys to be ignored\n if len(cls.ignore_for_config) > 0:\n expected_keys = expected_keys - set(cls.ignore_for_config)\n\n # load diffusers library to import compatible and original scheduler\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n\n if cls.has_compatibles:\n compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)]\n else:\n compatible_classes = []\n\n expected_keys_comp_cls = set()\n for c in compatible_classes:\n expected_keys_c = cls._get_init_keys(c)\n expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c)\n expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls)\n config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls}\n\n # remove attributes from orig class that cannot be expected\n orig_cls_name = config_dict.pop(\"_class_name\", cls.__name__)\n if (\n isinstance(orig_cls_name, str)\n and orig_cls_name != cls.__name__\n and hasattr(diffusers_library, orig_cls_name)\n ):\n orig_cls = getattr(diffusers_library, orig_cls_name)\n unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys\n config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig}\n elif not isinstance(orig_cls_name, str) and not isinstance(orig_cls_name, (list, tuple)):\n raise ValueError(\n \"Make sure that the `_class_name` is of type string or list of string (for custom pipelines).\"\n )\n\n # remove private attributes\n config_dict = {k: v for k, v in config_dict.items() if not k.startswith(\"_\")}\n\n # 3. 
Create keyword arguments that will be passed to __init__ from expected keyword arguments\n init_dict = {}\n for key in expected_keys:\n # if config param is passed to kwarg and is present in config dict\n # it should overwrite existing config dict key\n if key in kwargs and key in config_dict:\n config_dict[key] = kwargs.pop(key)\n\n if key in kwargs:\n # overwrite key\n init_dict[key] = kwargs.pop(key)\n elif key in config_dict:\n # use value from config dict\n init_dict[key] = config_dict.pop(key)\n\n # 4. Give nice warning if unexpected values have been passed\n if len(config_dict) > 0:\n logger.warning(\n f\"The config attributes {config_dict} were passed to {cls.__name__}, \"\n \"but are not expected and will be ignored. Please verify your \"\n f\"{cls.config_name} configuration file.\"\n )\n\n # 5. Give nice info if config attributes are initiliazed to default because they have not been passed\n passed_keys = set(init_dict.keys())\n if len(expected_keys - passed_keys) > 0:\n logger.info(\n f\"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values.\"\n )\n\n # 6. Define unused keyword arguments\n unused_kwargs = {**config_dict, **kwargs}\n\n # 7. Define \"hidden\" config parameters that were saved for compatible classes\n hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict}\n\n return init_dict, unused_kwargs, hidden_config_dict\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __repr__(self):\n return f\"{self.__class__.__name__} {self.to_json_string()}\"\n\n @property\n def config(self) -> Dict[str, Any]:\n \"\"\"\n Returns the config of the class as a frozen dictionary\n\n Returns:\n `Dict[str, Any]`: Config of the class.\n \"\"\"\n return self._internal_dict\n\n def to_json_string(self) -> str:\n \"\"\"\n Serializes the configuration instance to a JSON string.\n\n Returns:\n `str`:\n String containing all the attributes that make up the configuration instance in JSON format.\n \"\"\"\n config_dict = self._internal_dict if hasattr(self, \"_internal_dict\") else {}\n config_dict[\"_class_name\"] = self.__class__.__name__\n config_dict[\"_diffusers_version\"] = __version__\n\n def to_json_saveable(value):\n if isinstance(value, np.ndarray):\n value = value.tolist()\n elif isinstance(value, PosixPath):\n value = str(value)\n return value\n\n config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()}\n # Don't save \"_ignore_files\" or \"_use_default_values\"\n config_dict.pop(\"_ignore_files\", None)\n config_dict.pop(\"_use_default_values\", None)\n\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n \"\"\"\n Save the configuration instance's parameters to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file to save a configuration instance's parameters.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())"
},
{
"identifier": "register_to_config",
"path": "diffusers/src/diffusers/configuration_utils.py",
"snippet": "def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)"
},
{
"identifier": "randn_tensor",
"path": "diffusers/src/diffusers/utils/torch_utils.py",
"snippet": "def randn_tensor(\n shape: Union[Tuple, List],\n generator: Optional[Union[List[\"torch.Generator\"], \"torch.Generator\"]] = None,\n device: Optional[\"torch.device\"] = None,\n dtype: Optional[\"torch.dtype\"] = None,\n layout: Optional[\"torch.layout\"] = None,\n):\n \"\"\"A helper function to create random tensors on the desired `device` with the desired `dtype`. When\n passing a list of generators, you can seed each batch size individually. If CPU generators are passed, the tensor\n is always created on the CPU.\n \"\"\"\n # device on which tensor is created defaults to device\n rand_device = device\n batch_size = shape[0]\n\n layout = layout or torch.strided\n device = device or torch.device(\"cpu\")\n\n if generator is not None:\n gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type\n if gen_device_type != device.type and gen_device_type == \"cpu\":\n rand_device = \"cpu\"\n if device != \"mps\":\n logger.info(\n f\"The passed generator was created on 'cpu' even though a tensor on {device} was expected.\"\n f\" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably\"\n f\" slighly speed up this function by passing a generator that was created on the {device} device.\"\n )\n elif gen_device_type != device.type and gen_device_type == \"cuda\":\n raise ValueError(f\"Cannot generate a {device} tensor from a generator of type {gen_device_type}.\")\n\n # make sure generator list of length 1 is treated like a non-list\n if isinstance(generator, list) and len(generator) == 1:\n generator = generator[0]\n\n if isinstance(generator, list):\n shape = (1,) + shape[1:]\n latents = [\n torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout)\n for i in range(batch_size)\n ]\n latents = torch.cat(latents, dim=0).to(device)\n else:\n latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device)\n\n return latents"
},
{
"identifier": "KarrasDiffusionSchedulers",
"path": "diffusers/src/diffusers/schedulers/scheduling_utils.py",
"snippet": "class KarrasDiffusionSchedulers(Enum):\n DDIMScheduler = 1\n DDPMScheduler = 2\n PNDMScheduler = 3\n LMSDiscreteScheduler = 4\n EulerDiscreteScheduler = 5\n HeunDiscreteScheduler = 6\n EulerAncestralDiscreteScheduler = 7\n DPMSolverMultistepScheduler = 8\n DPMSolverSinglestepScheduler = 9\n KDPM2DiscreteScheduler = 10\n KDPM2AncestralDiscreteScheduler = 11\n DEISMultistepScheduler = 12\n UniPCMultistepScheduler = 13\n DPMSolverSDEScheduler = 14"
},
{
"identifier": "SchedulerMixin",
"path": "diffusers/src/diffusers/schedulers/scheduling_utils.py",
"snippet": "class SchedulerMixin(PushToHubMixin):\n \"\"\"\n Base class for all schedulers.\n\n [`SchedulerMixin`] contains common functions shared by all schedulers such as general loading and saving\n functionalities.\n\n [`ConfigMixin`] takes care of storing the configuration attributes (like `num_train_timesteps`) that are passed to\n the scheduler's `__init__` function, and the attributes can be accessed by `scheduler.config.num_train_timesteps`.\n\n Class attributes:\n - **_compatibles** (`List[str]`) -- A list of scheduler classes that are compatible with the parent scheduler\n class. Use [`~ConfigMixin.from_config`] to load a different compatible scheduler class (should be overridden\n by parent class).\n \"\"\"\n\n config_name = SCHEDULER_CONFIG_NAME\n _compatibles = []\n has_compatibles = True\n\n @classmethod\n def from_pretrained(\n cls,\n pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,\n subfolder: Optional[str] = None,\n return_unused_kwargs=False,\n **kwargs,\n ):\n r\"\"\"\n Instantiate a scheduler from a pre-defined JSON configuration file in a local directory or Hub repository.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing the scheduler\n configuration saved with [`~SchedulerMixin.save_pretrained`].\n subfolder (`str`, *optional*):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n\n <Tip>\n\n To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with\n `huggingface-cli login`. 
You can also activate the special\n [\"offline-mode\"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a\n firewalled environment.\n\n </Tip>\n\n \"\"\"\n config, kwargs, commit_hash = cls.load_config(\n pretrained_model_name_or_path=pretrained_model_name_or_path,\n subfolder=subfolder,\n return_unused_kwargs=True,\n return_commit_hash=True,\n **kwargs,\n )\n return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)\n\n def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a scheduler configuration object to a directory so that it can be reloaded using the\n [`~SchedulerMixin.from_pretrained`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file will be saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)\n\n @property\n def compatibles(self):\n \"\"\"\n Returns all schedulers that are compatible with this scheduler\n\n Returns:\n `List[SchedulerMixin]`: List of compatible schedulers\n \"\"\"\n return self._get_compatibles()\n\n @classmethod\n def _get_compatibles(cls):\n compatible_classes_str = list(set([cls.__name__] + cls._compatibles))\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n compatible_classes = [\n getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)\n ]\n return compatible_classes"
},
{
"identifier": "SchedulerOutput",
"path": "diffusers/src/diffusers/schedulers/scheduling_utils.py",
"snippet": "class SchedulerOutput(BaseOutput):\n \"\"\"\n Base class for the output of a scheduler's `step` function.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n \"\"\"\n\n prev_sample: torch.FloatTensor"
}
] | import math
import numpy as np
import torch
from collections import defaultdict
from typing import List, Optional, Tuple, Union
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils.torch_utils import randn_tensor
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput | 8,939 | # Copyright 2023 Katherine Crowson, The HuggingFace Team and hlky. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
def betas_for_alpha_bar(
num_diffusion_timesteps,
max_beta=0.999,
alpha_transform_type="cosine",
):
"""
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
(1-beta) over time from t = [0,1].
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
to that part of the diffusion process.
Args:
num_diffusion_timesteps (`int`): the number of betas to produce.
max_beta (`float`): the maximum beta to use; use values lower than 1 to
prevent singularities.
alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
Choose from `cosine` or `exp`
Returns:
betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(t):
return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(t):
return math.exp(t * -12.0)
else:
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
return torch.tensor(betas, dtype=torch.float32)
| # Copyright 2023 Katherine Crowson, The HuggingFace Team and hlky. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
def betas_for_alpha_bar(
num_diffusion_timesteps,
max_beta=0.999,
alpha_transform_type="cosine",
):
"""
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
(1-beta) over time from t = [0,1].
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
to that part of the diffusion process.
Args:
num_diffusion_timesteps (`int`): the number of betas to produce.
max_beta (`float`): the maximum beta to use; use values lower than 1 to
prevent singularities.
alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
Choose from `cosine` or `exp`
Returns:
betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(t):
return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(t):
return math.exp(t * -12.0)
else:
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
return torch.tensor(betas, dtype=torch.float32)
| class KDPM2AncestralDiscreteScheduler(SchedulerMixin, ConfigMixin): | 4 | 2023-12-28 08:17:40+00:00 | 12k |
FoundationVision/UniRef | projects/UniRef/uniref/models/segment_anything/build_sam.py | [
{
"identifier": "ImageEncoderViT",
"path": "projects/UniRef/uniref/models/segment_anything/modeling/image_encoder.py",
"snippet": "class ImageEncoderViT(nn.Module):\n def __init__(\n self,\n img_size: int = 1024,\n patch_size: int = 16,\n in_chans: int = 3,\n embed_dim: int = 768,\n depth: int = 12,\n num_heads: int = 12,\n mlp_ratio: float = 4.0,\n out_chans: int = 256,\n qkv_bias: bool = True,\n norm_layer: Type[nn.Module] = nn.LayerNorm,\n act_layer: Type[nn.Module] = nn.GELU,\n use_abs_pos: bool = True,\n use_rel_pos: bool = False,\n rel_pos_zero_init: bool = True,\n window_size: int = 0,\n global_attn_indexes: Tuple[int, ...] = (),\n ) -> None:\n \"\"\"\n Args:\n img_size (int): Input image size.\n patch_size (int): Patch size.\n in_chans (int): Number of input image channels.\n embed_dim (int): Patch embedding dimension.\n depth (int): Depth of ViT.\n num_heads (int): Number of attention heads in each ViT block.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool): If True, add a learnable bias to query, key, value.\n norm_layer (nn.Module): Normalization layer.\n act_layer (nn.Module): Activation layer.\n use_abs_pos (bool): If True, use absolute positional embeddings.\n use_rel_pos (bool): If True, add relative positional embeddings to the attention map.\n rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.\n window_size (int): Window size for window attention blocks.\n global_attn_indexes (list): Indexes for blocks using global attention.\n \"\"\"\n super().__init__()\n self.img_size = img_size\n self.embed_dim = embed_dim\n self.out_chans = out_chans\n\n self.patch_embed = PatchEmbed(\n kernel_size=(patch_size, patch_size),\n stride=(patch_size, patch_size),\n in_chans=in_chans,\n embed_dim=embed_dim,\n )\n\n self.pos_embed: Optional[nn.Parameter] = None\n if use_abs_pos:\n # Initialize absolute positional embedding with pretrain image size.\n self.pos_embed = nn.Parameter(\n torch.zeros(\n 1, img_size // patch_size, img_size // patch_size, embed_dim\n )\n )\n\n self.blocks = nn.ModuleList()\n for i in range(depth):\n block = Block(\n dim=embed_dim,\n num_heads=num_heads,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n norm_layer=norm_layer,\n act_layer=act_layer,\n use_rel_pos=use_rel_pos,\n rel_pos_zero_init=rel_pos_zero_init,\n window_size=window_size if i not in global_attn_indexes else 0,\n input_size=(img_size // patch_size, img_size // patch_size),\n )\n self.blocks.append(block)\n\n self.neck = nn.Sequential(\n nn.Conv2d(\n embed_dim,\n out_chans,\n kernel_size=1,\n bias=False,\n ),\n LayerNorm2d(out_chans),\n nn.Conv2d(\n out_chans,\n out_chans,\n kernel_size=3,\n padding=1,\n bias=False,\n ),\n LayerNorm2d(out_chans),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.patch_embed(x)\n if self.pos_embed is not None:\n x = x + self.pos_embed\n # x = x + get_abs_pos(\n # self.pos_embed, self.pretrain_use_cls_token, (x.shape[1], x.shape[2])\n # )\n\n for blk in self.blocks:\n x = blk(x)\n\n dtype = x.dtype\n if dtype == torch.float16: # prevent overflow\n with torch.autocast(device_type=\"cuda\", dtype=torch.float32):\n x = self.neck(x.permute(0, 3, 1, 2))\n x = x.to(dtype)\n else:\n x = self.neck(x.permute(0, 3, 1, 2))\n return x"
},
{
"identifier": "MaskDecoder",
"path": "projects/UniRef/uniref/models/segment_anything/modeling/mask_decoder.py",
"snippet": "class MaskDecoder(nn.Module):\n def __init__(\n self,\n *,\n transformer_dim: int,\n transformer: nn.Module,\n num_multimask_outputs: int = 3,\n activation: Type[nn.Module] = nn.GELU,\n iou_head_depth: int = 3,\n iou_head_hidden_dim: int = 256,\n ) -> None:\n \"\"\"\n Predicts masks given an image and prompt embeddings, using a\n transformer architecture.\n\n Arguments:\n transformer_dim (int): the channel dimension of the transformer\n transformer (nn.Module): the transformer used to predict masks\n num_multimask_outputs (int): the number of masks to predict\n when disambiguating masks\n activation (nn.Module): the type of activation to use when\n upscaling masks\n iou_head_depth (int): the depth of the MLP used to predict\n mask quality\n iou_head_hidden_dim (int): the hidden dimension of the MLP\n used to predict mask quality\n \"\"\"\n super().__init__()\n self.transformer_dim = transformer_dim\n self.transformer = transformer\n\n self.num_multimask_outputs = num_multimask_outputs\n\n self.iou_token = nn.Embedding(1, transformer_dim)\n self.num_mask_tokens = num_multimask_outputs + 1\n self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)\n\n self.output_upscaling = nn.Sequential(\n nn.ConvTranspose2d(\n transformer_dim, transformer_dim // 4, kernel_size=2, stride=2\n ),\n LayerNorm2d(transformer_dim // 4),\n activation(),\n nn.ConvTranspose2d(\n transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2\n ),\n activation(),\n )\n self.output_hypernetworks_mlps = nn.ModuleList(\n [\n MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)\n for i in range(self.num_mask_tokens)\n ]\n )\n\n self.iou_prediction_head = MLP(\n transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth\n )\n\n def forward(\n self,\n image_embeddings: torch.Tensor,\n image_pe: torch.Tensor,\n sparse_prompt_embeddings: torch.Tensor,\n dense_prompt_embeddings: torch.Tensor,\n multimask_output: bool,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks given image and prompt embeddings.\n\n Arguments:\n image_embeddings (torch.Tensor): the embeddings from the image encoder\n image_pe (torch.Tensor): positional encoding with the shape of image_embeddings\n sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes\n dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs\n multimask_output (bool): Whether to return multiple masks or a single\n mask.\n\n Returns:\n torch.Tensor: batched predicted masks\n torch.Tensor: batched predictions of mask quality\n \"\"\"\n masks, iou_pred = self.predict_masks(\n image_embeddings=image_embeddings,\n image_pe=image_pe,\n sparse_prompt_embeddings=sparse_prompt_embeddings,\n dense_prompt_embeddings=dense_prompt_embeddings,\n )\n\n # Select the correct mask or masks for output\n if multimask_output:\n mask_slice = slice(1, None)\n else:\n mask_slice = slice(0, 1)\n masks = masks[:, mask_slice, :, :]\n iou_pred = iou_pred[:, mask_slice]\n\n # Prepare output\n return masks, iou_pred\n\n def predict_masks(\n self,\n image_embeddings: torch.Tensor,\n image_pe: torch.Tensor,\n sparse_prompt_embeddings: torch.Tensor,\n dense_prompt_embeddings: torch.Tensor,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Predicts masks. 
See 'forward' for more details.\"\"\"\n # Concatenate output tokens\n output_tokens = torch.cat(\n [self.iou_token.weight, self.mask_tokens.weight], dim=0\n )\n output_tokens = output_tokens.unsqueeze(0).expand(\n sparse_prompt_embeddings.size(0), -1, -1\n )\n\n tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)\n\n # image_embeddings: [1, C, H, W], tokens: [B, N, C]\n # dense_prompt_embeddings: [B, C, H, W]\n # Expand per-image data in batch direction to be per-mask\n src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)\n src = src + dense_prompt_embeddings\n pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)\n b, c, h, w = src.shape\n\n # Run the transformer\n hs, src = self.transformer(src, pos_src, tokens)\n iou_token_out = hs[:, 0, :]\n mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]\n\n # Upscale mask embeddings and predict masks using the mask tokens\n src = src.transpose(1, 2).view(b, c, h, w)\n upscaled_embedding = self.output_upscaling(src)\n hyper_in_list: List[torch.Tensor] = []\n for i in range(self.num_mask_tokens):\n hyper_in_list.append(\n self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :])\n )\n hyper_in = torch.stack(hyper_in_list, dim=1)\n b, c, h, w = upscaled_embedding.shape\n masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(\n b, self.num_mask_tokens, h, w\n )\n\n # Generate mask quality predictions\n iou_pred = self.iou_prediction_head(iou_token_out)\n\n return masks, iou_pred"
},
{
"identifier": "PromptEncoder",
"path": "projects/UniRef/uniref/models/segment_anything/modeling/prompt_encoder.py",
"snippet": "class PromptEncoder(nn.Module):\n def __init__(\n self,\n embed_dim: int,\n image_embedding_size: Tuple[int, int],\n input_image_size: Tuple[int, int],\n mask_in_chans: int,\n activation: Type[nn.Module] = nn.GELU,\n ) -> None:\n \"\"\"\n Encodes prompts for input to SAM's mask decoder.\n\n Arguments:\n embed_dim (int): The prompts' embedding dimension\n image_embedding_size (tuple(int, int)): The spatial size of the\n image embedding, as (H, W).\n input_image_size (int): The padded size of the image as input\n to the image encoder, as (H, W).\n mask_in_chans (int): The number of hidden channels used for\n encoding input masks.\n activation (nn.Module): The activation to use when encoding\n input masks.\n \"\"\"\n super().__init__()\n self.embed_dim = embed_dim\n self.input_image_size = input_image_size\n self.image_embedding_size = image_embedding_size\n self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)\n\n self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners\n point_embeddings = [\n nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)\n ]\n self.point_embeddings = nn.ModuleList(point_embeddings)\n self.not_a_point_embed = nn.Embedding(1, embed_dim)\n\n self.mask_input_size = (\n 4 * image_embedding_size[0],\n 4 * image_embedding_size[1],\n )\n self.mask_downscaling = nn.Sequential(\n nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),\n LayerNorm2d(mask_in_chans // 4),\n activation(),\n nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),\n LayerNorm2d(mask_in_chans),\n activation(),\n nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),\n )\n self.no_mask_embed = nn.Embedding(1, embed_dim)\n\n def get_dense_pe(self) -> torch.Tensor:\n \"\"\"\n Returns the positional encoding used to encode point prompts,\n applied to a dense set of points the shape of the image encoding.\n\n Returns:\n torch.Tensor: Positional encoding with shape\n 1x(embed_dim)x(embedding_h)x(embedding_w)\n \"\"\"\n return self.pe_layer(self.image_embedding_size).unsqueeze(0)\n\n def _embed_points(\n self,\n points: torch.Tensor,\n labels: torch.Tensor,\n pad: bool,\n ) -> torch.Tensor:\n \"\"\"Embeds point prompts.\"\"\"\n points = points + 0.5 # Shift to center of pixel\n if pad:\n padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)\n padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)\n points = torch.cat([points, padding_point], dim=1)\n labels = torch.cat([labels, padding_label], dim=1)\n point_embedding = self.pe_layer.forward_with_coords(\n points, self.input_image_size\n )\n point_embedding[labels == -1] = 0.0\n point_embedding[labels == -1] += self.not_a_point_embed.weight\n point_embedding[labels == 0] += self.point_embeddings[0].weight\n point_embedding[labels == 1] += self.point_embeddings[1].weight\n return point_embedding\n\n def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds box prompts.\"\"\"\n boxes = boxes + 0.5 # Shift to center of pixel\n coords = boxes.reshape(-1, 2, 2)\n corner_embedding = self.pe_layer.forward_with_coords(\n coords, self.input_image_size\n )\n corner_embedding[:, 0, :] += self.point_embeddings[2].weight\n corner_embedding[:, 1, :] += self.point_embeddings[3].weight\n return corner_embedding\n\n def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds mask inputs.\"\"\"\n mask_embedding = self.mask_downscaling(masks)\n return mask_embedding\n\n def _get_batch_size(\n self,\n points: Optional[Tuple[torch.Tensor, 
torch.Tensor]],\n boxes: Optional[torch.Tensor],\n masks: Optional[torch.Tensor],\n text_embeds: Optional[torch.Tensor],\n ) -> int:\n \"\"\"\n Gets the batch size of the output given the batch size of the input prompts.\n \"\"\"\n if points is not None:\n return points[0].shape[0]\n elif boxes is not None:\n return boxes.shape[0]\n elif masks is not None:\n return masks.shape[0]\n elif text_embeds is not None:\n return text_embeds.shape[0]\n else:\n return 1\n\n def _get_device(self) -> torch.device:\n return self.point_embeddings[0].weight.device\n\n def forward(\n self,\n points: Optional[Tuple[torch.Tensor, torch.Tensor]],\n boxes: Optional[torch.Tensor],\n masks: Optional[torch.Tensor],\n text_embeds: Optional[torch.Tensor],\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Embeds different types of prompts, returning both sparse and dense\n embeddings.\n\n Arguments:\n points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates\n and labels to embed.\n boxes (torch.Tensor or none): boxes to embed\n masks (torch.Tensor or none): masks to embed\n\n Returns:\n torch.Tensor: sparse embeddings for the points and boxes, with shape\n BxNx(embed_dim), where N is determined by the number of input points\n and boxes.\n torch.Tensor: dense embeddings for the masks, in the shape\n Bx(embed_dim)x(embed_H)x(embed_W)\n \"\"\"\n bs = self._get_batch_size(points, boxes, masks, text_embeds)\n sparse_embeddings = torch.empty(\n (bs, 0, self.embed_dim), device=self._get_device()\n )\n if points is not None:\n coords, labels = points\n point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))\n sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)\n if boxes is not None:\n box_embeddings = self._embed_boxes(boxes)\n sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)\n\n if text_embeds is not None:\n sparse_embeddings = torch.cat([sparse_embeddings, text_embeds], dim=1)\n\n if masks is not None:\n dense_embeddings = self._embed_masks(masks)\n else:\n dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(\n bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]\n )\n\n return sparse_embeddings, dense_embeddings"
},
{
"identifier": "Sam",
"path": "projects/UniRef/uniref/models/segment_anything/modeling/sam.py",
"snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\n \"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False\n )\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack(\n [self.preprocess(x[\"image\"]) for x in batched_input], dim=0\n )\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n\n dtype = masks.dtype\n\n masks = F.interpolate(\n masks.float(),\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n # masks = masks.to(dtype)\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(\n masks, original_size, mode=\"bilinear\", align_corners=False\n )\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x"
},
{
"identifier": "TwoWayTransformer",
"path": "projects/UniRef/uniref/models/segment_anything/modeling/transformer.py",
"snippet": "class TwoWayTransformer(nn.Module):\n def __init__(\n self,\n depth: int,\n embedding_dim: int,\n num_heads: int,\n mlp_dim: int,\n activation: Type[nn.Module] = nn.ReLU,\n attention_downsample_rate: int = 2,\n ) -> None:\n \"\"\"\n A transformer decoder that attends to an input image using\n queries whose positional embedding is supplied.\n\n Args:\n depth (int): number of layers in the transformer\n embedding_dim (int): the channel dimension for the input embeddings\n num_heads (int): the number of heads for multihead attention. Must\n divide embedding_dim\n mlp_dim (int): the channel dimension internal to the MLP block\n activation (nn.Module): the activation to use in the MLP block\n \"\"\"\n super().__init__()\n self.depth = depth\n self.embedding_dim = embedding_dim\n self.num_heads = num_heads\n self.mlp_dim = mlp_dim\n self.layers = nn.ModuleList()\n\n for i in range(depth):\n self.layers.append(\n TwoWayAttentionBlock(\n embedding_dim=embedding_dim,\n num_heads=num_heads,\n mlp_dim=mlp_dim,\n activation=activation,\n attention_downsample_rate=attention_downsample_rate,\n skip_first_layer_pe=(i == 0),\n )\n )\n\n self.final_attn_token_to_image = Attention(\n embedding_dim, num_heads, downsample_rate=attention_downsample_rate\n )\n self.norm_final_attn = nn.LayerNorm(embedding_dim)\n\n def forward(\n self,\n image_embedding: Tensor,\n image_pe: Tensor,\n point_embedding: Tensor,\n ) -> Tuple[Tensor, Tensor]:\n \"\"\"\n Args:\n image_embedding (torch.Tensor): image to attend to. Should be shape\n B x embedding_dim x h x w for any h and w.\n image_pe (torch.Tensor): the positional encoding to add to the image. Must\n have the same shape as image_embedding.\n point_embedding (torch.Tensor): the embedding to add to the query points.\n Must have shape B x N_points x embedding_dim for any N_points.\n\n Returns:\n torch.Tensor: the processed point_embedding\n torch.Tensor: the processed image_embedding\n \"\"\"\n # BxCxHxW -> BxHWxC == B x N_image_tokens x C\n bs, c, h, w = image_embedding.shape\n image_embedding = image_embedding.flatten(2).permute(0, 2, 1)\n image_pe = image_pe.flatten(2).permute(0, 2, 1)\n\n # Prepare queries\n queries = point_embedding\n keys = image_embedding\n\n # Apply transformer blocks and final layernorm\n for layer in self.layers:\n queries, keys = layer(\n queries=queries,\n keys=keys,\n query_pe=point_embedding,\n key_pe=image_pe,\n )\n\n # Apply the final attention layer from the points to the image\n q = queries + point_embedding\n k = keys + image_pe\n attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)\n queries = queries + attn_out\n queries = self.norm_final_attn(queries)\n\n return queries, keys"
}
] | from functools import partial
from .modeling import (ImageEncoderViT, MaskDecoder, PromptEncoder, Sam,
TwoWayTransformer)
import torch | 7,329 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def build_sam_vit_h(checkpoint=None):
return _build_sam(
encoder_embed_dim=1280,
encoder_depth=32,
encoder_num_heads=16,
encoder_global_attn_indexes=[7, 15, 23, 31],
checkpoint=checkpoint,
)
build_sam = build_sam_vit_h
def build_sam_vit_l(checkpoint=None):
return _build_sam(
encoder_embed_dim=1024,
encoder_depth=24,
encoder_num_heads=16,
encoder_global_attn_indexes=[5, 11, 17, 23],
checkpoint=checkpoint,
)
def build_sam_vit_b(checkpoint=None):
return _build_sam(
encoder_embed_dim=768,
encoder_depth=12,
encoder_num_heads=12,
encoder_global_attn_indexes=[2, 5, 8, 11],
checkpoint=checkpoint,
)
sam_model_registry = {
"default": build_sam_vit_h,
"vit_h": build_sam_vit_h,
"vit_l": build_sam_vit_l,
"vit_b": build_sam_vit_b,
}
def _build_sam(
encoder_embed_dim,
encoder_depth,
encoder_num_heads,
encoder_global_attn_indexes,
checkpoint=None,
):
prompt_embed_dim = 256
image_size = 1024
vit_patch_size = 16
image_embedding_size = image_size // vit_patch_size
sam = Sam(
image_encoder=ImageEncoderViT(
depth=encoder_depth,
embed_dim=encoder_embed_dim,
img_size=image_size,
mlp_ratio=4,
norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
num_heads=encoder_num_heads,
patch_size=vit_patch_size,
qkv_bias=True,
use_rel_pos=True,
global_attn_indexes=encoder_global_attn_indexes,
window_size=14,
out_chans=prompt_embed_dim,
),
prompt_encoder=PromptEncoder(
embed_dim=prompt_embed_dim,
image_embedding_size=(image_embedding_size, image_embedding_size),
input_image_size=(image_size, image_size),
mask_in_chans=16,
),
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def build_sam_vit_h(checkpoint=None):
return _build_sam(
encoder_embed_dim=1280,
encoder_depth=32,
encoder_num_heads=16,
encoder_global_attn_indexes=[7, 15, 23, 31],
checkpoint=checkpoint,
)
build_sam = build_sam_vit_h
def build_sam_vit_l(checkpoint=None):
return _build_sam(
encoder_embed_dim=1024,
encoder_depth=24,
encoder_num_heads=16,
encoder_global_attn_indexes=[5, 11, 17, 23],
checkpoint=checkpoint,
)
def build_sam_vit_b(checkpoint=None):
return _build_sam(
encoder_embed_dim=768,
encoder_depth=12,
encoder_num_heads=12,
encoder_global_attn_indexes=[2, 5, 8, 11],
checkpoint=checkpoint,
)
sam_model_registry = {
"default": build_sam_vit_h,
"vit_h": build_sam_vit_h,
"vit_l": build_sam_vit_l,
"vit_b": build_sam_vit_b,
}
def _build_sam(
encoder_embed_dim,
encoder_depth,
encoder_num_heads,
encoder_global_attn_indexes,
checkpoint=None,
):
prompt_embed_dim = 256
image_size = 1024
vit_patch_size = 16
image_embedding_size = image_size // vit_patch_size
sam = Sam(
image_encoder=ImageEncoderViT(
depth=encoder_depth,
embed_dim=encoder_embed_dim,
img_size=image_size,
mlp_ratio=4,
norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
num_heads=encoder_num_heads,
patch_size=vit_patch_size,
qkv_bias=True,
use_rel_pos=True,
global_attn_indexes=encoder_global_attn_indexes,
window_size=14,
out_chans=prompt_embed_dim,
),
prompt_encoder=PromptEncoder(
embed_dim=prompt_embed_dim,
image_embedding_size=(image_embedding_size, image_embedding_size),
input_image_size=(image_size, image_size),
mask_in_chans=16,
), | mask_decoder=MaskDecoder( | 1 | 2023-12-22 13:31:33+00:00 | 12k |
xhuangcv/humannorm | threestudio/systems/base.py | [
{
"identifier": "Exporter",
"path": "threestudio/models/exporters/base.py",
"snippet": "class Exporter(BaseObject):\n @dataclass\n class Config(BaseObject.Config):\n save_video: bool = False\n\n cfg: Config\n\n def configure(\n self,\n geometry: BaseImplicitGeometry,\n material: BaseMaterial,\n background: BaseBackground,\n ) -> None:\n @dataclass\n class SubModules:\n geometry: BaseImplicitGeometry\n material: BaseMaterial\n background: BaseBackground\n\n self.sub_modules = SubModules(geometry, material, background)\n\n @property\n def geometry(self) -> BaseImplicitGeometry:\n return self.sub_modules.geometry\n\n @property\n def material(self) -> BaseMaterial:\n return self.sub_modules.material\n\n @property\n def background(self) -> BaseBackground:\n return self.sub_modules.background\n\n def __call__(self, *args, **kwargs) -> List[ExporterOutput]:\n raise NotImplementedError"
},
{
"identifier": "ExporterOutput",
"path": "threestudio/models/exporters/base.py",
"snippet": "class ExporterOutput:\n save_name: str\n save_type: str\n params: Dict[str, Any]"
},
{
"identifier": "parse_optimizer",
"path": "threestudio/systems/utils.py",
"snippet": "def parse_optimizer(config, model):\n if hasattr(config, \"params\"):\n params = [\n {\"params\": get_parameters(model, name), \"name\": name, **args}\n for name, args in config.params.items()\n ]\n threestudio.debug(f\"Specify optimizer params: {config.params}\")\n else:\n params = model.parameters()\n if config.name in [\"FusedAdam\"]:\n import apex\n\n optim = getattr(apex.optimizers, config.name)(params, **config.args)\n elif config.name in [\"Adan\"]:\n from threestudio.systems import optimizers\n\n optim = getattr(optimizers, config.name)(params, **config.args)\n else:\n optim = getattr(torch.optim, config.name)(params, **config.args)\n return optim"
},
{
"identifier": "parse_scheduler",
"path": "threestudio/systems/utils.py",
"snippet": "def parse_scheduler(config, optimizer):\n interval = config.get(\"interval\", \"epoch\")\n assert interval in [\"epoch\", \"step\"]\n if config.name == \"SequentialLR\":\n scheduler = {\n \"scheduler\": lr_scheduler.SequentialLR(\n optimizer,\n [\n parse_scheduler(conf, optimizer)[\"scheduler\"]\n for conf in config.schedulers\n ],\n milestones=config.milestones,\n ),\n \"interval\": interval,\n }\n elif config.name == \"ChainedScheduler\":\n scheduler = {\n \"scheduler\": lr_scheduler.ChainedScheduler(\n [\n parse_scheduler(conf, optimizer)[\"scheduler\"]\n for conf in config.schedulers\n ]\n ),\n \"interval\": interval,\n }\n else:\n scheduler = {\n \"scheduler\": get_scheduler(config.name)(optimizer, **config.args),\n \"interval\": interval,\n }\n return scheduler"
},
{
"identifier": "Updateable",
"path": "threestudio/utils/base.py",
"snippet": "class Updateable:\n def do_update_step(\n self, epoch: int, global_step: int, on_load_weights: bool = False\n ):\n for attr in self.__dir__():\n if attr.startswith(\"_\"):\n continue\n try:\n module = getattr(self, attr)\n except:\n continue # ignore attributes like property, which can't be retrived using getattr?\n if isinstance(module, Updateable):\n module.do_update_step(\n epoch, global_step, on_load_weights=on_load_weights\n )\n self.update_step(epoch, global_step, on_load_weights=on_load_weights)\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n # override this method to implement custom update logic\n # if on_load_weights is True, you should be careful doing things related to model evaluations,\n # as the models and tensors are not guarenteed to be on the same device\n pass"
},
{
"identifier": "update_if_possible",
"path": "threestudio/utils/base.py",
"snippet": "def update_if_possible(module: Any, epoch: int, global_step: int) -> None:\n if isinstance(module, Updateable):\n module.do_update_step(epoch, global_step)"
},
{
"identifier": "parse_structured",
"path": "threestudio/utils/config.py",
"snippet": "def parse_structured(fields: Any, cfg: Optional[Union[dict, DictConfig]] = None) -> Any:\n scfg = OmegaConf.structured(fields(**cfg))\n return scfg"
},
{
"identifier": "C",
"path": "threestudio/utils/misc.py",
"snippet": "def C(value: Any, epoch: int, global_step: int) -> float:\n if isinstance(value, int) or isinstance(value, float):\n pass\n else:\n value = config_to_primitive(value)\n if not isinstance(value, list):\n raise TypeError(\"Scalar specification only supports list, got\", type(value))\n if len(value) == 3:\n value = [0] + value\n assert len(value) == 4\n start_step, start_value, end_value, end_step = value\n if isinstance(end_step, int):\n current_step = global_step\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n elif isinstance(end_step, float):\n current_step = epoch\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n return value"
},
{
"identifier": "cleanup",
"path": "threestudio/utils/misc.py",
"snippet": "def cleanup():\n gc.collect()\n torch.cuda.empty_cache()\n tcnn.free_temporary_memory()"
},
{
"identifier": "get_device",
"path": "threestudio/utils/misc.py",
"snippet": "def get_device():\n return torch.device(f\"cuda:{get_rank()}\")"
},
{
"identifier": "load_module_weights",
"path": "threestudio/utils/misc.py",
"snippet": "def load_module_weights(\n path, module_name=None, ignore_modules=None, map_location=None\n) -> Tuple[dict, int, int]:\n if module_name is not None and ignore_modules is not None:\n raise ValueError(\"module_name and ignore_modules cannot be both set\")\n if map_location is None:\n map_location = get_device()\n\n ckpt = torch.load(path, map_location=map_location)\n state_dict = ckpt[\"state_dict\"]\n state_dict_to_load = state_dict\n\n if ignore_modules is not None:\n state_dict_to_load = {}\n for k, v in state_dict.items():\n ignore = any(\n [k.startswith(ignore_module + \".\") for ignore_module in ignore_modules]\n )\n if ignore:\n continue\n state_dict_to_load[k] = v\n\n if module_name is not None:\n state_dict_to_load = {}\n for k, v in state_dict.items():\n m = re.match(rf\"^{module_name}\\.(.*)$\", k)\n if m is None:\n continue\n state_dict_to_load[m.group(1)] = v\n\n return state_dict_to_load, ckpt[\"epoch\"], ckpt[\"global_step\"]"
},
{
"identifier": "SaverMixin",
"path": "threestudio/utils/saving.py",
"snippet": "class SaverMixin:\n _save_dir: Optional[str] = None\n _wandb_logger: Optional[WandbLogger] = None\n\n def set_save_dir(self, save_dir: str):\n self._save_dir = save_dir\n\n def get_save_dir(self):\n if self._save_dir is None:\n raise ValueError(\"Save dir is not set\")\n return self._save_dir\n\n def convert_data(self, data):\n if data is None:\n return None\n elif isinstance(data, np.ndarray):\n return data\n elif isinstance(data, torch.Tensor):\n return data.detach().cpu().numpy()\n elif isinstance(data, list):\n return [self.convert_data(d) for d in data]\n elif isinstance(data, dict):\n return {k: self.convert_data(v) for k, v in data.items()}\n else:\n raise TypeError(\n \"Data must be in type numpy.ndarray, torch.Tensor, list or dict, getting\",\n type(data),\n )\n\n def get_save_path(self, filename):\n save_path = os.path.join(self.get_save_dir(), filename)\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n return save_path\n\n def create_loggers(self, cfg_loggers: DictConfig) -> None:\n if \"wandb\" in cfg_loggers.keys() and cfg_loggers.wandb.enable:\n self._wandb_logger = WandbLogger(\n project=cfg_loggers.wandb.project, name=cfg_loggers.wandb.name\n )\n\n def get_loggers(self) -> List:\n if self._wandb_logger:\n return [self._wandb_logger]\n else:\n return []\n\n DEFAULT_RGB_KWARGS = {\"data_format\": \"HWC\", \"data_range\": (0, 1)}\n DEFAULT_UV_KWARGS = {\n \"data_format\": \"HWC\",\n \"data_range\": (0, 1),\n \"cmap\": \"checkerboard\",\n }\n DEFAULT_GRAYSCALE_KWARGS = {\"data_range\": None, \"cmap\": \"jet\"}\n DEFAULT_GRID_KWARGS = {\"align\": \"max\"}\n\n def get_rgb_image_(self, img, data_format, data_range, rgba=False):\n img = self.convert_data(img)\n assert data_format in [\"CHW\", \"HWC\"]\n if data_format == \"CHW\":\n img = img.transpose(1, 2, 0)\n if img.dtype != np.uint8:\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (\n (img - data_range[0]) / (data_range[1] - data_range[0]) * 255.0\n ).astype(np.uint8)\n nc = 4 if rgba else 3\n imgs = [img[..., start : start + nc] for start in range(0, img.shape[-1], nc)]\n imgs = [\n img_\n if img_.shape[-1] == nc\n else np.concatenate(\n [\n img_,\n np.zeros(\n (img_.shape[0], img_.shape[1], nc - img_.shape[2]),\n dtype=img_.dtype,\n ),\n ],\n axis=-1,\n )\n for img_ in imgs\n ]\n img = np.concatenate(imgs, axis=1)\n if rgba:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n else:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n def _save_rgb_image(\n self,\n filename,\n img,\n data_format,\n data_range,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n img = self.get_rgb_image_(img, data_format, data_range)\n cv2.imwrite(filename, img)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Image(self.get_save_path(filename)),\n \"trainer/global_step\": step,\n }\n )\n\n def save_rgb_image(\n self,\n filename,\n img,\n data_format=DEFAULT_RGB_KWARGS[\"data_format\"],\n data_range=DEFAULT_RGB_KWARGS[\"data_range\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n save_path = self.get_save_path(filename)\n self._save_rgb_image(save_path, img, data_format, data_range, name, step)\n return save_path\n\n def get_uv_image_(self, img, data_format, data_range, cmap):\n img = self.convert_data(img)\n assert data_format in [\"CHW\", \"HWC\"]\n if data_format == \"CHW\":\n img = img.transpose(1, 2, 0)\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in 
[\"checkerboard\", \"color\"]\n if cmap == \"checkerboard\":\n n_grid = 64\n mask = (img * n_grid).astype(int)\n mask = (mask[..., 0] + mask[..., 1]) % 2 == 0\n img = np.ones((img.shape[0], img.shape[1], 3), dtype=np.uint8) * 255\n img[mask] = np.array([255, 0, 255], dtype=np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif cmap == \"color\":\n img_ = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n img_[..., 0] = (img[..., 0] * 255).astype(np.uint8)\n img_[..., 1] = (img[..., 1] * 255).astype(np.uint8)\n img_ = cv2.cvtColor(img_, cv2.COLOR_RGB2BGR)\n img = img_\n return img\n\n def save_uv_image(\n self,\n filename,\n img,\n data_format=DEFAULT_UV_KWARGS[\"data_format\"],\n data_range=DEFAULT_UV_KWARGS[\"data_range\"],\n cmap=DEFAULT_UV_KWARGS[\"cmap\"],\n ) -> str:\n save_path = self.get_save_path(filename)\n img = self.get_uv_image_(img, data_format, data_range, cmap)\n cv2.imwrite(save_path, img)\n return save_path\n\n def get_grayscale_image_(self, img, data_range, cmap):\n img = self.convert_data(img)\n img = np.nan_to_num(img)\n if data_range is None:\n img = (img - img.min()) / (img.max() - img.min())\n else:\n img = img.clip(data_range[0], data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in [None, \"jet\", \"magma\", \"spectral\"]\n if cmap == None:\n img = (img * 255.0).astype(np.uint8)\n img = np.repeat(img[..., None], 3, axis=2)\n elif cmap == \"jet\":\n img = (img * 255.0).astype(np.uint8)\n img = cv2.applyColorMap(img, cv2.COLORMAP_JET)\n elif cmap == \"magma\":\n img = 1.0 - img\n base = cm.get_cmap(\"magma\")\n num_bins = 256\n colormap = LinearSegmentedColormap.from_list(\n f\"{base.name}{num_bins}\", base(np.linspace(0, 1, num_bins)), num_bins\n )(np.linspace(0, 1, num_bins))[:, :3]\n a = np.floor(img * 255.0)\n b = (a + 1).clip(max=255.0)\n f = img * 255.0 - a\n a = a.astype(np.uint16).clip(0, 255)\n b = b.astype(np.uint16).clip(0, 255)\n img = colormap[a] + (colormap[b] - colormap[a]) * f[..., None]\n img = (img * 255.0).astype(np.uint8)\n elif cmap == \"spectral\":\n colormap = plt.get_cmap(\"Spectral\")\n\n def blend_rgba(image):\n image = image[..., :3] * image[..., -1:] + (\n 1.0 - image[..., -1:]\n ) # blend A to RGB\n return image\n\n img = colormap(img)\n img = blend_rgba(img)\n img = (img * 255).astype(np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n def _save_grayscale_image(\n self,\n filename,\n img,\n data_range,\n cmap,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n img = self.get_grayscale_image_(img, data_range, cmap)\n cv2.imwrite(filename, img)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Image(self.get_save_path(filename)),\n \"trainer/global_step\": step,\n }\n )\n\n def save_grayscale_image(\n self,\n filename,\n img,\n data_range=DEFAULT_GRAYSCALE_KWARGS[\"data_range\"],\n cmap=DEFAULT_GRAYSCALE_KWARGS[\"cmap\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n save_path = self.get_save_path(filename)\n self._save_grayscale_image(save_path, img, data_range, cmap, name, step)\n return save_path\n\n def get_image_grid_(self, imgs, align):\n if isinstance(imgs[0], list):\n return np.concatenate(\n [self.get_image_grid_(row, align) for row in imgs], axis=0\n )\n cols = []\n for col in imgs:\n assert col[\"type\"] in [\"rgb\", \"uv\", \"grayscale\"]\n if col[\"type\"] == \"rgb\":\n rgb_kwargs = self.DEFAULT_RGB_KWARGS.copy()\n rgb_kwargs.update(col[\"kwargs\"])\n 
cols.append(self.get_rgb_image_(col[\"img\"], **rgb_kwargs))\n elif col[\"type\"] == \"uv\":\n uv_kwargs = self.DEFAULT_UV_KWARGS.copy()\n uv_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_uv_image_(col[\"img\"], **uv_kwargs))\n elif col[\"type\"] == \"grayscale\":\n grayscale_kwargs = self.DEFAULT_GRAYSCALE_KWARGS.copy()\n grayscale_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_grayscale_image_(col[\"img\"], **grayscale_kwargs))\n\n if align == \"max\":\n h = max([col.shape[0] for col in cols])\n w = max([col.shape[1] for col in cols])\n elif align == \"min\":\n h = min([col.shape[0] for col in cols])\n w = min([col.shape[1] for col in cols])\n elif isinstance(align, int):\n h = align\n w = align\n elif (\n isinstance(align, tuple)\n and isinstance(align[0], int)\n and isinstance(align[1], int)\n ):\n h, w = align\n else:\n raise ValueError(\n f\"Unsupported image grid align: {align}, should be min, max, int or (int, int)\"\n )\n\n for i in range(len(cols)):\n if cols[i].shape[0] != h or cols[i].shape[1] != w:\n cols[i] = cv2.resize(cols[i], (w, h), interpolation=cv2.INTER_LINEAR)\n return np.concatenate(cols, axis=1)\n\n def save_image_grid(\n self,\n filename,\n imgs,\n align=DEFAULT_GRID_KWARGS[\"align\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n texts: Optional[List[float]] = None,\n ):\n save_path = self.get_save_path(filename)\n img = self.get_image_grid_(imgs, align=align)\n\n if texts is not None:\n img = Image.fromarray(img)\n draw = ImageDraw.Draw(img)\n black, white = (0, 0, 0), (255, 255, 255)\n for i, text in enumerate(texts):\n draw.text((2, (img.size[1] // len(texts)) * i + 1), f\"{text}\", white)\n draw.text((0, (img.size[1] // len(texts)) * i + 1), f\"{text}\", white)\n draw.text((2, (img.size[1] // len(texts)) * i - 1), f\"{text}\", white)\n draw.text((0, (img.size[1] // len(texts)) * i - 1), f\"{text}\", white)\n draw.text((1, (img.size[1] // len(texts)) * i), f\"{text}\", black)\n img = np.asarray(img)\n\n cv2.imwrite(save_path, img)\n if name and self._wandb_logger:\n wandb.log({name: wandb.Image(save_path), \"trainer/global_step\": step})\n return save_path\n\n def save_image(self, filename, img) -> str:\n save_path = self.get_save_path(filename)\n img = self.convert_data(img)\n assert img.dtype == np.uint8 or img.dtype == np.uint16\n if img.ndim == 3 and img.shape[-1] == 3:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif img.ndim == 3 and img.shape[-1] == 4:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n cv2.imwrite(save_path, img)\n return save_path\n\n def save_cubemap(self, filename, img, data_range=(0, 1), rgba=False) -> str:\n save_path = self.get_save_path(filename)\n img = self.convert_data(img)\n assert img.ndim == 4 and img.shape[0] == 6 and img.shape[1] == img.shape[2]\n\n imgs_full = []\n for start in range(0, img.shape[-1], 3):\n img_ = img[..., start : start + 3]\n img_ = np.stack(\n [\n self.get_rgb_image_(img_[i], \"HWC\", data_range, rgba=rgba)\n for i in range(img_.shape[0])\n ],\n axis=0,\n )\n size = img_.shape[1]\n placeholder = np.zeros((size, size, 3), dtype=np.float32)\n img_full = np.concatenate(\n [\n np.concatenate(\n [placeholder, img_[2], placeholder, placeholder], axis=1\n ),\n np.concatenate([img_[1], img_[4], img_[0], img_[5]], axis=1),\n np.concatenate(\n [placeholder, img_[3], placeholder, placeholder], axis=1\n ),\n ],\n axis=0,\n )\n imgs_full.append(img_full)\n\n imgs_full = np.concatenate(imgs_full, axis=1)\n cv2.imwrite(save_path, imgs_full)\n return save_path\n\n def save_data(self, 
filename, data) -> str:\n data = self.convert_data(data)\n if isinstance(data, dict):\n if not filename.endswith(\".npz\"):\n filename += \".npz\"\n save_path = self.get_save_path(filename)\n np.savez(save_path, **data)\n else:\n if not filename.endswith(\".npy\"):\n filename += \".npy\"\n save_path = self.get_save_path(filename)\n np.save(save_path, data)\n return save_path\n\n def save_state_dict(self, filename, data) -> str:\n save_path = self.get_save_path(filename)\n torch.save(data, save_path)\n return save_path\n\n def save_img_sequence(\n self,\n filename,\n img_dir,\n matcher,\n save_format=\"mp4\",\n fps=30,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n assert save_format in [\"gif\", \"mp4\"]\n if not filename.endswith(save_format):\n filename += f\".{save_format}\"\n save_path = self.get_save_path(filename)\n matcher = re.compile(matcher)\n img_dir = os.path.join(self.get_save_dir(), img_dir)\n imgs = []\n for f in os.listdir(img_dir):\n if matcher.search(f):\n imgs.append(f)\n imgs = sorted(imgs, key=lambda f: int(matcher.search(f).groups()[0]))\n imgs = [cv2.imread(os.path.join(img_dir, f)) for f in imgs]\n\n if save_format == \"gif\":\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(save_path, imgs, fps=fps, palettesize=256)\n elif save_format == \"mp4\":\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(save_path, imgs, fps=fps)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Video(save_path, format=\"mp4\"),\n \"trainer/global_step\": step,\n }\n )\n return save_path\n\n def save_mesh(self, filename, v_pos, t_pos_idx, v_tex=None, t_tex_idx=None) -> str:\n save_path = self.get_save_path(filename)\n v_pos = self.convert_data(v_pos)\n t_pos_idx = self.convert_data(t_pos_idx)\n mesh = trimesh.Trimesh(vertices=v_pos, faces=t_pos_idx)\n mesh.export(save_path)\n return save_path\n\n def save_obj(\n self,\n filename: str,\n mesh: Mesh,\n save_mat: bool = False,\n save_normal: bool = False,\n save_uv: bool = False,\n save_vertex_color: bool = False,\n map_Kd: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Ks: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Bump: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Pm: Optional[Float[Tensor, \"H W 1\"]] = None,\n map_Pr: Optional[Float[Tensor, \"H W 1\"]] = None,\n map_format: str = \"jpg\",\n ) -> List[str]:\n save_paths: List[str] = []\n if not filename.endswith(\".obj\"):\n filename += \".obj\"\n v_pos, t_pos_idx = self.convert_data(mesh.v_pos), self.convert_data(\n mesh.t_pos_idx\n )\n v_nrm, v_tex, t_tex_idx, v_rgb = None, None, None, None\n if save_normal:\n v_nrm = self.convert_data(mesh.v_nrm)\n if save_uv:\n v_tex, t_tex_idx = self.convert_data(mesh.v_tex), self.convert_data(\n mesh.t_tex_idx\n )\n if save_vertex_color:\n v_rgb = self.convert_data(mesh.v_rgb)\n matname, mtllib = None, None\n if save_mat:\n matname = \"default\"\n mtl_filename = filename.replace(\".obj\", \".mtl\")\n mtllib = os.path.basename(mtl_filename)\n mtl_save_paths = self._save_mtl(\n mtl_filename,\n matname,\n map_Kd=self.convert_data(map_Kd),\n map_Ks=self.convert_data(map_Ks),\n map_Bump=self.convert_data(map_Bump),\n map_Pm=self.convert_data(map_Pm),\n map_Pr=self.convert_data(map_Pr),\n map_format=map_format,\n )\n save_paths += mtl_save_paths\n obj_save_path = self._save_obj(\n filename,\n v_pos,\n t_pos_idx,\n v_nrm=v_nrm,\n v_tex=v_tex,\n t_tex_idx=t_tex_idx,\n v_rgb=v_rgb,\n matname=matname,\n mtllib=mtllib,\n )\n 
save_paths.append(obj_save_path)\n return save_paths\n\n def _save_obj(\n self,\n filename,\n v_pos,\n t_pos_idx,\n v_nrm=None,\n v_tex=None,\n t_tex_idx=None,\n v_rgb=None,\n matname=None,\n mtllib=None,\n ) -> str:\n obj_str = \"\"\n if matname is not None:\n obj_str += f\"mtllib {mtllib}\\n\"\n obj_str += f\"g object\\n\"\n obj_str += f\"usemtl {matname}\\n\"\n for i in range(len(v_pos)):\n obj_str += f\"v {v_pos[i][0]} {v_pos[i][1]} {v_pos[i][2]}\"\n if v_rgb is not None:\n obj_str += f\" {v_rgb[i][0]} {v_rgb[i][1]} {v_rgb[i][2]}\"\n obj_str += \"\\n\"\n if v_nrm is not None:\n for v in v_nrm:\n obj_str += f\"vn {v[0]} {v[1]} {v[2]}\\n\"\n if v_tex is not None:\n for v in v_tex:\n obj_str += f\"vt {v[0]} {1.0 - v[1]}\\n\"\n\n for i in range(len(t_pos_idx)):\n obj_str += \"f\"\n for j in range(3):\n obj_str += f\" {t_pos_idx[i][j] + 1}/\"\n if v_tex is not None:\n obj_str += f\"{t_tex_idx[i][j] + 1}\"\n obj_str += \"/\"\n if v_nrm is not None:\n obj_str += f\"{t_pos_idx[i][j] + 1}\"\n obj_str += \"\\n\"\n\n save_path = self.get_save_path(filename)\n with open(save_path, \"w\") as f:\n f.write(obj_str)\n return save_path\n\n def _save_mtl(\n self,\n filename,\n matname,\n Ka=(0.0, 0.0, 0.0),\n Kd=(1.0, 1.0, 1.0),\n Ks=(0.0, 0.0, 0.0),\n map_Kd=None,\n map_Ks=None,\n map_Bump=None,\n map_Pm=None,\n map_Pr=None,\n map_format=\"jpg\",\n step: Optional[int] = None,\n ) -> List[str]:\n mtl_save_path = self.get_save_path(filename)\n save_paths = [mtl_save_path]\n mtl_str = f\"newmtl {matname}\\n\"\n mtl_str += f\"Ka {Ka[0]} {Ka[1]} {Ka[2]}\\n\"\n if map_Kd is not None:\n map_Kd_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_kd.{map_format}\"\n )\n mtl_str += f\"map_Kd texture_kd.{map_format}\\n\"\n self._save_rgb_image(\n map_Kd_save_path,\n map_Kd,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Kd\",\n step=step,\n )\n save_paths.append(map_Kd_save_path)\n else:\n mtl_str += f\"Kd {Kd[0]} {Kd[1]} {Kd[2]}\\n\"\n if map_Ks is not None:\n map_Ks_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_ks.{map_format}\"\n )\n mtl_str += f\"map_Ks texture_ks.{map_format}\\n\"\n self._save_rgb_image(\n map_Ks_save_path,\n map_Ks,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Ks\",\n step=step,\n )\n save_paths.append(map_Ks_save_path)\n else:\n mtl_str += f\"Ks {Ks[0]} {Ks[1]} {Ks[2]}\\n\"\n if map_Bump is not None:\n map_Bump_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_nrm.{map_format}\"\n )\n mtl_str += f\"map_Bump texture_nrm.{map_format}\\n\"\n self._save_rgb_image(\n map_Bump_save_path,\n map_Bump,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Bump\",\n step=step,\n )\n save_paths.append(map_Bump_save_path)\n if map_Pm is not None:\n map_Pm_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_metallic.{map_format}\"\n )\n mtl_str += f\"map_Pm texture_metallic.{map_format}\\n\"\n self._save_grayscale_image(\n map_Pm_save_path,\n map_Pm,\n data_range=(0, 1),\n cmap=None,\n name=f\"{matname}_refl\",\n step=step,\n )\n save_paths.append(map_Pm_save_path)\n if map_Pr is not None:\n map_Pr_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_roughness.{map_format}\"\n )\n mtl_str += f\"map_Pr texture_roughness.{map_format}\\n\"\n self._save_grayscale_image(\n map_Pr_save_path,\n map_Pr,\n data_range=(0, 1),\n cmap=None,\n name=f\"{matname}_Ns\",\n step=step,\n )\n save_paths.append(map_Pr_save_path)\n with open(self.get_save_path(filename), \"w\") 
as f:\n f.write(mtl_str)\n return save_paths\n\n def save_file(self, filename, src_path) -> str:\n save_path = self.get_save_path(filename)\n shutil.copyfile(src_path, save_path)\n return save_path\n\n def save_json(self, filename, payload) -> str:\n save_path = self.get_save_path(filename)\n with open(save_path, \"w\") as f:\n f.write(json.dumps(payload))\n return save_path"
}
] | import os
import pytorch_lightning as pl
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass, field
from threestudio.models.exporters.base import Exporter, ExporterOutput
from threestudio.systems.utils import parse_optimizer, parse_scheduler
from threestudio.utils.base import Updateable, update_if_possible
from threestudio.utils.config import parse_structured
from threestudio.utils.misc import C, cleanup, get_device, load_module_weights
from threestudio.utils.saving import SaverMixin
from threestudio.utils.typing import *
from threestudio.utils.config import load_config, parse_structured | 10,445 |
class BaseLift3DSystem(BaseSystem):
@dataclass
class Config(BaseSystem.Config):
geometry_type: str = ""
geometry: dict = field(default_factory=dict)
geometry_convert_from: Optional[str] = None
geometry_convert_inherit_texture: bool = False
# used to override configurations of the previous geometry being converted from,
# for example isosurface_threshold
geometry_convert_override: dict = field(default_factory=dict)
material_type: str = ""
material: dict = field(default_factory=dict)
background_type: str = ""
background: dict = field(default_factory=dict)
renderer_type: str = ""
renderer: dict = field(default_factory=dict)
guidance_type: str = ""
guidance: dict = field(default_factory=dict)
guidance_type_add: str = ""
guidance_add: dict = field(default_factory=dict)
prompt_processor_type: str = ""
prompt_processor: dict = field(default_factory=dict)
prompt_processor_type_add: str = ""
prompt_processor_add: dict = field(default_factory=dict)
# geometry export configurations, no need to specify in training
exporter_type: str = "mesh-exporter"
exporter: dict = field(default_factory=dict)
cfg: Config
def configure(self) -> None:
if (
self.cfg.geometry_convert_from # from_coarse must be specified
and not self.cfg.weights # not initialized from coarse when weights are specified
and not self.resumed # not initialized from coarse when resumed from checkpoints
):
threestudio.info("Initializing geometry from a given checkpoint ...")
prev_cfg = load_config(
os.path.join(
os.path.dirname(self.cfg.geometry_convert_from),
"../configs/parsed.yaml",
)
) # TODO: hard-coded relative path
prev_system_cfg: BaseLift3DSystem.Config = parse_structured(
self.Config, prev_cfg.system
)
prev_geometry_cfg = prev_system_cfg.geometry
prev_geometry_cfg.update(self.cfg.geometry_convert_override)
prev_geometry = threestudio.find(prev_system_cfg.geometry_type)(
prev_geometry_cfg
)
state_dict, epoch, global_step = load_module_weights(
self.cfg.geometry_convert_from,
module_name="geometry",
map_location="cpu",
)
prev_geometry.load_state_dict(state_dict, strict=False)
# restore step-dependent states
prev_geometry.do_update_step(epoch, global_step, on_load_weights=True)
# convert from coarse stage geometry
prev_geometry = prev_geometry.to(get_device())
self.geometry = threestudio.find(self.cfg.geometry_type).create_from(
prev_geometry,
self.cfg.geometry,
copy_net=self.cfg.geometry_convert_inherit_texture,
)
del prev_geometry
cleanup()
else:
self.geometry = threestudio.find(self.cfg.geometry_type)(self.cfg.geometry)
self.material = threestudio.find(self.cfg.material_type)(self.cfg.material)
self.background = threestudio.find(self.cfg.background_type)(
self.cfg.background
)
self.renderer = threestudio.find(self.cfg.renderer_type)(
self.cfg.renderer,
geometry=self.geometry,
material=self.material,
background=self.background,
)
def on_fit_start(self) -> None:
if self._save_dir is not None:
threestudio.info(f"Validation results will be saved to {self._save_dir}")
else:
threestudio.warn(
f"Saving directory not set for the system, visualization results will not be saved"
)
def on_test_end(self) -> None:
if self._save_dir is not None:
threestudio.info(f"Test results saved to {self._save_dir}")
def on_predict_start(self) -> None:
self.exporter: Exporter = threestudio.find(self.cfg.exporter_type)(
self.cfg.exporter,
geometry=self.geometry,
material=self.material,
background=self.background,
)
def predict_step(self, batch, batch_idx):
if self.exporter.cfg.save_video:
self.test_step(batch, batch_idx)
def on_predict_epoch_end(self) -> None:
if self.exporter.cfg.save_video:
self.on_test_epoch_end()
|
class BaseSystem(pl.LightningModule, Updateable, SaverMixin):
@dataclass
class Config:
loggers: dict = field(default_factory=dict)
loss: dict = field(default_factory=dict)
optimizer: dict = field(default_factory=dict)
scheduler: Optional[dict] = None
weights: Optional[str] = None
weights_ignore_modules: Optional[List[str]] = None
cleanup_after_validation_step: bool = False
cleanup_after_test_step: bool = False
cfg: Config
def __init__(self, cfg, resumed=False) -> None:
super().__init__()
self.cfg = parse_structured(self.Config, cfg)
self._save_dir: Optional[str] = None
self._resumed: bool = resumed
self._resumed_eval: bool = False
self._resumed_eval_status: dict = {"global_step": 0, "current_epoch": 0}
if "loggers" in cfg:
self.create_loggers(cfg.loggers)
self.configure()
if self.cfg.weights is not None:
self.load_weights(self.cfg.weights, self.cfg.weights_ignore_modules)
self.post_configure()
def load_weights(self, weights: str, ignore_modules: Optional[List[str]] = None):
state_dict, epoch, global_step = load_module_weights(
weights, ignore_modules=ignore_modules, map_location="cpu"
)
self.load_state_dict(state_dict, strict=False)
# restore step-dependent states
self.do_update_step(epoch, global_step, on_load_weights=True)
def set_resume_status(self, current_epoch: int, global_step: int):
# restore correct epoch and global step in eval
self._resumed_eval = True
self._resumed_eval_status["current_epoch"] = current_epoch
self._resumed_eval_status["global_step"] = global_step
@property
def resumed(self):
# whether from resumed checkpoint
return self._resumed
@property
def true_global_step(self):
if self._resumed_eval:
return self._resumed_eval_status["global_step"]
else:
return self.global_step
@property
def true_current_epoch(self):
if self._resumed_eval:
return self._resumed_eval_status["current_epoch"]
else:
return self.current_epoch
def configure(self) -> None:
pass
def post_configure(self) -> None:
"""
executed after weights are loaded
"""
pass
def C(self, value: Any) -> float:
return C(value, self.true_current_epoch, self.true_global_step)
def configure_optimizers(self):
optim = parse_optimizer(self.cfg.optimizer, self)
ret = {
"optimizer": optim,
}
if self.cfg.scheduler is not None:
ret.update(
{
"lr_scheduler": parse_scheduler(self.cfg.scheduler, optim),
}
)
return ret
def training_step(self, batch, batch_idx):
raise NotImplementedError
def validation_step(self, batch, batch_idx):
raise NotImplementedError
def on_validation_batch_end(self, outputs, batch, batch_idx):
if self.cfg.cleanup_after_validation_step:
# cleanup to save vram
cleanup()
def on_validation_epoch_end(self):
raise NotImplementedError
def test_step(self, batch, batch_idx):
raise NotImplementedError
def on_test_batch_end(self, outputs, batch, batch_idx):
if self.cfg.cleanup_after_test_step:
# cleanup to save vram
cleanup()
def on_test_epoch_end(self):
pass
def predict_step(self, batch, batch_idx):
raise NotImplementedError
def on_predict_batch_end(self, outputs, batch, batch_idx):
if self.cfg.cleanup_after_test_step:
# cleanup to save vram
cleanup()
def on_predict_epoch_end(self):
pass
def preprocess_data(self, batch, stage):
pass
"""
Implementing on_after_batch_transfer of DataModule does the same.
But on_after_batch_transfer does not support DP.
"""
def on_train_batch_start(self, batch, batch_idx, unused=0):
self.preprocess_data(batch, "train")
self.dataset = self.trainer.train_dataloader.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def on_validation_batch_start(self, batch, batch_idx, dataloader_idx=0):
self.preprocess_data(batch, "validation")
self.dataset = self.trainer.val_dataloaders.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def on_test_batch_start(self, batch, batch_idx, dataloader_idx=0):
self.preprocess_data(batch, "test")
self.dataset = self.trainer.test_dataloaders.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def on_predict_batch_start(self, batch, batch_idx, dataloader_idx=0):
self.preprocess_data(batch, "predict")
self.dataset = self.trainer.predict_dataloaders.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
pass
def on_before_optimizer_step(self, optimizer):
"""
# some gradient-related debugging goes here, example:
from lightning.pytorch.utilities import grad_norm
norms = grad_norm(self.geometry, norm_type=2)
print(norms)
"""
pass
class BaseLift3DSystem(BaseSystem):
@dataclass
class Config(BaseSystem.Config):
geometry_type: str = ""
geometry: dict = field(default_factory=dict)
geometry_convert_from: Optional[str] = None
geometry_convert_inherit_texture: bool = False
# used to override configurations of the previous geometry being converted from,
# for example isosurface_threshold
geometry_convert_override: dict = field(default_factory=dict)
material_type: str = ""
material: dict = field(default_factory=dict)
background_type: str = ""
background: dict = field(default_factory=dict)
renderer_type: str = ""
renderer: dict = field(default_factory=dict)
guidance_type: str = ""
guidance: dict = field(default_factory=dict)
guidance_type_add: str = ""
guidance_add: dict = field(default_factory=dict)
prompt_processor_type: str = ""
prompt_processor: dict = field(default_factory=dict)
prompt_processor_type_add: str = ""
prompt_processor_add: dict = field(default_factory=dict)
# geometry export configurations, no need to specify in training
exporter_type: str = "mesh-exporter"
exporter: dict = field(default_factory=dict)
cfg: Config
def configure(self) -> None:
if (
self.cfg.geometry_convert_from # from_coarse must be specified
and not self.cfg.weights # not initialized from coarse when weights are specified
and not self.resumed # not initialized from coarse when resumed from checkpoints
):
threestudio.info("Initializing geometry from a given checkpoint ...")
prev_cfg = load_config(
os.path.join(
os.path.dirname(self.cfg.geometry_convert_from),
"../configs/parsed.yaml",
)
) # TODO: hard-coded relative path
prev_system_cfg: BaseLift3DSystem.Config = parse_structured(
self.Config, prev_cfg.system
)
prev_geometry_cfg = prev_system_cfg.geometry
prev_geometry_cfg.update(self.cfg.geometry_convert_override)
prev_geometry = threestudio.find(prev_system_cfg.geometry_type)(
prev_geometry_cfg
)
state_dict, epoch, global_step = load_module_weights(
self.cfg.geometry_convert_from,
module_name="geometry",
map_location="cpu",
)
prev_geometry.load_state_dict(state_dict, strict=False)
# restore step-dependent states
prev_geometry.do_update_step(epoch, global_step, on_load_weights=True)
# convert from coarse stage geometry
prev_geometry = prev_geometry.to(get_device())
self.geometry = threestudio.find(self.cfg.geometry_type).create_from(
prev_geometry,
self.cfg.geometry,
copy_net=self.cfg.geometry_convert_inherit_texture,
)
del prev_geometry
cleanup()
else:
self.geometry = threestudio.find(self.cfg.geometry_type)(self.cfg.geometry)
self.material = threestudio.find(self.cfg.material_type)(self.cfg.material)
self.background = threestudio.find(self.cfg.background_type)(
self.cfg.background
)
self.renderer = threestudio.find(self.cfg.renderer_type)(
self.cfg.renderer,
geometry=self.geometry,
material=self.material,
background=self.background,
)
def on_fit_start(self) -> None:
if self._save_dir is not None:
threestudio.info(f"Validation results will be saved to {self._save_dir}")
else:
threestudio.warn(
f"Saving directory not set for the system, visualization results will not be saved"
)
def on_test_end(self) -> None:
if self._save_dir is not None:
threestudio.info(f"Test results saved to {self._save_dir}")
def on_predict_start(self) -> None:
self.exporter: Exporter = threestudio.find(self.cfg.exporter_type)(
self.cfg.exporter,
geometry=self.geometry,
material=self.material,
background=self.background,
)
def predict_step(self, batch, batch_idx):
if self.exporter.cfg.save_video:
self.test_step(batch, batch_idx)
def on_predict_epoch_end(self) -> None:
if self.exporter.cfg.save_video:
self.on_test_epoch_end() | exporter_output: List[ExporterOutput] = self.exporter() | 1 | 2023-12-23 12:37:48+00:00 | 12k |
jesenzhang/ComfyUI_StreamDiffusion | streamdiffusion/wrapper.py | [
{
"identifier": "StreamDiffusion",
"path": "streamdiffusion/pipeline.py",
"snippet": "class StreamDiffusion:\n def __init__(\n self,\n pipe: StableDiffusionPipeline,\n t_index_list: List[int],\n torch_dtype: torch.dtype = torch.float16,\n width: int = 512,\n height: int = 512,\n do_add_noise: bool = True,\n use_denoising_batch: bool = True,\n frame_buffer_size: int = 1,\n cfg_type: Literal[\"none\", \"full\", \"self\", \"initialize\"] = \"self\",\n ) -> None:\n self.device = pipe.device\n self.dtype = torch_dtype\n self.generator = None\n\n self.height = height\n self.width = width\n\n self.latent_height = int(height // pipe.vae_scale_factor)\n self.latent_width = int(width // pipe.vae_scale_factor)\n\n self.frame_bff_size = frame_buffer_size\n self.denoising_steps_num = len(t_index_list)\n\n self.cfg_type = cfg_type\n\n if use_denoising_batch:\n self.batch_size = self.denoising_steps_num * frame_buffer_size\n if self.cfg_type == \"initialize\":\n self.trt_unet_batch_size = (\n self.denoising_steps_num + 1\n ) * self.frame_bff_size\n elif self.cfg_type == \"full\":\n self.trt_unet_batch_size = (\n 2 * self.denoising_steps_num * self.frame_bff_size\n )\n else:\n self.trt_unet_batch_size = self.denoising_steps_num * frame_buffer_size\n else:\n self.trt_unet_batch_size = self.frame_bff_size\n self.batch_size = frame_buffer_size\n\n self.t_list = t_index_list\n\n self.do_add_noise = do_add_noise\n self.use_denoising_batch = use_denoising_batch\n\n self.similar_image_filter = False\n self.similar_filter = SimilarImageFilter()\n self.prev_image_result = None\n\n self.pipe = pipe\n self.image_processor = VaeImageProcessor(pipe.vae_scale_factor)\n\n self.scheduler = LCMScheduler.from_config(self.pipe.scheduler.config)\n self.text_encoder = pipe.text_encoder\n self.unet = pipe.unet\n self.vae = pipe.vae\n\n self.inference_time_ema = 0\n\n def set_sampler_param(self,\n t_index_list: List[int],\n width: int = 512,\n height: int = 512,\n do_add_noise: bool = True,\n use_denoising_batch: bool = True,\n frame_buffer_size: int = 1,\n cfg_type: Literal[\"none\", \"full\", \"self\", \"initialize\"] = \"self\",):\n self.height = height\n self.width = width\n\n self.latent_height = int(height // self.pipe.vae_scale_factor)\n self.latent_width = int(width // self.pipe.vae_scale_factor)\n\n self.frame_bff_size = frame_buffer_size\n \n self.cfg_type = cfg_type\n self.t_list = t_index_list\n \n self.do_add_noise = do_add_noise\n self.use_denoising_batch = use_denoising_batch\n\n self.inference_time_ema = 0\n\n self.denoising_steps_num = len(self.t_list)\n if self.use_denoising_batch:\n self.batch_size = self.denoising_steps_num * self.frame_bff_size\n if self.cfg_type == \"initialize\":\n self.trt_unet_batch_size = (\n self.denoising_steps_num + 1\n ) * self.frame_bff_size\n elif self.cfg_type == \"full\":\n self.trt_unet_batch_size = (\n 2 * self.denoising_steps_num * self.frame_bff_size\n )\n else:\n self.trt_unet_batch_size = self.denoising_steps_num * self.frame_bff_size\n else:\n self.trt_unet_batch_size = self.frame_bff_size\n self.batch_size = self.frame_bff_size\n \n def load_lcm_lora(\n self,\n pretrained_model_name_or_path_or_dict: Union[\n str, Dict[str, torch.Tensor]\n ] = \"latent-consistency/lcm-lora-sdv1-5\",\n adapter_name: Optional[Any] = None,\n **kwargs,\n ) -> None:\n self.pipe.load_lora_weights(\n pretrained_model_name_or_path_or_dict, adapter_name, **kwargs\n )\n\n def load_lora(\n self,\n pretrained_lora_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],\n adapter_name: Optional[Any] = None,\n **kwargs,\n ) -> None:\n self.pipe.load_lora_weights(\n 
pretrained_lora_model_name_or_path_or_dict, adapter_name, **kwargs\n )\n\n def fuse_lora(\n self,\n fuse_unet: bool = True,\n fuse_text_encoder: bool = True,\n lora_scale: float = 1.0,\n safe_fusing: bool = False,\n ) -> None:\n self.pipe.fuse_lora(\n fuse_unet=fuse_unet,\n fuse_text_encoder=fuse_text_encoder,\n lora_scale=lora_scale,\n safe_fusing=safe_fusing,\n )\n\n def enable_similar_image_filter(self, threshold: float = 0.98, max_skip_frame: float = 10) -> None:\n self.similar_image_filter = True\n self.similar_filter.set_threshold(threshold)\n self.similar_filter.set_max_skip_frame(max_skip_frame)\n\n def disable_similar_image_filter(self) -> None:\n self.similar_image_filter = False\n\n @torch.no_grad()\n def prepare(\n self,\n prompt: str,\n negative_prompt: str = \"\",\n num_inference_steps: int = 50,\n guidance_scale: float = 1.2,\n delta: float = 1.0,\n generator: Optional[torch.Generator] = torch.Generator(),\n seed: int = 2,\n ) -> None:\n self.generator = generator\n self.generator.manual_seed(seed)\n # initialize x_t_latent (it can be any random tensor)\n if self.denoising_steps_num > 1:\n self.x_t_latent_buffer = torch.zeros(\n (\n (self.denoising_steps_num - 1) * self.frame_bff_size,\n 4,\n self.latent_height,\n self.latent_width,\n ),\n dtype=self.dtype,\n device=self.device,\n )\n else:\n self.x_t_latent_buffer = None\n\n if self.cfg_type == \"none\":\n self.guidance_scale = 1.0\n else:\n self.guidance_scale = guidance_scale\n self.delta = delta\n\n do_classifier_free_guidance = False\n if self.guidance_scale > 1.0:\n do_classifier_free_guidance = True\n\n encoder_output = self.pipe.encode_prompt(\n prompt=prompt,\n device=self.device,\n num_images_per_prompt=1,\n do_classifier_free_guidance=do_classifier_free_guidance,\n negative_prompt=negative_prompt,\n )\n self.prompt_embeds = encoder_output[0].repeat(self.batch_size, 1, 1)\n\n if self.use_denoising_batch and self.cfg_type == \"full\":\n uncond_prompt_embeds = encoder_output[1].repeat(self.batch_size, 1, 1)\n elif self.cfg_type == \"initialize\":\n uncond_prompt_embeds = encoder_output[1].repeat(self.frame_bff_size, 1, 1)\n\n if self.guidance_scale > 1.0 and (\n self.cfg_type == \"initialize\" or self.cfg_type == \"full\"\n ):\n self.prompt_embeds = torch.cat(\n [uncond_prompt_embeds, self.prompt_embeds], dim=0\n )\n\n self.scheduler.set_timesteps(num_inference_steps, self.device)\n self.timesteps = self.scheduler.timesteps.to(self.device)\n\n # make sub timesteps list based on the indices in the t_list list and the values in the timesteps list\n self.sub_timesteps = []\n for t in self.t_list:\n self.sub_timesteps.append(self.timesteps[t])\n\n sub_timesteps_tensor = torch.tensor(\n self.sub_timesteps, dtype=torch.long, device=self.device\n )\n self.sub_timesteps_tensor = torch.repeat_interleave(\n sub_timesteps_tensor,\n repeats=self.frame_bff_size if self.use_denoising_batch else 1,\n dim=0,\n )\n\n self.init_noise = torch.randn(\n (self.batch_size, 4, self.latent_height, self.latent_width),\n generator=generator,\n ).to(device=self.device, dtype=self.dtype)\n\n self.stock_noise = torch.zeros_like(self.init_noise)\n\n c_skip_list = []\n c_out_list = []\n for timestep in self.sub_timesteps:\n c_skip, c_out = self.scheduler.get_scalings_for_boundary_condition_discrete(\n timestep\n )\n c_skip_list.append(c_skip)\n c_out_list.append(c_out)\n\n self.c_skip = (\n torch.stack(c_skip_list)\n .view(len(self.t_list), 1, 1, 1)\n .to(dtype=self.dtype, device=self.device)\n )\n self.c_out = (\n torch.stack(c_out_list)\n 
.view(len(self.t_list), 1, 1, 1)\n .to(dtype=self.dtype, device=self.device)\n )\n\n alpha_prod_t_sqrt_list = []\n beta_prod_t_sqrt_list = []\n for timestep in self.sub_timesteps:\n alpha_prod_t_sqrt = self.scheduler.alphas_cumprod[timestep].sqrt()\n beta_prod_t_sqrt = (1 - self.scheduler.alphas_cumprod[timestep]).sqrt()\n alpha_prod_t_sqrt_list.append(alpha_prod_t_sqrt)\n beta_prod_t_sqrt_list.append(beta_prod_t_sqrt)\n alpha_prod_t_sqrt = (\n torch.stack(alpha_prod_t_sqrt_list)\n .view(len(self.t_list), 1, 1, 1)\n .to(dtype=self.dtype, device=self.device)\n )\n beta_prod_t_sqrt = (\n torch.stack(beta_prod_t_sqrt_list)\n .view(len(self.t_list), 1, 1, 1)\n .to(dtype=self.dtype, device=self.device)\n )\n self.alpha_prod_t_sqrt = torch.repeat_interleave(\n alpha_prod_t_sqrt,\n repeats=self.frame_bff_size if self.use_denoising_batch else 1,\n dim=0,\n )\n self.beta_prod_t_sqrt = torch.repeat_interleave(\n beta_prod_t_sqrt,\n repeats=self.frame_bff_size if self.use_denoising_batch else 1,\n dim=0,\n )\n\n @torch.no_grad()\n def update_prompt(self, prompt: str,negative_prompt: Optional[str] = None) -> None:\n encoder_output = self.pipe.encode_prompt(\n prompt=prompt,\n negative_prompt=negative_prompt,\n device=self.device,\n num_images_per_prompt=1,\n do_classifier_free_guidance=False,\n )\n self.prompt_embeds = encoder_output[0].repeat(self.batch_size, 1, 1)\n\n def add_noise(\n self,\n original_samples: torch.Tensor,\n noise: torch.Tensor,\n t_index: int,\n ) -> torch.Tensor:\n noisy_samples = (\n self.alpha_prod_t_sqrt[t_index] * original_samples\n + self.beta_prod_t_sqrt[t_index] * noise\n )\n return noisy_samples\n\n def scheduler_step_batch(\n self,\n model_pred_batch: torch.Tensor,\n x_t_latent_batch: torch.Tensor,\n idx: Optional[int] = None,\n ) -> torch.Tensor:\n # TODO: use t_list to select beta_prod_t_sqrt\n if idx is None:\n F_theta = (\n x_t_latent_batch - self.beta_prod_t_sqrt * model_pred_batch\n ) / self.alpha_prod_t_sqrt\n denoised_batch = self.c_out * F_theta + self.c_skip * x_t_latent_batch\n else:\n F_theta = (\n x_t_latent_batch - self.beta_prod_t_sqrt[idx] * model_pred_batch\n ) / self.alpha_prod_t_sqrt[idx]\n denoised_batch = (\n self.c_out[idx] * F_theta + self.c_skip[idx] * x_t_latent_batch\n )\n\n return denoised_batch\n\n def unet_step(\n self,\n x_t_latent: torch.Tensor,\n t_list: Union[torch.Tensor, list[int]],\n idx: Optional[int] = None,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n if self.guidance_scale > 1.0 and (self.cfg_type == \"initialize\"):\n x_t_latent_plus_uc = torch.concat([x_t_latent[0:1], x_t_latent], dim=0)\n t_list = torch.concat([t_list[0:1], t_list], dim=0)\n elif self.guidance_scale > 1.0 and (self.cfg_type == \"full\"):\n x_t_latent_plus_uc = torch.concat([x_t_latent, x_t_latent], dim=0)\n t_list = torch.concat([t_list, t_list], dim=0)\n else:\n x_t_latent_plus_uc = x_t_latent\n\n model_pred = self.unet(\n x_t_latent_plus_uc,\n t_list,\n encoder_hidden_states=self.prompt_embeds,\n return_dict=False,\n )[0]\n\n if self.guidance_scale > 1.0 and (self.cfg_type == \"initialize\"):\n noise_pred_text = model_pred[1:]\n self.stock_noise = torch.concat(\n [model_pred[0:1], self.stock_noise[1:]], dim=0\n ) # ここコメントアウトでself out cfg\n elif self.guidance_scale > 1.0 and (self.cfg_type == \"full\"):\n noise_pred_uncond, noise_pred_text = model_pred.chunk(2)\n else:\n noise_pred_text = model_pred\n if self.guidance_scale > 1.0 and (\n self.cfg_type == \"self\" or self.cfg_type == \"initialize\"\n ):\n noise_pred_uncond = self.stock_noise * self.delta\n if 
self.guidance_scale > 1.0 and self.cfg_type != \"none\":\n model_pred = noise_pred_uncond + self.guidance_scale * (\n noise_pred_text - noise_pred_uncond\n )\n else:\n model_pred = noise_pred_text\n\n # compute the previous noisy sample x_t -> x_t-1\n if self.use_denoising_batch:\n denoised_batch = self.scheduler_step_batch(model_pred, x_t_latent, idx)\n if self.cfg_type == \"self\" or self.cfg_type == \"initialize\":\n scaled_noise = self.beta_prod_t_sqrt * self.stock_noise\n delta_x = self.scheduler_step_batch(model_pred, scaled_noise, idx)\n alpha_next = torch.concat(\n [\n self.alpha_prod_t_sqrt[1:],\n torch.ones_like(self.alpha_prod_t_sqrt[0:1]),\n ],\n dim=0,\n )\n delta_x = alpha_next * delta_x\n beta_next = torch.concat(\n [\n self.beta_prod_t_sqrt[1:],\n torch.ones_like(self.beta_prod_t_sqrt[0:1]),\n ],\n dim=0,\n )\n delta_x = delta_x / beta_next\n init_noise = torch.concat(\n [self.init_noise[1:], self.init_noise[0:1]], dim=0\n )\n self.stock_noise = init_noise + delta_x\n\n else:\n # denoised_batch = self.scheduler.step(model_pred, t_list[0], x_t_latent).denoised\n denoised_batch = self.scheduler_step_batch(model_pred, x_t_latent, idx)\n\n return denoised_batch, model_pred\n\n def encode_image(self, image_tensors: torch.Tensor) -> torch.Tensor:\n image_tensors = image_tensors.to(\n device=self.device,\n dtype=self.vae.dtype,\n )\n img_latent = retrieve_latents(self.vae.encode(image_tensors), self.generator)\n img_latent = img_latent * self.vae.config.scaling_factor\n x_t_latent = self.add_noise(img_latent, self.init_noise[0], 0)\n return x_t_latent\n\n def decode_image(self, x_0_pred_out: torch.Tensor) -> torch.Tensor:\n output_latents = self.vae.decode(\n x_0_pred_out / self.vae.config.scaling_factor, return_dict=False\n )\n output_latent =output_latents[0]\n return output_latent\n\n def predict_x0_batch(self, x_t_latent: torch.Tensor) -> torch.Tensor:\n prev_latent_batch = self.x_t_latent_buffer\n\n if self.use_denoising_batch:\n t_list = self.sub_timesteps_tensor\n if self.denoising_steps_num > 1:\n x_t_latent = torch.cat((x_t_latent, prev_latent_batch), dim=0)\n self.stock_noise = torch.cat(\n (self.init_noise[0:1], self.stock_noise[:-1]), dim=0\n )\n x_0_pred_batch, model_pred = self.unet_step(x_t_latent, t_list)\n\n if self.denoising_steps_num > 1:\n x_0_pred_out = x_0_pred_batch[-1].unsqueeze(0)\n if self.do_add_noise:\n self.x_t_latent_buffer = (\n self.alpha_prod_t_sqrt[1:] * x_0_pred_batch[:-1]\n + self.beta_prod_t_sqrt[1:] * self.init_noise[1:]\n )\n else:\n self.x_t_latent_buffer = (\n self.alpha_prod_t_sqrt[1:] * x_0_pred_batch[:-1]\n )\n else:\n x_0_pred_out = x_0_pred_batch\n self.x_t_latent_buffer = None\n else:\n self.init_noise = x_t_latent\n for idx, t in enumerate(self.sub_timesteps_tensor):\n t = t.view(\n 1,\n ).repeat(\n self.frame_bff_size,\n )\n x_0_pred, model_pred = self.unet_step(x_t_latent, t, idx)\n if idx < len(self.sub_timesteps_tensor) - 1:\n if self.do_add_noise:\n x_t_latent = self.alpha_prod_t_sqrt[\n idx + 1\n ] * x_0_pred + self.beta_prod_t_sqrt[\n idx + 1\n ] * torch.randn_like(\n x_0_pred, device=self.device, dtype=self.dtype\n )\n else:\n x_t_latent = self.alpha_prod_t_sqrt[idx + 1] * x_0_pred\n x_0_pred_out = x_0_pred\n\n return x_0_pred_out\n\n @torch.no_grad()\n def __call__(\n self, x: Union[torch.Tensor, PIL.Image.Image, np.ndarray] = None\n ) -> torch.Tensor:\n start = torch.cuda.Event(enable_timing=True)\n end = torch.cuda.Event(enable_timing=True)\n start.record()\n if x is not None:\n x = self.image_processor.preprocess(x, 
self.height, self.width).to(\n device=self.device, dtype=self.dtype\n )\n if self.similar_image_filter:\n x = self.similar_filter(x)\n if x is None:\n time.sleep(self.inference_time_ema)\n return self.prev_image_result\n x_t_latent = self.encode_image(x)\n else:\n # TODO: check the dimension of x_t_latent\n x_t_latent = torch.randn((1, 4, self.latent_height, self.latent_width)).to(\n device=self.device, dtype=self.dtype\n )\n x_0_pred_out = self.predict_x0_batch(x_t_latent)\n x_output = self.decode_image(x_0_pred_out).detach().clone()\n\n self.prev_image_result = x_output\n end.record()\n torch.cuda.synchronize()\n inference_time = start.elapsed_time(end) / 1000\n self.inference_time_ema = 0.9 * self.inference_time_ema + 0.1 * inference_time\n return x_output\n \n @torch.no_grad()\n def sample(\n self, x: Union[torch.Tensor, PIL.Image.Image, np.ndarray] = None\n ) -> torch.Tensor:\n start = torch.cuda.Event(enable_timing=True)\n end = torch.cuda.Event(enable_timing=True)\n start.record()\n if x is not None:\n x = self.image_processor.preprocess(x, self.height, self.width).to(\n device=self.device, dtype=self.dtype\n )\n if self.similar_image_filter:\n x = self.similar_filter(x)\n if x is None:\n time.sleep(self.inference_time_ema)\n return self.prev_image_result\n x_t_latent = self.encode_image(x)\n b,c,h,w=x_t_latent.shape\n \n # x_t_latent=x_t_latent.repeat((2, 1,1,1))\n else:\n # TODO: check the dimension of x_t_latent\n x_t_latent = torch.randn((self.frame_bff_size, 4, self.latent_height, self.latent_width)).to(\n device=self.device, dtype=self.dtype\n )\n x_0_pred_out = self.predict_x0_batch(x_t_latent)\n x_output = self.decode_image(x_0_pred_out).detach().clone()\n self.prev_image_result = x_output\n end.record()\n torch.cuda.synchronize()\n inference_time = start.elapsed_time(end) / 1000\n self.inference_time_ema = 0.9 * self.inference_time_ema + 0.1 * inference_time\n return x_output\n \n\n @torch.no_grad()\n def txt2img(self, batch_size: int = 1) -> torch.Tensor:\n x_0_pred_out = self.predict_x0_batch(\n torch.randn((batch_size, 4, self.latent_height, self.latent_width)).to(\n device=self.device, dtype=self.dtype\n )\n )\n x_output = self.decode_image(x_0_pred_out).detach().clone()\n return x_output\n\n def txt2img_sd_turbo(self, batch_size: int = 1) -> torch.Tensor:\n x_t_latent = torch.randn(\n (batch_size, 4, self.latent_height, self.latent_width),\n device=self.device,\n dtype=self.dtype,\n )\n model_pred = self.unet(\n x_t_latent,\n self.sub_timesteps_tensor,\n encoder_hidden_states=self.prompt_embeds,\n return_dict=False,\n )[0]\n x_0_pred_out = (\n x_t_latent - self.beta_prod_t_sqrt * model_pred\n ) / self.alpha_prod_t_sqrt\n return self.decode_image(x_0_pred_out)"
},
{
"identifier": "postprocess_image",
"path": "streamdiffusion/image_utils.py",
"snippet": "def postprocess_image(\n image: torch.Tensor,\n output_type: str = \"pil\",\n do_denormalize: Optional[List[bool]] = None,\n) -> Union[torch.Tensor, np.ndarray, PIL.Image.Image]:\n if not isinstance(image, torch.Tensor):\n raise ValueError(\n f\"Input for postprocessing is in incorrect format: {type(image)}. We only support pytorch tensor\"\n )\n\n if output_type == \"latent\":\n return image\n\n do_normalize_flg = True\n if do_denormalize is None:\n do_denormalize = [do_normalize_flg] * image.shape[0]\n\n image = torch.stack(\n [\n denormalize(image[i]) if do_denormalize[i] else image[i]\n for i in range(image.shape[0])\n ]\n )\n\n if output_type == \"pt\":\n return image\n\n image = pt_to_numpy(image)\n\n if output_type == \"np\":\n return image\n\n if output_type == \"pil\":\n return numpy_to_pil(image)"
}
] | import gc
import os
import traceback
import numpy as np
import torch
from pathlib import Path
from typing import List, Literal, Optional, Union, Dict
from diffusers import AutoencoderTiny, StableDiffusionPipeline
from PIL import Image
from .pipeline import StreamDiffusion
from .image_utils import postprocess_image
from transformers import CLIPFeatureExtractor
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
)
from polygraphy import cuda
from streamdiffusion.acceleration.tensorrt import (
TorchVAEEncoder,
compile_unet,
compile_vae_decoder,
compile_vae_encoder,
)
from streamdiffusion.acceleration.tensorrt.engine import (
AutoencoderKLEngine,
UNet2DConditionModelEngine,
)
from streamdiffusion.acceleration.tensorrt.models import (
VAE,
UNet,
VAEEncoder,
)
from streamdiffusion.acceleration.sfast import (
accelerate_with_stable_fast,
)
from transformers import CLIPFeatureExtractor
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
) | 8,713 | """
# self.stream.prepare(
# prompt,
# negative_prompt,
# num_inference_steps=num_inference_steps,
# guidance_scale=guidance_scale,
# delta=delta,
# )
self.prompt =prompt
self.negative_prompt=negative_prompt
self.num_inference_steps=num_inference_steps
self.guidance_scale=guidance_scale
self.delta=delta
self.frame_buffer_size = frame_buffer_size
self.batch_size = (
len(t_index_list) * frame_buffer_size
if use_denoising_batch
else frame_buffer_size
)
self.t_index_list =t_index_list
self.cfg_type =cfg_type
self.use_denoising_batch = use_denoising_batch
self.use_safety_checker = use_safety_checker
self.do_add_noise =do_add_noise
self.seed=seed
if enable_similar_image_filter:
self.stream.enable_similar_image_filter(similar_image_filter_threshold, similar_image_filter_max_skip_frame)
else:
self.stream.disable_similar_image_filter()
if self.use_safety_checker:
if self.safety_checker==None or self.feature_extractor==None:
self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
"CompVis/stable-diffusion-safety-checker"
).to(self.stream.device)
self.feature_extractor = CLIPFeatureExtractor.from_pretrained(
"openai/clip-vit-base-patch32"
)
self.nsfw_fallback_img = Image.new("RGB", (512, 512), (0, 0, 0))
def __call__(
self,
image: Optional[Union[str, Image.Image, torch.Tensor]] = None,
prompt: Optional[str] = None,
) -> Union[Image.Image, List[Image.Image]]:
"""
Performs img2img or txt2img based on the mode.
Parameters
----------
image : Optional[Union[str, Image.Image, torch.Tensor]]
The image to generate from.
prompt : Optional[str]
The prompt to generate images from.
Returns
-------
Union[Image.Image, List[Image.Image]]
The generated image.
"""
if self.mode == "img2img":
return self.img2img(image)
else:
return self.txt2img(prompt)
def sample(self, image: Optional[Union[str, Image.Image, torch.Tensor]] = None,
prompt: Optional[str] = None,negative_prompt: Optional[str] = None)-> List[Image.Image]:
use_denoising_batch=self.use_denoising_batch
        if image is not None:
            # img2img: an input image was provided
if isinstance(image, str) or isinstance(image, Image.Image):
image = self.preprocess_image(image)
use_denoising_batch = True
self.stream.set_sampler_param(t_index_list=self.t_index_list,
width=self.width,
height=self.height,
do_add_noise=self.do_add_noise,
use_denoising_batch=use_denoising_batch,
frame_buffer_size=self.frame_buffer_size,
cfg_type=self.cfg_type)
else:
            # txt2img: no input image, start from random noise
if self.frame_buffer_size >1 and self.use_denoising_batch:
use_denoising_batch = False
self.stream.set_sampler_param(t_index_list=self.t_index_list,
width=self.width,
height=self.height,
do_add_noise=self.do_add_noise,
use_denoising_batch=use_denoising_batch,
frame_buffer_size=self.frame_buffer_size,
cfg_type='none')
self.stream.prepare(
prompt=self.prompt,
negative_prompt=self.negative_prompt,
num_inference_steps=self.num_inference_steps,
guidance_scale=self.guidance_scale,
delta=self.delta,
seed=self.seed,
)
if prompt is not None:
self.stream.update_prompt(prompt,negative_prompt)
self.batch_size = (
len(self.t_index_list) * self.frame_buffer_size
if use_denoising_batch
else self.frame_buffer_size
)
if self.frame_buffer_size==1:
for _ in range(self.batch_size):
self.stream.sample(image)
image_tensor = self.stream.sample(image)
|
torch.set_grad_enabled(False)
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
class StreamDiffusionWrapper:
def __init__(
self,
model_id_or_path: str,
t_index_list: List[int],
lora_dict: Optional[Dict[str, float]] = None,
mode: Literal["img2img", "txt2img"] = "img2img",
output_type: Literal["pil", "pt", "np", "latent"] = "pil",
lcm_lora_id: Optional[str] = None,
vae_id: Optional[str] = None,
device: Literal["cpu", "cuda"] = "cuda",
dtype: torch.dtype = torch.float16,
frame_buffer_size: int = 1,
width: int = 512,
height: int = 512,
warmup: int = 10,
acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt",
do_add_noise: bool = True,
device_ids: Optional[List[int]] = None,
use_lcm_lora: bool = True,
use_tiny_vae: bool = True,
enable_similar_image_filter: bool = False,
similar_image_filter_threshold: float = 0.98,
similar_image_filter_max_skip_frame: int = 10,
use_denoising_batch: bool = True,
cfg_type: Literal["none", "full", "self", "initialize"] = "self",
seed: int = 2,
use_safety_checker: bool = False,
):
"""
Initializes the StreamDiffusionWrapper.
Parameters
----------
model_id_or_path : str
The model id or path to load.
t_index_list : List[int]
The t_index_list to use for inference.
lora_dict : Optional[Dict[str, float]], optional
The lora_dict to load, by default None.
Keys are the LoRA names and values are the LoRA scales.
Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
mode : Literal["img2img", "txt2img"], optional
txt2img or img2img, by default "img2img".
output_type : Literal["pil", "pt", "np", "latent"], optional
The output type of image, by default "pil".
lcm_lora_id : Optional[str], optional
The lcm_lora_id to load, by default None.
If None, the default LCM-LoRA
("latent-consistency/lcm-lora-sdv1-5") will be used.
vae_id : Optional[str], optional
The vae_id to load, by default None.
If None, the default TinyVAE
("madebyollin/taesd") will be used.
device : Literal["cpu", "cuda"], optional
The device to use for inference, by default "cuda".
dtype : torch.dtype, optional
The dtype for inference, by default torch.float16.
frame_buffer_size : int, optional
The frame buffer size for denoising batch, by default 1.
width : int, optional
The width of the image, by default 512.
height : int, optional
The height of the image, by default 512.
warmup : int, optional
The number of warmup steps to perform, by default 10.
acceleration : Literal["none", "xformers", "tensorrt"], optional
The acceleration method, by default "tensorrt".
do_add_noise : bool, optional
Whether to add noise for following denoising steps or not,
by default True.
device_ids : Optional[List[int]], optional
The device ids to use for DataParallel, by default None.
use_lcm_lora : bool, optional
Whether to use LCM-LoRA or not, by default True.
use_tiny_vae : bool, optional
Whether to use TinyVAE or not, by default True.
enable_similar_image_filter : bool, optional
Whether to enable similar image filter or not,
by default False.
similar_image_filter_threshold : float, optional
The threshold for similar image filter, by default 0.98.
similar_image_filter_max_skip_frame : int, optional
The max skip frame for similar image filter, by default 10.
use_denoising_batch : bool, optional
Whether to use denoising batch or not, by default True.
cfg_type : Literal["none", "full", "self", "initialize"],
optional
The cfg_type for img2img mode, by default "self".
You cannot use anything other than "none" for txt2img mode.
seed : int, optional
The seed, by default 2.
use_safety_checker : bool, optional
Whether to use safety checker or not, by default False.
"""
self.sd_turbo = "turbo" in model_id_or_path
if mode == "txt2img":
if cfg_type != "none":
raise ValueError(
f"txt2img mode accepts only cfg_type = 'none', but got {cfg_type}"
)
if use_denoising_batch and frame_buffer_size > 1:
if not self.sd_turbo:
raise ValueError(
"txt2img mode cannot use denoising batch with frame_buffer_size > 1."
)
if mode == "img2img":
if not use_denoising_batch:
raise NotImplementedError(
"img2img mode must use denoising batch for now."
)
self.device = device
self.dtype = dtype
self.width = width
self.height = height
self.mode = mode
self.output_type = output_type
self.frame_buffer_size = frame_buffer_size
self.batch_size = (
len(t_index_list) * frame_buffer_size
if use_denoising_batch
else frame_buffer_size
)
self.t_index_list =t_index_list
self.cfg_type =cfg_type
self.use_denoising_batch = use_denoising_batch
self.use_safety_checker = use_safety_checker
self.do_add_noise =do_add_noise
self.seed=seed
self.stream: StreamDiffusion = self._load_model(
model_id_or_path=model_id_or_path,
lora_dict=lora_dict,
lcm_lora_id=lcm_lora_id,
vae_id=vae_id,
t_index_list=t_index_list,
acceleration=acceleration,
warmup=warmup,
do_add_noise=do_add_noise,
use_lcm_lora=use_lcm_lora,
use_tiny_vae=use_tiny_vae,
cfg_type=cfg_type,
seed=seed,
)
if device_ids is not None:
self.stream.unet = torch.nn.DataParallel(
self.stream.unet, device_ids=device_ids
)
if enable_similar_image_filter:
self.stream.enable_similar_image_filter(similar_image_filter_threshold, similar_image_filter_max_skip_frame)
def prepare(
self,
prompt: str,
negative_prompt: str = "",
num_inference_steps: int = 50,
guidance_scale: float = 1.2,
delta: float = 1.0,
t_index_list: List[int]=[16,32,45],
do_add_noise: bool = True,
enable_similar_image_filter: bool = False,
similar_image_filter_threshold: float = 0.98,
similar_image_filter_max_skip_frame: int = 10,
use_denoising_batch: bool = True,
cfg_type: Literal["none", "full", "self", "initialize"] = "self",
seed: int = 2,
frame_buffer_size:int=1,
use_safety_checker: bool = False,
) -> None:
"""
Prepares the model for inference.
Parameters
----------
prompt : str
The prompt to generate images from.
num_inference_steps : int, optional
The number of inference steps to perform, by default 50.
guidance_scale : float, optional
The guidance scale to use, by default 1.2.
delta : float, optional
The delta multiplier of virtual residual noise,
by default 1.0.
"""
# self.stream.prepare(
# prompt,
# negative_prompt,
# num_inference_steps=num_inference_steps,
# guidance_scale=guidance_scale,
# delta=delta,
# )
self.prompt =prompt
self.negative_prompt=negative_prompt
self.num_inference_steps=num_inference_steps
self.guidance_scale=guidance_scale
self.delta=delta
self.frame_buffer_size = frame_buffer_size
self.batch_size = (
len(t_index_list) * frame_buffer_size
if use_denoising_batch
else frame_buffer_size
)
self.t_index_list =t_index_list
self.cfg_type =cfg_type
self.use_denoising_batch = use_denoising_batch
self.use_safety_checker = use_safety_checker
self.do_add_noise =do_add_noise
self.seed=seed
if enable_similar_image_filter:
self.stream.enable_similar_image_filter(similar_image_filter_threshold, similar_image_filter_max_skip_frame)
else:
self.stream.disable_similar_image_filter()
if self.use_safety_checker:
if self.safety_checker==None or self.feature_extractor==None:
self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
"CompVis/stable-diffusion-safety-checker"
).to(self.stream.device)
self.feature_extractor = CLIPFeatureExtractor.from_pretrained(
"openai/clip-vit-base-patch32"
)
self.nsfw_fallback_img = Image.new("RGB", (512, 512), (0, 0, 0))
def __call__(
self,
image: Optional[Union[str, Image.Image, torch.Tensor]] = None,
prompt: Optional[str] = None,
) -> Union[Image.Image, List[Image.Image]]:
"""
Performs img2img or txt2img based on the mode.
Parameters
----------
image : Optional[Union[str, Image.Image, torch.Tensor]]
The image to generate from.
prompt : Optional[str]
The prompt to generate images from.
Returns
-------
Union[Image.Image, List[Image.Image]]
The generated image.
"""
if self.mode == "img2img":
return self.img2img(image)
else:
return self.txt2img(prompt)
def sample(self, image: Optional[Union[str, Image.Image, torch.Tensor]] = None,
prompt: Optional[str] = None,negative_prompt: Optional[str] = None)-> List[Image.Image]:
use_denoising_batch=self.use_denoising_batch
        if image is not None:
            # img2img: an input image was provided
if isinstance(image, str) or isinstance(image, Image.Image):
image = self.preprocess_image(image)
use_denoising_batch = True
self.stream.set_sampler_param(t_index_list=self.t_index_list,
width=self.width,
height=self.height,
do_add_noise=self.do_add_noise,
use_denoising_batch=use_denoising_batch,
frame_buffer_size=self.frame_buffer_size,
cfg_type=self.cfg_type)
else:
            # txt2img: no input image, start from random noise
if self.frame_buffer_size >1 and self.use_denoising_batch:
use_denoising_batch = False
self.stream.set_sampler_param(t_index_list=self.t_index_list,
width=self.width,
height=self.height,
do_add_noise=self.do_add_noise,
use_denoising_batch=use_denoising_batch,
frame_buffer_size=self.frame_buffer_size,
cfg_type='none')
self.stream.prepare(
prompt=self.prompt,
negative_prompt=self.negative_prompt,
num_inference_steps=self.num_inference_steps,
guidance_scale=self.guidance_scale,
delta=self.delta,
seed=self.seed,
)
if prompt is not None:
self.stream.update_prompt(prompt,negative_prompt)
self.batch_size = (
len(self.t_index_list) * self.frame_buffer_size
if use_denoising_batch
else self.frame_buffer_size
)
if self.frame_buffer_size==1:
for _ in range(self.batch_size):
self.stream.sample(image)
image_tensor = self.stream.sample(image) | image = postprocess_image(image_tensor.cpu(), output_type=self.output_type) | 1 | 2023-12-29 09:00:03+00:00 | 12k |
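
The ComfyUI_StreamDiffusion record above wraps the StreamDiffusion pipeline: __init__ documents the constructor arguments, prepare() only caches prompt and sampler settings, and sample() re-applies them through stream.set_sampler_param() and stream.prepare() before denoising. A hedged usage sketch follows; the checkpoint path and input image file are placeholders that do not appear in the source, and acceleration="none" sidesteps the TensorRT/stable-fast paths.

from PIL import Image

wrapper = StreamDiffusionWrapper(
    model_id_or_path="path/to/sd15-checkpoint",  # placeholder: any SD 1.5-family model id or local path
    t_index_list=[22, 32, 45],
    mode="img2img",
    output_type="pil",
    frame_buffer_size=1,
    width=512,
    height=512,
    acceleration="none",
    use_lcm_lora=True,
    use_tiny_vae=True,
    use_denoising_batch=True,
    cfg_type="self",
)

wrapper.prepare(
    prompt="a watercolor portrait, best quality",
    negative_prompt="low quality, blurry",
    num_inference_steps=50,
    guidance_scale=1.2,
    delta=0.5,
    t_index_list=[22, 32, 45],
    cfg_type="self",
)

frame = Image.open("input.png").convert("RGB")  # placeholder input image
images = wrapper.sample(image=frame)  # annotated to return a list of PIL images
images[0].save("output.png")

Calling wrapper.sample(image=None) instead takes the txt2img branch shown above, which switches the cfg_type handed to the sampler to "none" before generating from random noise.
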
neobundy/MLX-Stable-Diffusion-WebUI | stable_diffusion/model_io.py | [
{
"identifier": "CLIPTextModel",
"path": "stable_diffusion/clip.py",
"snippet": "class CLIPTextModel(nn.Module):\n \"\"\"Implements the text encoder transformer from CLIP.\"\"\"\n\n def __init__(self, config: CLIPTextModelConfig):\n super().__init__()\n\n self.token_embedding = nn.Embedding(config.vocab_size, config.model_dims)\n self.position_embedding = nn.Embedding(config.max_length, config.model_dims)\n self.layers = [\n CLIPEncoderLayer(config.model_dims, config.num_heads)\n for i in range(config.num_layers)\n ]\n self.final_layer_norm = nn.LayerNorm(config.model_dims)\n\n def __call__(self, x):\n # Extract some shapes\n B, N = x.shape\n\n # Compute the embeddings\n x = self.token_embedding(x)\n x = x + self.position_embedding.weight[:N]\n\n # Compute the features from the transformer\n mask = nn.MultiHeadAttention.create_additive_causal_mask(N, x.dtype)\n for l in self.layers:\n x = l(x, mask)\n\n # Apply the final layernorm and return\n return self.final_layer_norm(x)"
},
{
"identifier": "AutoencoderConfig",
"path": "stable_diffusion/config.py",
"snippet": "class AutoencoderConfig(BaseConfig):\n in_channels: int = 3\n out_channels: int = 3\n latent_channels_out: int = 8\n latent_channels_in: int = 4\n block_out_channels: Tuple[int] = (128, 256, 512, 512)\n layers_per_block: int = 2\n norm_num_groups: int = 32\n scaling_factor: float = 0.18215"
},
{
"identifier": "CLIPTextModelConfig",
"path": "stable_diffusion/config.py",
"snippet": "class CLIPTextModelConfig(BaseConfig):\n num_layers: int = 23\n model_dims: int = 1024\n num_heads: int = 16\n max_length: int = 77\n vocab_size: int = 49408"
},
{
"identifier": "DiffusionConfig",
"path": "stable_diffusion/config.py",
"snippet": "class DiffusionConfig(BaseConfig):\n beta_schedule: str = \"scaled_linear\"\n beta_start: float = 0.00085\n beta_end: float = 0.012\n num_train_steps: int = 1000"
},
{
"identifier": "UNetConfig",
"path": "stable_diffusion/config.py",
"snippet": "class UNetConfig(BaseConfig):\n in_channels: int = 4\n out_channels: int = 4\n conv_in_kernel: int = 3\n conv_out_kernel: int = 3\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280)\n layers_per_block: Tuple[int] = (2, 2, 2, 2)\n mid_block_layers: int = 2\n transformer_layers_per_block: Tuple[int] = (1, 1, 1, 1)\n num_attention_heads: Tuple[int] = (5, 10, 20, 20)\n cross_attention_dim: Tuple[int] = (1024,) * 4\n norm_num_groups: int = 32"
},
{
"identifier": "Tokenizer",
"path": "stable_diffusion/tokenizer.py",
"snippet": "class Tokenizer:\n \"\"\"A simple port of CLIPTokenizer from https://github.com/huggingface/transformers/ .\"\"\"\n\n def __init__(self, bpe_ranks, vocab):\n self.bpe_ranks = bpe_ranks\n self.vocab = vocab\n self.pat = regex.compile(\n r\"\"\"<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+\"\"\",\n regex.IGNORECASE,\n )\n\n self._cache = {self.bos: self.bos, self.eos: self.eos}\n\n @property\n def bos(self):\n return \"<|startoftext|>\"\n\n @property\n def bos_token(self):\n return self.vocab[self.bos]\n\n @property\n def eos(self):\n return \"<|endoftext|>\"\n\n @property\n def eos_token(self):\n return self.vocab[self.eos]\n\n def bpe(self, text):\n if text in self._cache:\n return self._cache[text]\n\n unigrams = list(text[:-1]) + [text[-1] + \"</w>\"]\n unique_bigrams = set(zip(unigrams, unigrams[1:]))\n\n if not unique_bigrams:\n return unigrams\n\n # In every iteration try to merge the two most likely bigrams. If none\n # was merged we are done.\n #\n # Ported from https://github.com/huggingface/transformers/blob/main/src/transformers/models/clip/tokenization_clip.py\n while unique_bigrams:\n bigram = min(\n unique_bigrams, key=lambda pair: self.bpe_ranks.get(pair, float(\"inf\"))\n )\n if bigram not in self.bpe_ranks:\n break\n\n new_unigrams = []\n skip = False\n for a, b in zip(unigrams, unigrams[1:]):\n if skip:\n skip = False\n continue\n\n if (a, b) == bigram:\n new_unigrams.append(a + b)\n skip = True\n\n else:\n new_unigrams.append(a)\n\n if not skip:\n new_unigrams.append(b)\n\n unigrams = new_unigrams\n unique_bigrams = set(zip(unigrams, unigrams[1:]))\n\n self._cache[text] = unigrams\n\n return unigrams\n\n def tokenize(self, text, prepend_bos=True, append_eos=True):\n if isinstance(text, list):\n return [self.tokenize(t, prepend_bos, append_eos) for t in text]\n\n # Lower case cleanup and split according to self.pat. Hugging Face does\n # a much more thorough job here but this should suffice for 95% of\n # cases.\n clean_text = regex.sub(r\"\\s+\", \" \", text.lower())\n tokens = regex.findall(self.pat, clean_text)\n\n # Split the tokens according to the byte-pair merge file\n bpe_tokens = [ti for t in tokens for ti in self.bpe(t)]\n\n # Map to token ids and return\n tokens = [self.vocab[t] for t in bpe_tokens]\n if prepend_bos:\n tokens = [self.bos_token] + tokens\n if append_eos:\n tokens.append(self.eos_token)\n\n return tokens"
},
{
"identifier": "UNetModel",
"path": "stable_diffusion/unet.py",
"snippet": "class UNetModel(nn.Module):\n \"\"\"The conditional 2D UNet model that actually performs the denoising.\"\"\"\n\n def __init__(self, config: UNetConfig):\n super().__init__()\n\n self.conv_in = nn.Conv2d(\n config.in_channels,\n config.block_out_channels[0],\n config.conv_in_kernel,\n padding=(config.conv_in_kernel - 1) // 2,\n )\n\n # Generate sinusoidal positional encodings.\n # These encodings are used in transformer models to provide information about the position of the elements in the sequence.\n self.timesteps = nn.SinusoidalPositionalEncoding(\n config.block_out_channels[0],\n max_freq=1,\n min_freq=math.exp(\n -math.log(10000) + 2 * math.log(10000) / config.block_out_channels[0]\n ),\n scale=1.0,\n cos_first=True,\n full_turns=False,\n )\n self.time_embedding = TimestepEmbedding(\n config.block_out_channels[0],\n config.block_out_channels[0] * 4,\n )\n\n # Make the downsampling blocks\n block_channels = [config.block_out_channels[0]] + list(\n config.block_out_channels\n )\n self.down_blocks = [\n UNetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=config.block_out_channels[0] * 4,\n num_layers=config.layers_per_block[i],\n transformer_layers_per_block=config.transformer_layers_per_block[i],\n num_attention_heads=config.num_attention_heads[i],\n cross_attention_dim=config.cross_attention_dim[i],\n resnet_groups=config.norm_num_groups,\n add_downsample=(i < len(config.block_out_channels) - 1),\n add_upsample=False,\n add_cross_attention=(i < len(config.block_out_channels) - 1),\n )\n for i, (in_channels, out_channels) in enumerate(\n zip(block_channels, block_channels[1:])\n )\n ]\n\n # Make the middle block\n self.mid_blocks = [\n ResnetBlock2D(\n in_channels=config.block_out_channels[-1],\n out_channels=config.block_out_channels[-1],\n temb_channels=config.block_out_channels[0] * 4,\n groups=config.norm_num_groups,\n ),\n Transformer2D(\n in_channels=config.block_out_channels[-1],\n model_dims=config.block_out_channels[-1],\n num_heads=config.num_attention_heads[-1],\n num_layers=config.transformer_layers_per_block[-1],\n encoder_dims=config.cross_attention_dim[-1],\n ),\n ResnetBlock2D(\n in_channels=config.block_out_channels[-1],\n out_channels=config.block_out_channels[-1],\n temb_channels=config.block_out_channels[0] * 4,\n groups=config.norm_num_groups,\n ),\n ]\n\n # Make the upsampling blocks\n block_channels = (\n [config.block_out_channels[0]]\n + list(config.block_out_channels)\n + [config.block_out_channels[-1]]\n )\n self.up_blocks = [\n UNetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=config.block_out_channels[0] * 4,\n prev_out_channels=prev_out_channels,\n num_layers=config.layers_per_block[i] + 1,\n transformer_layers_per_block=config.transformer_layers_per_block[i],\n num_attention_heads=config.num_attention_heads[i],\n cross_attention_dim=config.cross_attention_dim[i],\n resnet_groups=config.norm_num_groups,\n add_downsample=False,\n add_upsample=(i > 0),\n add_cross_attention=(i < len(config.block_out_channels) - 1),\n )\n for i, (in_channels, out_channels, prev_out_channels) in reversed(\n list(\n enumerate(\n zip(block_channels, block_channels[1:], block_channels[2:])\n )\n )\n )\n ]\n\n self.conv_norm_out = nn.GroupNorm(\n config.norm_num_groups,\n config.block_out_channels[0],\n pytorch_compatible=True,\n )\n self.conv_out = nn.Conv2d(\n config.block_out_channels[0],\n config.out_channels,\n config.conv_out_kernel,\n padding=(config.conv_out_kernel - 1) // 2,\n )\n\n def 
__call__(self, x, timestep, encoder_x, attn_mask=None, encoder_attn_mask=None):\n\n # Get the sinusoidal positional encoding for the given timestep.\n # The self.timesteps object is an instance of the nn.SinusoidalPositionalEncoding class, which generates sinusoidal positional encodings.\n # These encodings are used in transformer models to provide information about the position of the elements in the sequence.\n # The astype(x.dtype) part is ensuring that the positional encoding has the same data type as the input tensor x.\n\n temb = self.timesteps(timestep).astype(x.dtype)\n temb = self.time_embedding(temb)\n\n # Preprocess the input\n x = self.conv_in(x)\n\n # Run the downsampling part of the unet\n residuals = [x]\n for block in self.down_blocks:\n x, res = block(\n x,\n encoder_x=encoder_x,\n temb=temb,\n attn_mask=attn_mask,\n encoder_attn_mask=encoder_attn_mask,\n )\n residuals.extend(res)\n\n # Run the middle part of the unet\n x = self.mid_blocks[0](x, temb)\n x = self.mid_blocks[1](x, encoder_x, attn_mask, encoder_attn_mask)\n x = self.mid_blocks[2](x, temb)\n\n # Run the upsampling part of the unet\n for block in self.up_blocks:\n x, _ = block(\n x,\n encoder_x=encoder_x,\n temb=temb,\n attn_mask=attn_mask,\n encoder_attn_mask=encoder_attn_mask,\n residual_hidden_states=residuals,\n )\n\n # Postprocess the output\n x = self.conv_norm_out(x)\n x = nn.silu(x)\n x = self.conv_out(x)\n\n return x"
},
{
"identifier": "Autoencoder",
"path": "stable_diffusion/vae.py",
"snippet": "class Autoencoder(nn.Module):\n \"\"\"The autoencoder that allows us to perform diffusion in the latent space.\"\"\"\n\n def __init__(self, config: AutoencoderConfig):\n super().__init__()\n\n self.latent_channels = config.latent_channels_in\n self.scaling_factor = config.scaling_factor\n self.encoder = Encoder(\n config.in_channels,\n config.latent_channels_out,\n config.block_out_channels,\n config.layers_per_block,\n resnet_groups=config.norm_num_groups,\n )\n self.decoder = Decoder(\n config.latent_channels_in,\n config.out_channels,\n config.block_out_channels,\n config.layers_per_block + 1,\n resnet_groups=config.norm_num_groups,\n )\n\n self.quant_proj = nn.Linear(\n config.latent_channels_out, config.latent_channels_out\n )\n self.post_quant_proj = nn.Linear(\n config.latent_channels_in, config.latent_channels_in\n )\n\n def encode(self, x):\n x = self.encoder(x)\n\n # This line applies the linear transformation to the tensor x.\n # The purpose of this operation is to transform the features extracted by the encoder into a form suitable for quantization.\n # In this case, the transformation doesn't change the dimensionality of the data (as both input and output dimensions are config.latent_channels_out),\n # but it can still learn to make the data more suitable for the subsequent operations (like splitting into mean and logvar).\n # The term \"projection\" in quant_proj refers to the operation of applying a linear transformation to the data,\n # which can be thought of as \"projecting\" the data onto a different subspace. This is a common operation in machine learning models,\n # and it is used here to transform the data into a form that is suitable for the subsequent operations in the VAE.\n x = self.quant_proj(x)\n\n # two tensors of size (B, C, H, W) where C = latent_channels_in\n mean, logvar = x.split(2, axis=-1)\n mean = mean * self.scaling_factor\n logvar = logvar + 2 * math.log(self.scaling_factor)\n\n return mean, logvar\n\n def decode(self, z):\n z = z / self.scaling_factor\n return self.decoder(self.post_quant_proj(z))\n\n def __call__(self, x, key=None):\n mean, logvar = self.encode(x)\n z = mx.random.normal(mean.shape, key=key) * mx.exp(0.5 * logvar) + mean\n x_hat = self.decode(z)\n\n return dict(x_hat=x_hat, z=z, mean=mean, logvar=logvar)"
},
{
"identifier": "_DEFAULT_MODEL",
"path": "stable_diffusion/models.py",
"snippet": "_DEFAULT_MODEL = _AVAILABLE_MODELS[0]"
},
{
"identifier": "_MODELS",
"path": "stable_diffusion/models.py",
"snippet": "_MODELS = {model: generate_model_dict() for model in _AVAILABLE_MODELS}"
},
{
"identifier": "DiffuserModelPathConfig",
"path": "stable_diffusion/config.py",
"snippet": "class DiffuserModelPathConfig:\n def __init__(self, model_path: str = \"./diffuser_models\"):\n self.model_path = model_path\n\n @property\n def unet_config(self):\n return self.model_path + \"/unet/config.json\"\n\n @property\n def unet(self):\n return self.model_path + \"/unet/diffusion_pytorch_model.safetensors\"\n\n @property\n def scheduler(self):\n return self.model_path + \"/scheduler/scheduler_config.json\"\n\n @property\n def text_encoder_config(self):\n return self.model_path + \"/text_encoder/config.json\"\n\n @property\n def text_encoder(self):\n return self.model_path + \"/text_encoder/model.safetensors\"\n\n @property\n def vae_config(self):\n return self.model_path + \"/vae/config.json\"\n\n @property\n def vae(self):\n return self.model_path + \"/vae/diffusion_pytorch_model.safetensors\"\n\n @property\n def diffusion_config(self):\n return self.model_path + \"/scheduler/scheduler_config.json\"\n\n @property\n def tokenizer_vocab(self):\n return self.model_path + \"/tokenizer/vocab.json\"\n\n @property\n def tokenizer_merges(self):\n return self.model_path + \"/tokenizer/merges.txt\""
}
] | from typing import Optional
from functools import partial
from huggingface_hub import hf_hub_download
from mlx.utils import tree_unflatten
from safetensors import safe_open as safetensor_open
from .clip import CLIPTextModel
from .config import AutoencoderConfig, CLIPTextModelConfig, DiffusionConfig, UNetConfig
from .tokenizer import Tokenizer
from .unet import UNetModel
from .vae import Autoencoder
from .models import _DEFAULT_MODEL, _MODELS
from .config import DiffuserModelPathConfig
from tqdm import tqdm
import json
import mlx.core as mx
import numpy as np | 7,393 | key = key.replace("mid_block.resnets.0", "mid_blocks.0")
_debug_print(f"Replaced 'mid_block.resnets.0' with 'mid_blocks.0' in {key}")
if "mid_block.attentions.0" in key:
key = key.replace("mid_block.attentions.0", "mid_blocks.1")
_debug_print(f"Replaced 'mid_block.attentions.0' with 'mid_blocks.1' in {key}")
if "mid_block.resnets.1" in key:
key = key.replace("mid_block.resnets.1", "mid_blocks.2")
_debug_print(f"Replaced 'mid_block.resnets.1' with 'mid_blocks.2' in {key}")
# Map the quant/post_quant layers
if "quant_conv" in key:
key = key.replace("quant_conv", "quant_proj")
value = value.squeeze()
_debug_print(f"Replaced 'quant_conv' with 'quant_proj' and squeezed value in {key}")
# Map the conv_shortcut to linear
if "conv_shortcut.weight" in key:
value = value.squeeze()
_debug_print(f"Squeezed 'conv_shortcut.weight' in {key}")
# Rearrange the dimensions to [B, H, W, C] - Autoencoder expects: B, H, W, C = x.shape
if len(value.shape) == 4:
value = value.transpose(0, 2, 3, 1)
_debug_print(f"Transposed dimensions in {key}")
return [(key, _from_numpy(value))]
def _flatten(params):
return [(k, v) for p in params for (k, v) in p]
# The weights of the model can be loaded as 16-bit floating point numbers, which is a form of quantization known as half-precision floating point.
# This can reduce the memory requirements of the model by half compared to 32-bit floating point numbers, at the cost of reduced numerical precision.
def _load_safetensor_weights(mapper, model, weight_file, float16: bool = False):
dtype = np.float16 if float16 else np.float32
_debug_print(f"Loading weights from {weight_file}")
with safetensor_open(weight_file, framework="numpy") as f:
keys = list(f.keys())
weights = _flatten([mapper(k, f.get_tensor(k).astype(dtype)) for k in tqdm(keys, desc=f"Loading weights from {weight_file}...")])
model.update(tree_unflatten(weights))
def _check_key(key: str, part: str):
if key not in _MODELS:
raise ValueError(
f"[{part}] '{key}' model not found, choose one of {{{','.join(_MODELS.keys())}}}"
)
def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion UNet from Hugging Face Hub."""
_check_key(key, "load_unet")
# Download the config and create the model
unet_config = _MODELS[key]["unet_config"]
with open(hf_hub_download(key, unet_config)) as f:
config = json.load(f)
n_blocks = len(config["block_out_channels"])
model = UNetModel(
UNetConfig(
in_channels=config["in_channels"],
out_channels=config["out_channels"],
block_out_channels=config["block_out_channels"],
layers_per_block=[config["layers_per_block"]] * n_blocks,
num_attention_heads=[config["attention_head_dim"]] * n_blocks
if isinstance(config["attention_head_dim"], int)
else config["attention_head_dim"],
cross_attention_dim=[config["cross_attention_dim"]] * n_blocks,
norm_num_groups=config["norm_num_groups"],
)
)
# Download the weights and map them into the model
unet_weights = _MODELS[key]["unet"]
weight_file = hf_hub_download(key, unet_weights)
_load_safetensor_weights(map_unet_weights, model, weight_file, float16)
return model
def load_text_encoder(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion text encoder from Hugging Face Hub."""
_check_key(key, "load_text_encoder")
# Download the config and create the model
text_encoder_config = _MODELS[key]["text_encoder_config"]
with open(hf_hub_download(key, text_encoder_config)) as f:
config = json.load(f)
model = CLIPTextModel(
CLIPTextModelConfig(
num_layers=config["num_hidden_layers"],
model_dims=config["hidden_size"],
num_heads=config["num_attention_heads"],
max_length=config["max_position_embeddings"],
vocab_size=config["vocab_size"],
)
)
# Download the weights and map them into the model
text_encoder_weights = _MODELS[key]["text_encoder"]
weight_file = hf_hub_download(key, text_encoder_weights)
_load_safetensor_weights(map_clip_text_encoder_weights, model, weight_file, float16)
return model
def load_autoencoder(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion autoencoder from Hugging Face Hub."""
_check_key(key, "load_autoencoder")
# Download the config and create the model
vae_config = _MODELS[key]["vae_config"]
with open(hf_hub_download(key, vae_config)) as f:
config = json.load(f)
| # Copyright © 2023 Apple Inc.
logfile = 'log.txt'
_DEBUG = False
def _debug_print(*args, **kwargs):
if _DEBUG:
# Convert the arguments to a string
message = ' '.join(map(str, args))
# Print the message to the console
print(message, **kwargs)
# Open the log file in append mode and write the message
with open(logfile, 'a') as f:
f.write(message + '\n')
def _from_numpy(x):
return mx.array(np.ascontiguousarray(x))
# The `map_*_weights` functions are used to adjust the weights of a model when loading it from a file.
# The weights of the model in the file might be in a different format than the weights of the model in the current codebase.
# When you load a pre-trained model, the weights are stored in a dictionary where the keys are the names of the parameters in the model.
# If the architecture of your model is different from the architecture of the model that the weights were trained on, you might need to adjust the keys and/or the weights to match your model's architecture.
# This is what the `map_*_weights` functions are doing. They are adjusting the keys and the weights to match the architecture of the models in the current codebase.
def map_unet_weights(key, value):
# Map up/downsampling
if "downsamplers" in key:
key = key.replace("downsamplers.0.conv", "downsample")
_debug_print(f"Replaced 'downsamplers.0.conv' with 'downsample' in {key}")
if "upsamplers" in key:
key = key.replace("upsamplers.0.conv", "upsample")
_debug_print(f"Replaced 'upsamplers.0.conv' with 'upsample' in {key}")
# Map the mid block
if "mid_block.resnets.0" in key:
key = key.replace("mid_block.resnets.0", "mid_blocks.0")
_debug_print(f"Replaced 'mid_block.resnets.0' with 'mid_blocks.0' in {key}")
if "mid_block.attentions.0" in key:
key = key.replace("mid_block.attentions.0", "mid_blocks.1")
_debug_print(f"Replaced 'mid_block.attentions.0' with 'mid_blocks.1' in {key}")
if "mid_block.resnets.1" in key:
key = key.replace("mid_block.resnets.1", "mid_blocks.2")
_debug_print(f"Replaced 'mid_block.resnets.1' with 'mid_blocks.2' in {key}")
# Map attention layers
if "to_k" in key:
key = key.replace("to_k", "key_proj")
_debug_print(f"Replaced 'to_k' with 'key_proj' in {key}")
if "to_out.0" in key:
key = key.replace("to_out.0", "out_proj")
_debug_print(f"Replaced 'to_out.0' with 'out_proj' in {key}")
if "to_q" in key:
key = key.replace("to_q", "query_proj")
_debug_print(f"Replaced 'to_q' with 'query_proj' in {key}")
if "to_v" in key:
key = key.replace("to_v", "value_proj")
_debug_print(f"Replaced 'to_v' with 'value_proj' in {key}")
# Map transformer ffn
if "ff.net.2" in key:
key = key.replace("ff.net.2", "linear3")
_debug_print(f"Replaced 'ff.net.2' with 'linear3' in {key}")
if "ff.net.0" in key:
k1 = key.replace("ff.net.0.proj", "linear1")
k2 = key.replace("ff.net.0.proj", "linear2")
v1, v2 = np.split(value, 2)
_debug_print(f"Replaced 'ff.net.0.proj' with 'linear1' and 'linear2' in {key}")
return [(k1, _from_numpy(v1)), (k2, _from_numpy(v2))]
# The weights of this 1x1 convolutional layer would be a 4-dimensional tensor
# with shape [out_channels, in_channels, 1, 1].
# The squeeze() function is used to remove the dimensions of size 1 from this tensor,
# converting it to a 2-dimensional tensor with shape [out_channels, in_channels].
# This is because the corresponding layer in the current model might be a linear layer
# rather than a convolutional layer, and the weights for a linear layer are expected to be a 2-dimensional tensor.
if "conv_shortcut.weight" in key:
value = value.squeeze()
_debug_print(f"Squeezed 'conv_shortcut.weight' in {key}")
# Transform the weights from 1x1 convs to linear
if len(value.shape) == 4 and ("proj_in" in key or "proj_out" in key):
value = value.squeeze()
_debug_print(f"Squeezed 'proj_in' or 'proj_out' in {key}")
if len(value.shape) == 4:
value = value.transpose(0, 2, 3, 1)
_debug_print(f"Transposed dimensions in {key}")
return [(key, _from_numpy(value))]
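# --- Editor's illustrative sketch (not part of the original source file) ---
# A minimal, self-contained trace of what map_unet_weights does to two
# representative checkpoint entries: an attention projection key is renamed to
# this codebase's naming, and a 1x1 conv-shortcut weight of shape
# [out_channels, in_channels, 1, 1] is squeezed to the 2D shape a linear layer
# expects. The key strings and shapes below are assumptions chosen only for
# illustration; they are not taken from a real checkpoint.
def _demo_map_unet_weights():
    import numpy as np
    # "to_q" -> "query_proj"; a 2D value passes through unchanged.
    key = "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q.weight"
    [(new_key, _)] = map_unet_weights(key, np.zeros((8, 8), dtype=np.float32))
    assert new_key.endswith("attn1.query_proj.weight")
    # 1x1 conv shortcut: [out, in, 1, 1] -> [out, in] via squeeze().
    key = "up_blocks.0.resnets.0.conv_shortcut.weight"
    [(_, value)] = map_unet_weights(key, np.zeros((8, 4, 1, 1), dtype=np.float32))
    assert tuple(value.shape) == (8, 4)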
def map_clip_text_encoder_weights(key, value):
# Remove prefixes
if key.startswith("text_model."):
key = key[11:]
_debug_print(f"Removed 'text_model.' prefix from {key}")
if key.startswith("embeddings."):
key = key[11:]
_debug_print(f"Removed 'embeddings.' prefix from {key}")
if key.startswith("encoder."):
key = key[8:]
_debug_print(f"Removed 'encoder.' prefix from {key}")
# Map attention layers
if "self_attn." in key:
key = key.replace("self_attn.", "attention.")
_debug_print(f"Replaced 'self_attn.' with 'attention.' in {key}")
if "q_proj." in key:
key = key.replace("q_proj.", "query_proj.")
_debug_print(f"Replaced 'q_proj.' with 'query_proj.' in {key}")
if "k_proj." in key:
key = key.replace("k_proj.", "key_proj.")
_debug_print(f"Replaced 'k_proj.' with 'key_proj.' in {key}")
if "v_proj." in key:
key = key.replace("v_proj.", "value_proj.")
_debug_print(f"Replaced 'v_proj.' with 'value_proj.' in {key}")
# Map ffn layers
if "mlp.fc1" in key:
key = key.replace("mlp.fc1", "linear1")
_debug_print(f"Replaced 'mlp.fc1' with 'linear1' in {key}")
if "mlp.fc2" in key:
key = key.replace("mlp.fc2", "linear2")
_debug_print(f"Replaced 'mlp.fc2' with 'linear2' in {key}")
return [(key, _from_numpy(value))]
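# --- Editor's illustrative sketch (not part of the original source file) ---
# One representative CLIP text-encoder key (an assumption, for illustration)
# passes through the renaming steps above as follows:
#   "text_model.encoder.layers.0.self_attn.q_proj.weight"
#   -> strip "text_model." -> strip "encoder."
#   -> "self_attn." becomes "attention.", "q_proj." becomes "query_proj."
#   -> "layers.0.attention.query_proj.weight"
def _demo_map_clip_text_encoder_key():
    import numpy as np
    key = "text_model.encoder.layers.0.self_attn.q_proj.weight"
    [(new_key, _)] = map_clip_text_encoder_weights(key, np.zeros((4, 4), dtype=np.float32))
    assert new_key == "layers.0.attention.query_proj.weight"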
def map_vae_weights(key, value):
# Map up/downsampling
if "downsamplers" in key:
key = key.replace("downsamplers.0.conv", "downsample")
_debug_print(f"Replaced 'downsamplers.0.conv' with 'downsample' in {key}")
if "upsamplers" in key:
key = key.replace("upsamplers.0.conv", "upsample")
_debug_print(f"Replaced 'upsamplers.0.conv' with 'upsample' in {key}")
# Map attention layers
if "to_k" in key:
key = key.replace("to_k", "key_proj")
_debug_print(f"Replaced 'to_k' with 'key_proj' in {key}")
if "to_out.0" in key:
key = key.replace("to_out.0", "out_proj")
_debug_print(f"Replaced 'to_out.0' with 'out_proj' in {key}")
if "to_q" in key:
key = key.replace("to_q", "query_proj")
_debug_print(f"Replaced 'to_q' with 'query_proj' in {key}")
if "to_v" in key:
key = key.replace("to_v", "value_proj")
_debug_print(f"Replaced 'to_v' with 'value_proj' in {key}")
# Map the mid block
if "mid_block.resnets.0" in key:
key = key.replace("mid_block.resnets.0", "mid_blocks.0")
_debug_print(f"Replaced 'mid_block.resnets.0' with 'mid_blocks.0' in {key}")
if "mid_block.attentions.0" in key:
key = key.replace("mid_block.attentions.0", "mid_blocks.1")
_debug_print(f"Replaced 'mid_block.attentions.0' with 'mid_blocks.1' in {key}")
if "mid_block.resnets.1" in key:
key = key.replace("mid_block.resnets.1", "mid_blocks.2")
_debug_print(f"Replaced 'mid_block.resnets.1' with 'mid_blocks.2' in {key}")
# Map the quant/post_quant layers
if "quant_conv" in key:
key = key.replace("quant_conv", "quant_proj")
value = value.squeeze()
_debug_print(f"Replaced 'quant_conv' with 'quant_proj' and squeezed value in {key}")
# Map the conv_shortcut to linear
if "conv_shortcut.weight" in key:
value = value.squeeze()
_debug_print(f"Squeezed 'conv_shortcut.weight' in {key}")
# Rearrange the dimensions to [B, H, W, C] - Autoencoder expects: B, H, W, C = x.shape
if len(value.shape) == 4:
value = value.transpose(0, 2, 3, 1)
_debug_print(f"Transposed dimensions in {key}")
return [(key, _from_numpy(value))]
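# --- Editor's illustrative sketch (not part of the original source file) ---
# The VAE mapper moves PyTorch-style conv weights [out_ch, in_ch, kH, kW] to a
# channels-last layout [out_ch, kH, kW, in_ch] via transpose(0, 2, 3, 1), and
# turns the 1x1 quant_conv kernel into a 2D matrix for the quant_proj linear
# layer. The keys and shapes below are assumptions chosen only for illustration.
def _demo_map_vae_weights():
    import numpy as np
    [(_, value)] = map_vae_weights(
        "encoder.conv_in.weight", np.zeros((128, 4, 3, 3), dtype=np.float32)
    )
    assert tuple(value.shape) == (128, 3, 3, 4)  # channels moved last
    [(qkey, qval)] = map_vae_weights(
        "quant_conv.weight", np.zeros((8, 8, 1, 1), dtype=np.float32)
    )
    assert qkey == "quant_proj.weight" and tuple(qval.shape) == (8, 8)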
def _flatten(params):
return [(k, v) for p in params for (k, v) in p]
# The weights of the model can be loaded as 16-bit floating point numbers, which is a form of quantization known as half-precision floating point.
# This can reduce the memory requirements of the model by half compared to 32-bit floating point numbers, at the cost of reduced numerical precision.
def _load_safetensor_weights(mapper, model, weight_file, float16: bool = False):
dtype = np.float16 if float16 else np.float32
_debug_print(f"Loading weights from {weight_file}")
with safetensor_open(weight_file, framework="numpy") as f:
keys = list(f.keys())
weights = _flatten([mapper(k, f.get_tensor(k).astype(dtype)) for k in tqdm(keys, desc=f"Loading weights from {weight_file}...")])
model.update(tree_unflatten(weights))
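# --- Editor's illustrative sketch (not part of the original source file) ---
# The float16 flag above halves the bytes stored per parameter. For a
# hypothetical 10M-element tensor (an assumed size, for illustration only):
def _demo_half_precision_footprint():
    import numpy as np
    n = 10_000_000
    assert np.zeros(n, dtype=np.float32).nbytes == 40_000_000  # ~40 MB
    assert np.zeros(n, dtype=np.float16).nbytes == 20_000_000  # ~20 MB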
def _check_key(key: str, part: str):
if key not in _MODELS:
raise ValueError(
f"[{part}] '{key}' model not found, choose one of {{{','.join(_MODELS.keys())}}}"
)
def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion UNet from Hugging Face Hub."""
_check_key(key, "load_unet")
# Download the config and create the model
unet_config = _MODELS[key]["unet_config"]
with open(hf_hub_download(key, unet_config)) as f:
config = json.load(f)
n_blocks = len(config["block_out_channels"])
model = UNetModel(
UNetConfig(
in_channels=config["in_channels"],
out_channels=config["out_channels"],
block_out_channels=config["block_out_channels"],
layers_per_block=[config["layers_per_block"]] * n_blocks,
num_attention_heads=[config["attention_head_dim"]] * n_blocks
if isinstance(config["attention_head_dim"], int)
else config["attention_head_dim"],
cross_attention_dim=[config["cross_attention_dim"]] * n_blocks,
norm_num_groups=config["norm_num_groups"],
)
)
# Download the weights and map them into the model
unet_weights = _MODELS[key]["unet"]
weight_file = hf_hub_download(key, unet_weights)
_load_safetensor_weights(map_unet_weights, model, weight_file, float16)
return model
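# --- Editor's illustrative usage sketch (not part of the original source file) ---
# A minimal call of the loader above; _DEFAULT_MODEL is whatever key the repo
# registers first in _MODELS, and float16=True applies the half-precision
# loading discussed earlier.
def _demo_load_default_unet_fp16():
    return load_unet(_DEFAULT_MODEL, float16=True)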
def load_text_encoder(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion text encoder from Hugging Face Hub."""
_check_key(key, "load_text_encoder")
# Download the config and create the model
text_encoder_config = _MODELS[key]["text_encoder_config"]
with open(hf_hub_download(key, text_encoder_config)) as f:
config = json.load(f)
model = CLIPTextModel(
CLIPTextModelConfig(
num_layers=config["num_hidden_layers"],
model_dims=config["hidden_size"],
num_heads=config["num_attention_heads"],
max_length=config["max_position_embeddings"],
vocab_size=config["vocab_size"],
)
)
# Download the weights and map them into the model
text_encoder_weights = _MODELS[key]["text_encoder"]
weight_file = hf_hub_download(key, text_encoder_weights)
_load_safetensor_weights(map_clip_text_encoder_weights, model, weight_file, float16)
return model
def load_autoencoder(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion autoencoder from Hugging Face Hub."""
_check_key(key, "load_autoencoder")
# Download the config and create the model
vae_config = _MODELS[key]["vae_config"]
with open(hf_hub_download(key, vae_config)) as f:
config = json.load(f)
| model = Autoencoder( | 7 | 2023-12-25 05:49:34+00:00 | 12k |
Con6924/SPM | train_spm_xl.py | [
{
"identifier": "SPMNetwork",
"path": "src/models/spm.py",
"snippet": "class SPMNetwork(nn.Module):\n UNET_TARGET_REPLACE_MODULE_TRANSFORMER = [\n \"Transformer2DModel\",\n ]\n UNET_TARGET_REPLACE_MODULE_CONV = [\n \"ResnetBlock2D\",\n \"Downsample2D\",\n \"Upsample2D\",\n ]\n\n SPM_PREFIX_UNET = \"lora_unet\" # aligning with SD webui usage\n DEFAULT_TARGET_REPLACE = UNET_TARGET_REPLACE_MODULE_TRANSFORMER\n\n def __init__(\n self,\n unet: UNet2DConditionModel,\n rank: int = 4,\n multiplier: float = 1.0,\n alpha: float = 1.0,\n module = SPMLayer,\n module_kwargs = None,\n ) -> None:\n super().__init__()\n\n self.multiplier = multiplier\n self.dim = rank\n self.alpha = alpha\n\n self.module = module\n self.module_kwargs = module_kwargs or {}\n\n # unet spm\n self.unet_spm_layers = self.create_modules(\n SPMNetwork.SPM_PREFIX_UNET,\n unet,\n SPMNetwork.DEFAULT_TARGET_REPLACE,\n self.dim,\n self.multiplier,\n )\n print(f\"Create SPM for U-Net: {len(self.unet_spm_layers)} modules.\")\n\n spm_names = set()\n for spm_layer in self.unet_spm_layers:\n assert (\n spm_layer.spm_name not in spm_names\n ), f\"duplicated SPM layer name: {spm_layer.spm_name}. {spm_names}\"\n spm_names.add(spm_layer.spm_name)\n\n for spm_layer in self.unet_spm_layers:\n spm_layer.apply_to()\n self.add_module(\n spm_layer.spm_name,\n spm_layer,\n )\n\n del unet\n\n torch.cuda.empty_cache()\n\n def create_modules(\n self,\n prefix: str,\n root_module: nn.Module,\n target_replace_modules: List[str],\n rank: int,\n multiplier: float,\n ) -> list:\n spm_layers = []\n\n for name, module in root_module.named_modules():\n if module.__class__.__name__ in target_replace_modules:\n for child_name, child_module in module.named_modules():\n if child_module.__class__.__name__ in [\"Linear\", \"Conv2d\"]:\n spm_name = prefix + \".\" + name + \".\" + child_name\n spm_name = spm_name.replace(\".\", \"_\")\n print(f\"{spm_name}\")\n spm_layer = self.module(\n spm_name, child_module, multiplier, rank, self.alpha, **self.module_kwargs\n )\n spm_layers.append(spm_layer)\n\n return spm_layers\n\n def prepare_optimizer_params(self, text_encoder_lr, unet_lr, default_lr):\n all_params = []\n\n if self.unet_spm_layers:\n params = []\n [params.extend(spm_layer.parameters()) for spm_layer in self.unet_spm_layers]\n param_data = {\"params\": params}\n if default_lr is not None:\n param_data[\"lr\"] = default_lr\n all_params.append(param_data)\n\n return all_params\n\n def save_weights(self, file, dtype=None, metadata: Optional[dict] = None):\n state_dict = self.state_dict()\n\n if dtype is not None:\n for key in list(state_dict.keys()):\n v = state_dict[key]\n v = v.detach().clone().to(\"cpu\").to(dtype)\n state_dict[key] = v\n\n for key in list(state_dict.keys()):\n if not key.startswith(\"lora\"):\n del state_dict[key]\n\n if os.path.splitext(file)[1] == \".safetensors\":\n save_file(state_dict, file, metadata)\n else:\n torch.save(state_dict, file)\n\n def __enter__(self):\n for spm_layer in self.unet_spm_layers:\n spm_layer.multiplier = 1.0\n\n def __exit__(self, exc_type, exc_value, tb):\n for spm_layer in self.unet_spm_layers:\n spm_layer.multiplier = 0"
},
{
"identifier": "SPMLayer",
"path": "src/models/spm.py",
"snippet": "class SPMLayer(nn.Module):\n \"\"\"\n replaces forward method of the original Linear, instead of replacing the original Linear module.\n \"\"\"\n\n def __init__(\n self,\n spm_name,\n org_module: nn.Module,\n multiplier=1.0,\n dim=4,\n alpha=1,\n ):\n \"\"\"if alpha == 0 or None, alpha is rank (no scaling).\"\"\"\n super().__init__()\n self.spm_name = spm_name\n self.dim = dim\n\n if org_module.__class__.__name__ == \"Linear\":\n in_dim = org_module.in_features\n out_dim = org_module.out_features\n self.lora_down = nn.Linear(in_dim, dim, bias=False)\n self.lora_up = nn.Linear(dim, out_dim, bias=False)\n\n elif org_module.__class__.__name__ == \"Conv2d\":\n in_dim = org_module.in_channels\n out_dim = org_module.out_channels\n\n self.dim = min(self.dim, in_dim, out_dim)\n if self.dim != dim:\n print(f\"{spm_name} dim (rank) is changed to: {self.dim}\")\n\n kernel_size = org_module.kernel_size\n stride = org_module.stride\n padding = org_module.padding\n self.lora_down = nn.Conv2d(\n in_dim, self.dim, kernel_size, stride, padding, bias=False\n )\n self.lora_up = nn.Conv2d(self.dim, out_dim, (1, 1), (1, 1), bias=False)\n\n if type(alpha) == torch.Tensor:\n alpha = alpha.detach().numpy()\n alpha = dim if alpha is None or alpha == 0 else alpha\n self.scale = alpha / self.dim\n self.register_buffer(\"alpha\", torch.tensor(alpha))\n\n # same as microsoft's\n nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5))\n nn.init.zeros_(self.lora_up.weight)\n\n self.multiplier = multiplier\n self.org_module = org_module # remove in applying\n\n def apply_to(self):\n self.org_forward = self.org_module.forward\n self.org_module.forward = self.forward\n del self.org_module\n\n def forward(self, x):\n return (\n self.org_forward(x)\n + self.lora_up(self.lora_down(x)) * self.multiplier * self.scale\n )"
},
{
"identifier": "sample_xl",
"path": "src/engine/sampling.py",
"snippet": "def sample_xl(prompt_pair: PromptEmbedsPair, tokenizers=None, text_encoders=None):\n res = []\n for unconditional, target in zip(\n [prompt_pair.unconditional.text_embeds, prompt_pair.unconditional.pooled_embeds],\n [prompt_pair.target.text_embeds, prompt_pair.target.pooled_embeds]\n ):\n samples = []\n while len(samples) < prompt_pair.sampling_batch_size:\n while True:\n # sample from gaussian distribution\n noise = torch.randn_like(target)\n # normalize the noise\n noise = noise / noise.view(-1).norm(dim=-1)\n # compute the similarity\n sim = torch.cosine_similarity(target.view(-1), noise.view(-1), dim=-1)\n # the possibility of accepting the sample = 1 - sim\n if random.random() < 1 - sim:\n break\n scale = random.random() * 0.4 + 0.8\n sample = scale * noise * target.view(-1).norm(dim=-1)\n samples.append(sample)\n \n samples = [torch.cat([unconditional, s]) for s in samples]\n samples = torch.cat(samples, dim=0)\n res.append(samples)\n \n return res"
},
{
"identifier": "model_util",
"path": "src/models/model_util.py",
"snippet": "TOKENIZER_V1_MODEL_NAME = \"CompVis/stable-diffusion-v1-4\"\nTOKENIZER_V2_MODEL_NAME = \"stabilityai/stable-diffusion-2-1\"\nAVAILABLE_SCHEDULERS = Literal[\"ddim\", \"ddpm\", \"lms\", \"euler_a\"]\nSDXL_TEXT_ENCODER_TYPE = Union[CLIPTextModel, CLIPTextModelWithProjection]\nDIFFUSERS_CACHE_DIR = \".cache/\" # if you want to change the cache dir, change this\nLOCAL_ONLY = False # if you want to use only local files, change this\ndef load_diffusers_model(\n pretrained_model_name_or_path: str,\n v2: bool = False,\n clip_skip: Optional[int] = None,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel,]:\ndef load_checkpoint_model(\n checkpoint_path: str,\n v2: bool = False,\n clip_skip: Optional[int] = None,\n weight_dtype: torch.dtype = torch.float32,\n device = \"cuda\",\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel, DiffusionPipeline]:\ndef load_models(\n pretrained_model_name_or_path: str,\n scheduler_name: AVAILABLE_SCHEDULERS,\n v2: bool = False,\n v_pred: bool = False,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel, SchedulerMixin, DiffusionPipeline, ]:\ndef load_diffusers_model_xl(\n pretrained_model_name_or_path: str,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[list[CLIPTokenizer], list[SDXL_TEXT_ENCODER_TYPE], UNet2DConditionModel,]:\ndef load_checkpoint_model_xl(\n checkpoint_path: str,\n weight_dtype: torch.dtype = torch.float32,\n device = \"cuda\",\n) -> tuple[list[CLIPTokenizer], list[SDXL_TEXT_ENCODER_TYPE], UNet2DConditionModel, DiffusionPipeline, ]:\ndef load_models_xl(\n pretrained_model_name_or_path: str,\n scheduler_name: AVAILABLE_SCHEDULERS,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[\ndef create_noise_scheduler(\n scheduler_name: AVAILABLE_SCHEDULERS = \"ddpm\",\n prediction_type: Literal[\"epsilon\", \"v_prediction\"] = \"epsilon\",\n) -> SchedulerMixin:"
},
{
"identifier": "eval_util",
"path": "src/evaluation/eval_util.py",
"snippet": "def get_clip_preprocess(n_px=224):\n def Convert(image):\n def text_preprocess(text):\ndef clip_score(\n images: List[Union[torch.Tensor, np.ndarray, Image.Image, str]],\n texts: str,\n w: float = 2.5,\n clip_model: str = \"ViT-B/32\",\n n_px: int = 224,\n cross_matching: bool = False,\n):\ndef clip_accuracy(\n images: List[Union[torch.Tensor, np.ndarray, Image.Image, str]],\n ablated_texts: Union[List[str], str],\n anchor_texts: Union[List[str], str],\n w: float = 2.5,\n clip_model: str = \"ViT-B/32\",\n n_px: int = 224,\n):\ndef clip_eval_by_image(\n images: List[Union[torch.Tensor, np.ndarray, Image.Image, str]],\n ablated_texts: Union[List[str], str],\n anchor_texts: Union[List[str], str],\n w: float = 2.5,\n clip_model: str = \"ViT-B/32\",\n n_px: int = 224,\n):\ndef clip_eval(\n pipe: DiffusionPipeline,\n config: RootConfig,\n w: float = 2.5,\n clip_model: str = \"ViT-B/32\",\n n_px: int = 224,\n):"
},
{
"identifier": "config",
"path": "src/configs/config.py",
"snippet": "PRECISION_TYPES = Literal[\"fp32\", \"fp16\", \"bf16\", \"float32\", \"float16\", \"bfloat16\"]\nclass PretrainedModelConfig(BaseModel):\nclass NetworkConfig(BaseModel):\nclass TrainConfig(BaseModel): \nclass SaveConfig(BaseModel):\nclass LoggingConfig(BaseModel):\nclass InferenceConfig(BaseModel):\nclass OtherConfig(BaseModel):\nclass RootConfig(BaseModel):\ndef parse_precision(precision: str) -> torch.dtype:\ndef load_config_from_yaml(config_path: str) -> RootConfig:"
},
{
"identifier": "prompt",
"path": "src/configs/prompt.py",
"snippet": "ACTION_TYPES = Literal[\n \"erase\",\n \"erase_with_la\",\n]\nPROMPT_EMBEDDING = Union[torch.FloatTensor, PromptEmbedsXL]\nclass PromptEmbedsXL:\nclass PromptEmbedsCache:\nclass PromptSettings(BaseModel): # yaml\nclass PromptEmbedsPair:\n def __init__(self, embeds) -> None:\n def __setitem__(self, __name: str, __value: PROMPT_EMBEDDING) -> None:\n def __getitem__(self, __name: str) -> Optional[PROMPT_EMBEDDING]:\n def fill_prompts(cls, values):\n def __init__(\n self,\n loss_fn: torch.nn.Module,\n target: PROMPT_EMBEDDING,\n positive: PROMPT_EMBEDDING,\n unconditional: PROMPT_EMBEDDING,\n neutral: PROMPT_EMBEDDING,\n settings: PromptSettings,\n ) -> None:\n def _prepare_embeddings(\n self, \n cache: PromptEmbedsCache,\n tokenizer: CLIPTokenizer,\n text_encoder: CLIPTextModel,\n ):\n def _erase(\n self,\n target_latents: torch.FloatTensor, # \"van gogh\"\n positive_latents: torch.FloatTensor, # \"van gogh\"\n neutral_latents: torch.FloatTensor, # \"\"\n **kwargs,\n ) -> torch.FloatTensor:\n def _erase_with_la(\n self,\n target_latents: torch.FloatTensor, # \"van gogh\"\n positive_latents: torch.FloatTensor, # \"van gogh\"\n neutral_latents: torch.FloatTensor, # \"\"\n anchor_latents: torch.FloatTensor, \n anchor_latents_ori: torch.FloatTensor, \n **kwargs,\n ):\n def loss(\n self,\n **kwargs,\n ):\ndef load_prompts_from_yaml(path: str | Path) -> list[PromptSettings]:\ndef load_prompts_from_table(path: str | Path) -> list[PromptSettings]:\ndef compute_rotation_matrix(target: torch.FloatTensor):"
},
{
"identifier": "RootConfig",
"path": "src/configs/config.py",
"snippet": "class RootConfig(BaseModel):\n prompts_file: Optional[str] = None\n \n pretrained_model: PretrainedModelConfig\n\n network: Optional[NetworkConfig] = None\n\n train: Optional[TrainConfig] = None\n\n save: Optional[SaveConfig] = None\n\n logging: Optional[LoggingConfig] = None\n\n inference: Optional[InferenceConfig] = None\n\n other: Optional[OtherConfig] = None"
},
{
"identifier": "PromptEmbedsCache",
"path": "src/configs/prompt.py",
"snippet": "class PromptEmbedsCache:\n prompts: dict[str, PROMPT_EMBEDDING] = {}\n\n def __setitem__(self, __name: str, __value: PROMPT_EMBEDDING) -> None:\n self.prompts[__name] = __value\n\n def __getitem__(self, __name: str) -> Optional[PROMPT_EMBEDDING]:\n if __name in self.prompts:\n return self.prompts[__name]\n else:\n return None"
},
{
"identifier": "PromptEmbedsPair",
"path": "src/configs/prompt.py",
"snippet": "class PromptEmbedsPair:\n target: PROMPT_EMBEDDING # the concept that do not want to generate \n positive: PROMPT_EMBEDDING # generate the concept\n unconditional: PROMPT_EMBEDDING # uncondition (default should be empty)\n neutral: PROMPT_EMBEDDING # base condition (default should be empty)\n use_template: bool = False # use clip template or not\n\n guidance_scale: float\n resolution: int\n dynamic_resolution: bool\n batch_size: int\n dynamic_crops: bool\n\n loss_fn: torch.nn.Module\n action: ACTION_TYPES\n\n def __init__(\n self,\n loss_fn: torch.nn.Module,\n target: PROMPT_EMBEDDING,\n positive: PROMPT_EMBEDDING,\n unconditional: PROMPT_EMBEDDING,\n neutral: PROMPT_EMBEDDING,\n settings: PromptSettings,\n ) -> None:\n self.loss_fn = loss_fn\n self.target = target\n self.positive = positive\n self.unconditional = unconditional\n self.neutral = neutral\n \n self.settings = settings\n\n self.use_template = settings.use_template\n self.guidance_scale = settings.guidance_scale\n self.resolution = settings.resolution\n self.dynamic_resolution = settings.dynamic_resolution\n self.batch_size = settings.batch_size\n self.dynamic_crops = settings.dynamic_crops\n self.action = settings.action\n \n self.la_strength = settings.la_strength\n self.sampling_batch_size = settings.sampling_batch_size\n \n \n def _prepare_embeddings(\n self, \n cache: PromptEmbedsCache,\n tokenizer: CLIPTokenizer,\n text_encoder: CLIPTextModel,\n ):\n \"\"\"\n Prepare embeddings for training. When use_template is True, the embeddings will be\n format using a template, and then be processed by the model.\n \"\"\"\n if not self.use_template:\n return\n template = random.choice(imagenet_templates)\n target_prompt = template.format(self.settings.target)\n if cache[target_prompt]:\n self.target = cache[target_prompt]\n else:\n self.target = encode_prompts(tokenizer, text_encoder, [target_prompt])\n \n \n def _erase(\n self,\n target_latents: torch.FloatTensor, # \"van gogh\"\n positive_latents: torch.FloatTensor, # \"van gogh\"\n neutral_latents: torch.FloatTensor, # \"\"\n **kwargs,\n ) -> torch.FloatTensor:\n \"\"\"Target latents are going not to have the positive concept.\"\"\"\n\n erase_loss = self.loss_fn(\n target_latents,\n neutral_latents\n - self.guidance_scale * (positive_latents - neutral_latents),\n )\n losses = {\n \"loss\": erase_loss,\n \"loss/erase\": erase_loss,\n }\n return losses\n \n def _erase_with_la(\n self,\n target_latents: torch.FloatTensor, # \"van gogh\"\n positive_latents: torch.FloatTensor, # \"van gogh\"\n neutral_latents: torch.FloatTensor, # \"\"\n anchor_latents: torch.FloatTensor, \n anchor_latents_ori: torch.FloatTensor, \n **kwargs,\n ):\n anchoring_loss = self.loss_fn(anchor_latents, anchor_latents_ori)\n erase_loss = self._erase(\n target_latents=target_latents,\n positive_latents=positive_latents,\n neutral_latents=neutral_latents,\n )[\"loss/erase\"]\n losses = {\n \"loss\": erase_loss + self.la_strength * anchoring_loss,\n \"loss/erase\": erase_loss,\n \"loss/anchoring\": anchoring_loss\n }\n return losses\n\n def loss(\n self,\n **kwargs,\n ):\n if self.action == \"erase\":\n return self._erase(**kwargs)\n elif self.action == \"erase_with_la\":\n return self._erase_with_la(**kwargs)\n else:\n raise ValueError(\"action must be erase or erase_with_la\")"
},
{
"identifier": "PromptSettings",
"path": "src/configs/prompt.py",
"snippet": "class PromptSettings(BaseModel): # yaml\n target: str\n positive: str = None # if None, target will be used\n unconditional: str = \"\" # default is \"\"\n neutral: str = None # if None, unconditional will be used\n action: ACTION_TYPES = \"erase\" # default is \"erase\"\n guidance_scale: float = 1.0 # default is 1.0\n resolution: int = 512 # default is 512\n dynamic_resolution: bool = False # default is False\n batch_size: int = 1 # default is 1\n dynamic_crops: bool = False # default is False. only used when model is XL\n use_template: bool = False # default is False\n \n la_strength: float = 1000.0\n sampling_batch_size: int = 4\n\n seed: int = None\n case_number: int = 0\n\n @root_validator(pre=True)\n def fill_prompts(cls, values):\n keys = values.keys()\n if \"target\" not in keys:\n raise ValueError(\"target must be specified\")\n if \"positive\" not in keys:\n values[\"positive\"] = values[\"target\"]\n if \"unconditional\" not in keys:\n values[\"unconditional\"] = \"\"\n if \"neutral\" not in keys:\n values[\"neutral\"] = values[\"unconditional\"]\n\n return values"
},
{
"identifier": "PromptEmbedsXL",
"path": "src/configs/prompt.py",
"snippet": "class PromptEmbedsXL:\n text_embeds: torch.FloatTensor\n pooled_embeds: torch.FloatTensor\n\n def __init__(self, embeds) -> None:\n self.text_embeds, self.pooled_embeds = embeds"
}
] | import argparse
import gc
import torch
import src.engine.train_util as train_util
import wandb
from pathlib import Path
from tqdm import tqdm
from src.models.spm import (
SPMNetwork,
SPMLayer,
)
from src.engine.sampling import sample_xl
from src.models import model_util
from src.evaluation import eval_util
from src.configs import config as config_pkg
from src.configs import prompt as prompt_pkg
from src.configs.config import RootConfig
from src.configs.prompt import PromptEmbedsCache, PromptEmbedsPair, PromptSettings, PromptEmbedsXL | 7,368 | positive_latents.requires_grad = False
neutral_latents.requires_grad = False
loss = prompt_pair.loss(
target_latents=target_latents,
positive_latents=positive_latents,
neutral_latents=neutral_latents,
anchor_latents=anchor_latents,
anchor_latents_ori=anchor_latents_ori,
)
loss["loss"].backward()
if config.train.max_grad_norm > 0:
torch.nn.utils.clip_grad_norm_(
trainable_params, config.train.max_grad_norm, norm_type=2
)
optimizer.step()
lr_scheduler.step()
pbar.set_description(f"Loss*1k: {loss['loss'].item()*1000:.4f}")
# logging
if config.logging.use_wandb:
log_dict = {"iteration": i}
loss = {k: v.detach().cpu().item() for k, v in loss.items()}
log_dict.update(loss)
lrs = lr_scheduler.get_last_lr()
if len(lrs) == 1:
log_dict["lr"] = float(lrs[0])
else:
log_dict["lr/textencoder"] = float(lrs[0])
log_dict["lr/unet"] = float(lrs[-1])
if config.train.optimizer_type.lower().startswith("dadapt"):
log_dict["lr/d*lr"] = (
optimizer.param_groups[0]["d"] * optimizer.param_groups[0]["lr"]
)
# generate sample images
if config.logging.interval > 0 and (
i % config.logging.interval == 0 or i == config.train.iterations - 1
):
print("Generating samples...")
with network:
samples = train_util.text2img(
pipe,
prompts=config.logging.prompts,
negative_prompt=config.logging.negative_prompt,
width=config.logging.width,
height=config.logging.height,
num_inference_steps=config.logging.num_inference_steps,
guidance_scale=config.logging.guidance_scale,
generate_num=config.logging.generate_num,
seed=config.logging.seed,
)
for text, img in samples:
log_dict[text] = wandb.Image(img)
# evaluate on the generated images
print("Evaluating CLIPScore and CLIPAccuracy...")
with network:
clip_scores, clip_accs = eval_util.clip_eval(pipe, config)
for prompt, clip_score, clip_accuracy in zip(
config.logging.prompts, clip_scores, clip_accs
):
log_dict[f"CLIPScore/{prompt}"] = clip_score
log_dict[f"CLIPAccuracy/{prompt}"] = clip_accuracy
log_dict[f"CLIPScore/average"] = sum(clip_scores) / len(clip_scores)
log_dict[f"CLIPAccuracy/average"] = sum(clip_accs) / len(clip_accs)
wandb.log(log_dict)
# save model
if (
i % config.save.per_steps == 0
and i != 0
and i != config.train.iterations - 1
):
print("Saving...")
save_path.mkdir(parents=True, exist_ok=True)
network.save_weights(
save_path / f"{config.save.name}_{i}steps.safetensors",
dtype=save_weight_dtype,
metadata=model_metadata,
)
del (
positive_latents,
neutral_latents,
target_latents,
latents,
anchor_latents,
anchor_latents_ori,
)
flush()
print("Saving...")
save_path.mkdir(parents=True, exist_ok=True)
network.save_weights(
save_path / f"{config.save.name}_last.safetensors",
dtype=save_weight_dtype,
metadata=model_metadata,
)
del (
unet,
noise_scheduler,
loss,
optimizer,
network,
)
flush()
print("Done.")
def main(args):
config_file = args.config_file
config = config_pkg.load_config_from_yaml(config_file)
| # ref:
# - https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L566
# - https://huggingface.co/spaces/baulab/Erasing-Concepts-In-Diffusion/blob/main/train.py
# - https://github.com/p1atdev/LECO/blob/main/train_lora_xl.py
DEVICE_CUDA = torch.device("cuda:0")
NUM_IMAGES_PER_PROMPT = 1
def flush():
torch.cuda.empty_cache()
gc.collect()
def train(
config: RootConfig,
prompts: list[PromptSettings],
):
metadata = {
"prompts": ",".join([prompt.json() for prompt in prompts]),
"config": config.json(),
}
model_metadata = {
"prompts": ",".join([prompt.target for prompt in prompts]),
"rank": str(config.network.rank),
"alpha": str(config.network.alpha),
}
save_path = Path(config.save.path)
if config.logging.verbose:
print(metadata)
weight_dtype = config_pkg.parse_precision(config.train.precision)
save_weight_dtype = config_pkg.parse_precision(config.train.precision)
if config.logging.use_wandb:
wandb.init(project=f"SPM",
config=metadata,
name=config.logging.run_name,
settings=wandb.Settings(symlink=False))
(
tokenizers,
text_encoders,
unet,
noise_scheduler,
pipe
) = model_util.load_models_xl(
config.pretrained_model.name_or_path,
scheduler_name=config.train.noise_scheduler,
)
for text_encoder in text_encoders:
text_encoder.to(DEVICE_CUDA, dtype=weight_dtype)
text_encoder.requires_grad_(False)
text_encoder.eval()
unet.to(DEVICE_CUDA, dtype=weight_dtype)
unet.enable_xformers_memory_efficient_attention()
unet.requires_grad_(False)
unet.eval()
network = SPMNetwork(
unet,
rank=config.network.rank,
multiplier=1.0,
alpha=config.network.alpha,
module=SPMLayer,
).to(DEVICE_CUDA, dtype=weight_dtype)
trainable_params = network.prepare_optimizer_params(
config.train.text_encoder_lr, config.train.unet_lr, config.train.lr
)
optimizer_name, optimizer_args, optimizer = train_util.get_optimizer(
config, trainable_params
)
lr_scheduler = train_util.get_scheduler_fix(config, optimizer)
criteria = torch.nn.MSELoss()
print("Prompts")
for settings in prompts:
print(settings)
cache = PromptEmbedsCache()
prompt_pairs: list[PromptEmbedsPair] = []
with torch.no_grad():
for settings in prompts:
for prompt in [
settings.target,
settings.positive,
settings.neutral,
settings.unconditional,
]:
                if cache[prompt] is None:
cache[prompt] = PromptEmbedsXL(
train_util.encode_prompts_xl(
tokenizers,
text_encoders,
[prompt],
num_images_per_prompt=NUM_IMAGES_PER_PROMPT,
)
)
prompt_pair = PromptEmbedsPair(
criteria,
cache[settings.target],
cache[settings.positive],
cache[settings.unconditional],
cache[settings.neutral],
settings,
)
assert prompt_pair.sampling_batch_size % prompt_pair.batch_size == 0
prompt_pairs.append(prompt_pair)
flush()
pbar = tqdm(range(config.train.iterations))
loss = None
for i in pbar:
with torch.no_grad():
noise_scheduler.set_timesteps(
config.train.max_denoising_steps, device=DEVICE_CUDA
)
optimizer.zero_grad()
prompt_pair: PromptEmbedsPair = prompt_pairs[
torch.randint(0, len(prompt_pairs), (1,)).item()
]
timesteps_to = torch.randint(
1, config.train.max_denoising_steps, (1,)
).item()
height, width = (
prompt_pair.resolution,
prompt_pair.resolution,
)
if prompt_pair.dynamic_resolution:
height, width = train_util.get_random_resolution_in_bucket(
prompt_pair.resolution
)
if config.logging.verbose:
print("guidance_scale:", prompt_pair.guidance_scale)
print("resolution:", prompt_pair.resolution)
print("dynamic_resolution:", prompt_pair.dynamic_resolution)
if prompt_pair.dynamic_resolution:
print("bucketed resolution:", (height, width))
print("batch_size:", prompt_pair.batch_size)
print("dynamic_crops:", prompt_pair.dynamic_crops)
latents = train_util.get_initial_latents(
noise_scheduler, prompt_pair.batch_size, height, width, 1
).to(DEVICE_CUDA, dtype=weight_dtype)
add_time_ids = train_util.get_add_time_ids(
height,
width,
dynamic_crops=prompt_pair.dynamic_crops,
dtype=weight_dtype,
).to(DEVICE_CUDA, dtype=weight_dtype)
with network:
denoised_latents = train_util.diffusion_xl(
unet,
noise_scheduler,
latents,
text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.text_embeds,
prompt_pair.target.text_embeds,
prompt_pair.batch_size,
),
add_text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.pooled_embeds,
prompt_pair.target.pooled_embeds,
prompt_pair.batch_size,
),
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.batch_size
),
start_timesteps=0,
total_timesteps=timesteps_to,
guidance_scale=3,
)
noise_scheduler.set_timesteps(1000)
current_timestep = noise_scheduler.timesteps[
int(timesteps_to * 1000 / config.train.max_denoising_steps)
]
positive_latents = train_util.predict_noise_xl(
unet,
noise_scheduler,
current_timestep,
denoised_latents,
text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.text_embeds,
prompt_pair.positive.text_embeds,
prompt_pair.batch_size,
),
add_text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.pooled_embeds,
prompt_pair.positive.pooled_embeds,
prompt_pair.batch_size,
),
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
neutral_latents = train_util.predict_noise_xl(
unet,
noise_scheduler,
current_timestep,
denoised_latents,
text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.text_embeds,
prompt_pair.neutral.text_embeds,
prompt_pair.batch_size,
),
add_text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.pooled_embeds,
prompt_pair.neutral.pooled_embeds,
prompt_pair.batch_size,
),
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
with network:
target_latents = train_util.predict_noise_xl(
unet,
noise_scheduler,
current_timestep,
denoised_latents,
text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.text_embeds,
prompt_pair.target.text_embeds,
prompt_pair.batch_size,
),
add_text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.pooled_embeds,
prompt_pair.target.pooled_embeds,
prompt_pair.batch_size,
),
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
# ------------------------- latent anchoring part -----------------------------
if prompt_pair.action == "erase_with_la":
# noise sampling
anchors_text, anchors_pool = sample_xl(prompt_pair, tokenizers=tokenizers, text_encoders=text_encoders)
# get latents
repeat = prompt_pair.sampling_batch_size // prompt_pair.batch_size
# TODO: target or positive?
with network:
anchor_latents = train_util.predict_noise_xl(
unet,
noise_scheduler,
current_timestep,
denoised_latents.repeat(repeat, 1, 1, 1),
text_embeddings=anchors_text,
add_text_embeddings=anchors_pool,
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.sampling_batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
with torch.no_grad():
anchor_latents_ori = train_util.predict_noise_xl(
unet,
noise_scheduler,
current_timestep,
denoised_latents.repeat(repeat, 1, 1, 1),
text_embeddings=anchors_text,
add_text_embeddings=anchors_pool,
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.sampling_batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
                anchor_latents_ori.requires_grad = False
else:
anchor_latents = None
anchor_latents_ori = None
# ----------------------------------------------------------------
positive_latents.requires_grad = False
neutral_latents.requires_grad = False
loss = prompt_pair.loss(
target_latents=target_latents,
positive_latents=positive_latents,
neutral_latents=neutral_latents,
anchor_latents=anchor_latents,
anchor_latents_ori=anchor_latents_ori,
)
loss["loss"].backward()
if config.train.max_grad_norm > 0:
torch.nn.utils.clip_grad_norm_(
trainable_params, config.train.max_grad_norm, norm_type=2
)
optimizer.step()
lr_scheduler.step()
pbar.set_description(f"Loss*1k: {loss['loss'].item()*1000:.4f}")
# logging
if config.logging.use_wandb:
log_dict = {"iteration": i}
loss = {k: v.detach().cpu().item() for k, v in loss.items()}
log_dict.update(loss)
lrs = lr_scheduler.get_last_lr()
if len(lrs) == 1:
log_dict["lr"] = float(lrs[0])
else:
log_dict["lr/textencoder"] = float(lrs[0])
log_dict["lr/unet"] = float(lrs[-1])
if config.train.optimizer_type.lower().startswith("dadapt"):
log_dict["lr/d*lr"] = (
optimizer.param_groups[0]["d"] * optimizer.param_groups[0]["lr"]
)
# generate sample images
if config.logging.interval > 0 and (
i % config.logging.interval == 0 or i == config.train.iterations - 1
):
print("Generating samples...")
with network:
samples = train_util.text2img(
pipe,
prompts=config.logging.prompts,
negative_prompt=config.logging.negative_prompt,
width=config.logging.width,
height=config.logging.height,
num_inference_steps=config.logging.num_inference_steps,
guidance_scale=config.logging.guidance_scale,
generate_num=config.logging.generate_num,
seed=config.logging.seed,
)
for text, img in samples:
log_dict[text] = wandb.Image(img)
# evaluate on the generated images
print("Evaluating CLIPScore and CLIPAccuracy...")
with network:
clip_scores, clip_accs = eval_util.clip_eval(pipe, config)
for prompt, clip_score, clip_accuracy in zip(
config.logging.prompts, clip_scores, clip_accs
):
log_dict[f"CLIPScore/{prompt}"] = clip_score
log_dict[f"CLIPAccuracy/{prompt}"] = clip_accuracy
log_dict[f"CLIPScore/average"] = sum(clip_scores) / len(clip_scores)
log_dict[f"CLIPAccuracy/average"] = sum(clip_accs) / len(clip_accs)
wandb.log(log_dict)
# save model
if (
i % config.save.per_steps == 0
and i != 0
and i != config.train.iterations - 1
):
print("Saving...")
save_path.mkdir(parents=True, exist_ok=True)
network.save_weights(
save_path / f"{config.save.name}_{i}steps.safetensors",
dtype=save_weight_dtype,
metadata=model_metadata,
)
del (
positive_latents,
neutral_latents,
target_latents,
latents,
anchor_latents,
anchor_latents_ori,
)
flush()
print("Saving...")
save_path.mkdir(parents=True, exist_ok=True)
network.save_weights(
save_path / f"{config.save.name}_last.safetensors",
dtype=save_weight_dtype,
metadata=model_metadata,
)
del (
unet,
noise_scheduler,
loss,
optimizer,
network,
)
flush()
print("Done.")
def main(args):
config_file = args.config_file
config = config_pkg.load_config_from_yaml(config_file) | prompts = prompt_pkg.load_prompts_from_yaml(config.prompts_file) | 4 | 2023-12-26 03:19:16+00:00 | 12k |
dakpinaroglu/Frame2seq | frame2seq/openfold/utils/feats.py | [
{
"identifier": "protein",
"path": "frame2seq/openfold/np/protein.py",
"snippet": "PICO_TO_ANGSTROM = 0.01\nclass Protein:\ndef from_pdb_string(pdb_str: str, chain_id: Optional[str] = None) -> Protein:\ndef from_proteinnet_string(proteinnet_str: str) -> Protein:\ndef get_pdb_headers(prot: Protein, chain_id: int = 0) -> Sequence[str]:\ndef add_pdb_headers(prot: Protein, pdb_str: str) -> str:\ndef to_pdb(prot: Protein) -> str:\ndef ideal_atom_mask(prot: Protein) -> np.ndarray:\ndef from_prediction(\n features: FeatureDict,\n result: ModelOutput,\n b_factors: Optional[np.ndarray] = None,\n chain_index: Optional[np.ndarray] = None,\n remark: Optional[str] = None,\n parents: Optional[Sequence[str]] = None,\n parents_chain_index: Optional[Sequence[int]] = None\n) -> Protein:"
},
{
"identifier": "Rotation",
"path": "frame2seq/openfold/utils/rigid_utils.py",
"snippet": "class Rotation:\n \"\"\"\n A 3D rotation. Depending on how the object is initialized, the\n rotation is represented by either a rotation matrix or a\n quaternion, though both formats are made available by helper functions.\n To simplify gradient computation, the underlying format of the\n rotation cannot be changed in-place. Like Rigid, the class is designed\n to mimic the behavior of a torch Tensor, almost as if each Rotation\n object were a tensor of rotations, in one format or another.\n \"\"\"\n def __init__(self,\n rot_mats: Optional[torch.Tensor] = None,\n quats: Optional[torch.Tensor] = None,\n normalize_quats: bool = True,\n ):\n \"\"\"\n Args:\n rot_mats:\n A [*, 3, 3] rotation matrix tensor. Mutually exclusive with\n quats\n quats:\n A [*, 4] quaternion. Mutually exclusive with rot_mats. If\n normalize_quats is not True, must be a unit quaternion\n normalize_quats:\n If quats is specified, whether to normalize quats\n \"\"\"\n if((rot_mats is None and quats is None) or \n (rot_mats is not None and quats is not None)):\n raise ValueError(\"Exactly one input argument must be specified\")\n\n if((rot_mats is not None and rot_mats.shape[-2:] != (3, 3)) or \n (quats is not None and quats.shape[-1] != 4)):\n raise ValueError(\n \"Incorrectly shaped rotation matrix or quaternion\"\n )\n\n # Force full-precision\n if(quats is not None):\n quats = quats.to(dtype=torch.float32)\n if(rot_mats is not None):\n rot_mats = rot_mats.to(dtype=torch.float32)\n\n if(quats is not None and normalize_quats):\n quats = quats / torch.linalg.norm(quats, dim=-1, keepdim=True)\n\n self._rot_mats = rot_mats\n self._quats = quats\n\n @staticmethod\n def identity(\n shape,\n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None,\n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rotation:\n \"\"\"\n Returns an identity Rotation.\n\n Args:\n shape:\n The \"shape\" of the resulting Rotation object. See documentation\n for the shape property\n dtype:\n The torch dtype for the rotation\n device:\n The torch device for the new rotation\n requires_grad:\n Whether the underlying tensors in the new rotation object\n should require gradient computation\n fmt:\n One of \"quat\" or \"rot_mat\". Determines the underlying format\n of the new object's rotation \n Returns:\n A new identity rotation\n \"\"\"\n if(fmt == \"rot_mat\"):\n rot_mats = identity_rot_mats(\n shape, dtype, device, requires_grad,\n )\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(fmt == \"quat\"):\n quats = identity_quats(shape, dtype, device, requires_grad)\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(f\"Invalid format: f{fmt}\")\n\n # Magic methods\n\n def __getitem__(self, index: Any) -> Rotation:\n \"\"\"\n Allows torch-style indexing over the virtual shape of the rotation\n object. See documentation for the shape property.\n\n Args:\n index:\n A torch index. E.g. (1, 3, 2), or (slice(None,))\n Returns:\n The indexed rotation\n \"\"\"\n if type(index) != tuple:\n index = (index,)\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats[index + (slice(None), slice(None))]\n return Rotation(rot_mats=rot_mats)\n elif(self._quats is not None):\n quats = self._quats[index + (slice(None),)]\n return Rotation(quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rotation:\n \"\"\"\n Pointwise left multiplication of the rotation with a tensor. 
Can be\n used to e.g. mask the Rotation.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats * right[..., None, None]\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = self._quats * right[..., None]\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rotation:\n \"\"\"\n Reverse pointwise multiplication of the rotation with a tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n \n # Properties\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the virtual shape of the rotation object. This shape is\n defined as the batch dimensions of the underlying rotation matrix\n or quaternion. If the Rotation was initialized with a [10, 3, 3]\n rotation matrix tensor, for example, the resulting shape would be\n [10].\n \n Returns:\n The virtual shape of the rotation object\n \"\"\"\n s = None\n if(self._quats is not None):\n s = self._quats.shape[:-1]\n else:\n s = self._rot_mats.shape[:-2]\n\n return s\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"\n Returns the dtype of the underlying rotation.\n\n Returns:\n The dtype of the underlying rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.dtype\n elif(self._quats is not None):\n return self._quats.dtype\n else:\n raise ValueError(\"Both rotations are None\")\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n The device of the underlying rotation\n\n Returns:\n The device of the underlying rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.device\n elif(self._quats is not None):\n return self._quats.device\n else:\n raise ValueError(\"Both rotations are None\")\n\n @property\n def requires_grad(self) -> bool:\n \"\"\"\n Returns the requires_grad property of the underlying rotation\n\n Returns:\n The requires_grad property of the underlying tensor\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.requires_grad\n elif(self._quats is not None):\n return self._quats.requires_grad\n else:\n raise ValueError(\"Both rotations are None\")\n\n def get_rot_mats(self) -> torch.Tensor:\n \"\"\"\n Returns the underlying rotation as a rotation matrix tensor.\n\n Returns:\n The rotation as a rotation matrix tensor\n \"\"\"\n rot_mats = self._rot_mats\n if(rot_mats is None):\n if(self._quats is None):\n raise ValueError(\"Both rotations are None\")\n else:\n rot_mats = quat_to_rot(self._quats)\n\n return rot_mats \n\n def get_quats(self) -> torch.Tensor:\n \"\"\"\n Returns the underlying rotation as a quaternion tensor.\n\n Depending on whether the Rotation was initialized with a\n quaternion, this function may call torch.linalg.eigh.\n\n Returns:\n The rotation as a quaternion tensor.\n \"\"\"\n quats = self._quats\n if(quats is None):\n if(self._rot_mats is None):\n raise ValueError(\"Both rotations are None\")\n else:\n quats = rot_to_quat(self._rot_mats)\n\n return quats\n\n def get_cur_rot(self) -> torch.Tensor:\n \"\"\"\n Return the underlying rotation in its current form\n\n Returns:\n The stored rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats\n elif(self._quats is not None):\n return self._quats\n else:\n raise 
ValueError(\"Both rotations are None\")\n\n # Rotation functions\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor, \n normalize_quats: bool = True\n ) -> Rotation:\n \"\"\"\n Returns a new quaternion Rotation after updating the current\n object's underlying rotation with a quaternion update, formatted\n as a [*, 3] tensor whose final three columns represent x, y, z such \n that (1, x, y, z) is the desired (not necessarily unit) quaternion\n update.\n\n Args:\n q_update_vec:\n A [*, 3] quaternion update tensor\n normalize_quats:\n Whether to normalize the output quaternion\n Returns:\n An updated Rotation\n \"\"\"\n quats = self.get_quats()\n new_quats = quats + quat_multiply_by_vec(quats, q_update_vec)\n return Rotation(\n rot_mats=None, \n quats=new_quats, \n normalize_quats=normalize_quats,\n )\n\n def compose_r(self, r: Rotation) -> Rotation:\n \"\"\"\n Compose the rotation matrices of the current Rotation object with\n those of another.\n\n Args:\n r:\n An update rotation object\n Returns:\n An updated rotation object\n \"\"\"\n r1 = self.get_rot_mats()\n r2 = r.get_rot_mats()\n new_rot_mats = rot_matmul(r1, r2)\n return Rotation(rot_mats=new_rot_mats, quats=None)\n\n def compose_q(self, r: Rotation, normalize_quats: bool = True) -> Rotation:\n \"\"\"\n Compose the quaternions of the current Rotation object with those\n of another.\n\n Depending on whether either Rotation was initialized with\n quaternions, this function may call torch.linalg.eigh.\n\n Args:\n r:\n An update rotation object\n Returns:\n An updated rotation object\n \"\"\"\n q1 = self.get_quats()\n q2 = r.get_quats()\n new_quats = quat_multiply(q1, q2)\n return Rotation(\n rot_mats=None, quats=new_quats, normalize_quats=normalize_quats\n )\n\n def apply(self, pts: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Apply the current Rotation as a rotation matrix to a set of 3D\n coordinates.\n\n Args:\n pts:\n A [*, 3] set of points\n Returns:\n [*, 3] rotated points\n \"\"\"\n rot_mats = self.get_rot_mats()\n return rot_vec_mul(rot_mats, pts)\n\n def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:\n \"\"\"\n The inverse of the apply() method.\n\n Args:\n pts:\n A [*, 3] set of points\n Returns:\n [*, 3] inverse-rotated points\n \"\"\"\n rot_mats = self.get_rot_mats()\n inv_rot_mats = invert_rot_mat(rot_mats) \n return rot_vec_mul(inv_rot_mats, pts)\n\n def invert(self) -> Rotation:\n \"\"\"\n Returns the inverse of the current Rotation.\n\n Returns:\n The inverse of the current Rotation\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(\n rot_mats=invert_rot_mat(self._rot_mats), \n quats=None\n )\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None,\n quats=invert_quat(self._quats),\n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n # \"Tensor\" stuff\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. 
The dimension is relative to the\n shape of the Rotation object.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed Rotation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats.unsqueeze(dim if dim >= 0 else dim - 2)\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = self._quats.unsqueeze(dim if dim >= 0 else dim - 1)\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n @staticmethod\n def cat(\n rs: Sequence[Rotation], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates rotations along one of the batch dimensions. Analogous\n to torch.cat().\n\n Note that the output of this operation is always a rotation matrix,\n regardless of the format of input rotations.\n\n Args:\n rs: \n A list of rotation objects\n dim: \n The dimension along which the rotations should be \n concatenated\n Returns:\n A concatenated Rotation object in rotation matrix format\n \"\"\"\n rot_mats = [r.get_rot_mats() for r in rs]\n rot_mats = torch.cat(rot_mats, dim=dim if dim >= 0 else dim - 2)\n\n return Rotation(rot_mats=rot_mats, quats=None) \n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rotation:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying rotation tensors,\n mapping over the rotation dimension(s). Can be used e.g. to sum out\n a one-hot batch dimension.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rotation \n Returns:\n The transformed Rotation object\n \"\"\" \n if(self._rot_mats is not None):\n rot_mats = self._rot_mats.view(self._rot_mats.shape[:-2] + (9,))\n rot_mats = torch.stack(\n list(map(fn, torch.unbind(rot_mats, dim=-1))), dim=-1\n )\n rot_mats = rot_mats.view(rot_mats.shape[:-1] + (3, 3))\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = torch.stack(\n list(map(fn, torch.unbind(self._quats, dim=-1))), dim=-1\n )\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n \n def cuda(self) -> Rotation:\n \"\"\"\n Analogous to the cuda() method of torch Tensors\n\n Returns:\n A copy of the Rotation in CUDA memory\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(rot_mats=self._rot_mats.cuda(), quats=None)\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.cuda(),\n normalize_quats=False\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n def to(self, \n device: Optional[torch.device], \n dtype: Optional[torch.dtype]\n ) -> Rotation:\n \"\"\"\n Analogous to the to() method of torch Tensors\n\n Args:\n device:\n A torch device\n dtype:\n A torch dtype\n Returns:\n A copy of the Rotation using the new device and dtype\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(\n rot_mats=self._rot_mats.to(device=device, dtype=dtype), \n quats=None,\n )\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.to(device=device, dtype=dtype),\n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n def detach(self) -> Rotation:\n \"\"\"\n Returns a copy of the Rotation whose underlying Tensor has been\n detached from its torch graph.\n\n Returns:\n A copy of the Rotation whose underlying Tensor has been detached\n from its torch graph\n \"\"\"\n if(self._rot_mats is 
not None):\n return Rotation(rot_mats=self._rot_mats.detach(), quats=None)\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.detach(), \n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")"
},
{
"identifier": "Rigid",
"path": "frame2seq/openfold/utils/rigid_utils.py",
"snippet": "class Rigid:\n \"\"\"\n A class representing a rigid transformation. Little more than a wrapper\n around two objects: a Rotation object and a [*, 3] translation\n Designed to behave approximately like a single torch tensor with the \n shape of the shared batch dimensions of its component parts.\n \"\"\"\n def __init__(self, \n rots: Optional[Rotation],\n trans: Optional[torch.Tensor],\n ):\n \"\"\"\n Args:\n rots: A [*, 3, 3] rotation tensor\n trans: A corresponding [*, 3] translation tensor\n \"\"\"\n # (we need device, dtype, etc. from at least one input)\n\n batch_dims, dtype, device, requires_grad = None, None, None, None\n if(trans is not None):\n batch_dims = trans.shape[:-1]\n dtype = trans.dtype\n device = trans.device\n requires_grad = trans.requires_grad\n elif(rots is not None):\n batch_dims = rots.shape\n dtype = rots.dtype\n device = rots.device\n requires_grad = rots.requires_grad\n else:\n raise ValueError(\"At least one input argument must be specified\")\n\n if(rots is None):\n rots = Rotation.identity(\n batch_dims, dtype, device, requires_grad,\n )\n elif(trans is None):\n trans = identity_trans(\n batch_dims, dtype, device, requires_grad,\n )\n\n if((rots.shape != trans.shape[:-1]) or\n (rots.device != trans.device)):\n raise ValueError(\"Rots and trans incompatible\")\n\n # Force full precision. Happens to the rotations automatically.\n trans = trans.to(dtype=torch.float32)\n\n self._rots = rots\n self._trans = trans\n\n @staticmethod\n def identity(\n shape: Tuple[int], \n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None, \n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rigid:\n \"\"\"\n Constructs an identity transformation.\n\n Args:\n shape: \n The desired shape\n dtype: \n The dtype of both internal tensors\n device: \n The device of both internal tensors\n requires_grad: \n Whether grad should be enabled for the internal tensors\n Returns:\n The identity transformation\n \"\"\"\n return Rigid(\n Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),\n identity_trans(shape, dtype, device, requires_grad),\n )\n\n def __getitem__(self, \n index: Any,\n ) -> Rigid:\n \"\"\" \n Indexes the affine transformation with PyTorch-style indices.\n The index is applied to the shared dimensions of both the rotation\n and the translation.\n\n E.g.::\n\n r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)\n t = Rigid(r, torch.rand(10, 10, 3))\n indexed = t[3, 4:6]\n assert(indexed.shape == (2,))\n assert(indexed.get_rots().shape == (2,))\n assert(indexed.get_trans().shape == (2, 3))\n\n Args:\n index: A standard torch tensor index. E.g. 8, (10, None, 3),\n or (3, slice(0, 1, None))\n Returns:\n The indexed tensor \n \"\"\"\n if type(index) != tuple:\n index = (index,)\n \n return Rigid(\n self._rots[index],\n self._trans[index + (slice(None),)],\n )\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Pointwise left multiplication of the transformation with a tensor.\n Can be used to e.g. 
mask the Rigid.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n new_rots = self._rots * right\n new_trans = self._trans * right[..., None]\n\n return Rigid(new_rots, new_trans)\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Reverse pointwise multiplication of the transformation with a \n tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the shape of the shared dimensions of the rotation and\n the translation.\n \n Returns:\n The shape of the transformation\n \"\"\"\n s = self._trans.shape[:-1]\n return s\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n Returns the device on which the Rigid's tensors are located.\n\n Returns:\n The device on which the Rigid's tensors are located\n \"\"\"\n return self._trans.device\n\n def get_rots(self) -> Rotation:\n \"\"\"\n Getter for the rotation.\n\n Returns:\n The rotation object\n \"\"\"\n return self._rots\n\n def get_trans(self) -> torch.Tensor:\n \"\"\"\n Getter for the translation.\n\n Returns:\n The stored translation\n \"\"\"\n return self._trans\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Composes the transformation with a quaternion update vector of\n shape [*, 6], where the final 6 columns represent the x, y, and\n z values of a quaternion of form (1, x, y, z) followed by a 3D\n translation.\n\n Args:\n q_vec: The quaternion update vector.\n Returns:\n The composed transformation.\n \"\"\"\n q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]\n new_rots = self._rots.compose_q_update_vec(q_vec)\n\n trans_update = self._rots.apply(t_vec)\n new_translation = self._trans + trans_update\n\n return Rigid(new_rots, new_translation)\n\n def compose(self,\n r: Rigid,\n ) -> Rigid:\n \"\"\"\n Composes the current rigid object with another.\n\n Args:\n r:\n Another Rigid object\n Returns:\n The composition of the two transformations\n \"\"\"\n new_rot = self._rots.compose_r(r._rots)\n new_trans = self._rots.apply(r._trans) + self._trans\n return Rigid(new_rot, new_trans)\n\n def apply(self, \n pts: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n Applies the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor.\n Returns:\n The transformed points.\n \"\"\"\n rotated = self._rots.apply(pts) \n return rotated + self._trans\n\n def invert_apply(self, \n pts: torch.Tensor\n ) -> torch.Tensor:\n \"\"\"\n Applies the inverse of the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor\n Returns:\n The transformed points.\n \"\"\"\n pts = pts - self._trans\n return self._rots.invert_apply(pts) \n\n def invert(self) -> Rigid:\n \"\"\"\n Inverts the transformation.\n\n Returns:\n The inverse transformation.\n \"\"\"\n rot_inv = self._rots.invert() \n trn_inv = rot_inv.apply(self._trans)\n\n return Rigid(rot_inv, -1 * trn_inv)\n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rigid:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying translation and\n rotation tensors, mapping over the translation/rotation dimensions\n respectively.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rigid\n Returns:\n The transformed Rigid object\n \"\"\" \n new_rots = self._rots.map_tensor_fn(fn) \n new_trans = 
torch.stack(\n list(map(fn, torch.unbind(self._trans, dim=-1))), \n dim=-1\n )\n\n return Rigid(new_rots, new_trans)\n\n def to_tensor_4x4(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a homogenous transformation tensor.\n\n Returns:\n A [*, 4, 4] homogenous transformation tensor\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 4, 4))\n tensor[..., :3, :3] = self._rots.get_rot_mats()\n tensor[..., :3, 3] = self._trans\n tensor[..., 3, 3] = 1\n return tensor\n\n @staticmethod\n def from_tensor_4x4(\n t: torch.Tensor\n ) -> Rigid:\n \"\"\"\n Constructs a transformation from a homogenous transformation\n tensor.\n\n Args:\n t: [*, 4, 4] homogenous transformation tensor\n Returns:\n T object with shape [*]\n \"\"\"\n if(t.shape[-2:] != (4, 4)):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n rots = Rotation(rot_mats=t[..., :3, :3], quats=None)\n trans = t[..., :3, 3]\n \n return Rigid(rots, trans)\n\n def to_tensor_7(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a tensor with 7 final columns, four \n for the quaternion followed by three for the translation.\n\n Returns:\n A [*, 7] tensor representation of the transformation\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 7))\n tensor[..., :4] = self._rots.get_quats()\n tensor[..., 4:] = self._trans\n\n return tensor\n\n @staticmethod\n def from_tensor_7(\n t: torch.Tensor,\n normalize_quats: bool = False,\n ) -> Rigid:\n if(t.shape[-1] != 7):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n quats, trans = t[..., :4], t[..., 4:]\n\n rots = Rotation(\n rot_mats=None, \n quats=quats, \n normalize_quats=normalize_quats\n )\n\n return Rigid(rots, trans)\n\n @staticmethod\n def from_3_points(\n p_neg_x_axis: torch.Tensor, \n origin: torch.Tensor, \n p_xy_plane: torch.Tensor, \n eps: float = 1e-8\n ) -> Rigid:\n \"\"\"\n Implements algorithm 21. Constructs transformations from sets of 3 \n points using the Gram-Schmidt algorithm.\n\n Args:\n p_neg_x_axis: [*, 3] coordinates\n origin: [*, 3] coordinates used as frame origins\n p_xy_plane: [*, 3] coordinates\n eps: Small epsilon value\n Returns:\n A transformation object of shape [*]\n \"\"\"\n p_neg_x_axis = torch.unbind(p_neg_x_axis, dim=-1)\n origin = torch.unbind(origin, dim=-1)\n p_xy_plane = torch.unbind(p_xy_plane, dim=-1)\n\n e0 = [c1 - c2 for c1, c2 in zip(origin, p_neg_x_axis)]\n e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane, origin)]\n\n denom = torch.sqrt(sum((c * c for c in e0)) + eps)\n e0 = [c / denom for c in e0]\n dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))\n e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]\n denom = torch.sqrt(sum((c * c for c in e1)) + eps)\n e1 = [c / denom for c in e1]\n e2 = [\n e0[1] * e1[2] - e0[2] * e1[1],\n e0[2] * e1[0] - e0[0] * e1[2],\n e0[0] * e1[1] - e0[1] * e1[0],\n ]\n\n rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)\n rots = rots.reshape(rots.shape[:-1] + (3, 3))\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, torch.stack(origin, dim=-1))\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. 
The dimension is relative to the\n shared dimensions of the rotation/translation.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed transformation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n rots = self._rots.unsqueeze(dim)\n trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)\n\n return Rigid(rots, trans)\n\n @staticmethod\n def cat(\n ts: Sequence[Rigid], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates transformations along a new dimension.\n\n Args:\n ts: \n A list of T objects\n dim: \n The dimension along which the transformations should be \n concatenated\n Returns:\n A concatenated transformation object\n \"\"\"\n rots = Rotation.cat([t._rots for t in ts], dim) \n trans = torch.cat(\n [t._trans for t in ts], dim=dim if dim >= 0 else dim - 1\n )\n\n return Rigid(rots, trans)\n\n def apply_rot_fn(self, fn: Callable[Rotation, Rotation]) -> Rigid:\n \"\"\"\n Applies a Rotation -> Rotation function to the stored rotation\n object.\n\n Args:\n fn: A function of type Rotation -> Rotation\n Returns:\n A transformation object with a transformed rotation.\n \"\"\"\n return Rigid(fn(self._rots), self._trans)\n\n def apply_trans_fn(self, fn: Callable[torch.Tensor, torch.Tensor]) -> Rigid:\n \"\"\"\n Applies a Tensor -> Tensor function to the stored translation.\n\n Args:\n fn: \n A function of type Tensor -> Tensor to be applied to the\n translation\n Returns:\n A transformation object with a transformed translation.\n \"\"\"\n return Rigid(self._rots, fn(self._trans))\n\n def scale_translation(self, trans_scale_factor: float) -> Rigid:\n \"\"\"\n Scales the translation by a constant factor.\n\n Args:\n trans_scale_factor:\n The constant factor\n Returns:\n A transformation object with a scaled translation.\n \"\"\"\n fn = lambda t: t * trans_scale_factor\n return self.apply_trans_fn(fn)\n\n def stop_rot_gradient(self) -> Rigid:\n \"\"\"\n Detaches the underlying rotation object\n\n Returns:\n A transformation object with detached rotations\n \"\"\"\n fn = lambda r: r.detach()\n return self.apply_rot_fn(fn)\n\n @staticmethod\n def make_transform_from_reference(n_xyz, ca_xyz, c_xyz, eps=1e-20):\n \"\"\"\n Returns a transformation object from reference coordinates.\n \n Note that this method does not take care of symmetries. If you \n provide the atom positions in the non-standard way, the N atom will \n end up not at [-0.527250, 1.359329, 0.0] but instead at \n [-0.527250, -1.359329, 0.0]. You need to take care of such cases in \n your code.\n \n Args:\n n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.\n ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.\n c_xyz: A [*, 3] tensor of carbon xyz coordinates.\n Returns:\n A transformation object. 
After applying the translation and \n rotation to the reference backbone, the coordinates will \n approximately equal to the input coordinates.\n \"\"\" \n translation = -1 * ca_xyz\n n_xyz = n_xyz + translation\n c_xyz = c_xyz + translation\n\n c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2)\n sin_c1 = -c_y / norm\n cos_c1 = c_x / norm\n zeros = sin_c1.new_zeros(sin_c1.shape)\n ones = sin_c1.new_ones(sin_c1.shape)\n\n c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))\n c1_rots[..., 0, 0] = cos_c1\n c1_rots[..., 0, 1] = -1 * sin_c1\n c1_rots[..., 1, 0] = sin_c1\n c1_rots[..., 1, 1] = cos_c1\n c1_rots[..., 2, 2] = 1\n\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2 + c_z ** 2)\n sin_c2 = c_z / norm\n cos_c2 = torch.sqrt(c_x ** 2 + c_y ** 2) / norm\n\n c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n c2_rots[..., 0, 0] = cos_c2\n c2_rots[..., 0, 2] = sin_c2\n c2_rots[..., 1, 1] = 1\n c2_rots[..., 2, 0] = -1 * sin_c2\n c2_rots[..., 2, 2] = cos_c2\n\n c_rots = rot_matmul(c2_rots, c1_rots)\n n_xyz = rot_vec_mul(c_rots, n_xyz)\n\n _, n_y, n_z = [n_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + n_y ** 2 + n_z ** 2)\n sin_n = -n_z / norm\n cos_n = n_y / norm\n\n n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n n_rots[..., 0, 0] = 1\n n_rots[..., 1, 1] = cos_n\n n_rots[..., 1, 2] = -1 * sin_n\n n_rots[..., 2, 1] = sin_n\n n_rots[..., 2, 2] = cos_n\n\n rots = rot_matmul(n_rots, c_rots)\n\n rots = rots.transpose(-1, -2)\n translation = -1 * translation\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, translation)\n\n def cuda(self) -> Rigid:\n \"\"\"\n Moves the transformation object to GPU memory\n \n Returns:\n A version of the transformation on GPU\n \"\"\"\n return Rigid(self._rots.cuda(), self._trans.cuda())"
},
{
"identifier": "batched_gather",
"path": "frame2seq/openfold/utils/tensor_utils.py",
"snippet": "def add(m1, m2, inplace):\ndef permute_final_dims(tensor: torch.Tensor, inds: List[int]):\ndef flatten_final_dims(t: torch.Tensor, no_dims: int):\ndef masked_mean(mask, value, dim, eps=1e-4):\ndef pts_to_distogram(pts, min_bin=2.3125, max_bin=21.6875, no_bins=64):\ndef dict_multimap(fn, dicts):\ndef one_hot(x, v_bins):\ndef batched_gather(data, inds, dim=0, no_batch_dims=0):\ndef dict_map(fn, dic, leaf_type):\ndef tree_map(fn, tree, leaf_type):"
}
] | import math
import numpy as np
import torch
import torch.nn as nn
import frame2seq.openfold.np.residue_constants as rc
from typing import Dict
from frame2seq.openfold.np import protein
from frame2seq.openfold.utils.rigid_utils import Rotation, Rigid
from frame2seq.openfold.utils.tensor_utils import (
batched_gather,
one_hot,
tree_map,
tensor_tree_map,
) | 10,598 | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks):
is_gly = aatype == rc.restype_order["G"]
ca_idx = rc.atom_order["CA"]
cb_idx = rc.atom_order["CB"]
pseudo_beta = torch.where(
is_gly[..., None].expand(*((-1,) * len(is_gly.shape)), 3),
all_atom_positions[..., ca_idx, :],
all_atom_positions[..., cb_idx, :],
)
if all_atom_masks is not None:
pseudo_beta_mask = torch.where(
is_gly,
all_atom_masks[..., ca_idx],
all_atom_masks[..., cb_idx],
)
return pseudo_beta, pseudo_beta_mask
else:
return pseudo_beta
def atom14_to_atom37(atom14, batch):
atom37_data = batched_gather(
atom14,
batch["residx_atom37_to_atom14"],
dim=-2,
no_batch_dims=len(atom14.shape[:-2]),
)
atom37_data = atom37_data * batch["atom37_atom_exists"][..., None]
return atom37_data
def build_template_angle_feat(template_feats):
template_aatype = template_feats["template_aatype"]
torsion_angles_sin_cos = template_feats["template_torsion_angles_sin_cos"]
alt_torsion_angles_sin_cos = template_feats[
"template_alt_torsion_angles_sin_cos"
]
torsion_angles_mask = template_feats["template_torsion_angles_mask"]
template_angle_feat = torch.cat(
[
nn.functional.one_hot(template_aatype, 22),
torsion_angles_sin_cos.reshape(
*torsion_angles_sin_cos.shape[:-2], 14
),
alt_torsion_angles_sin_cos.reshape(
*alt_torsion_angles_sin_cos.shape[:-2], 14
),
torsion_angles_mask,
],
dim=-1,
)
return template_angle_feat
def build_template_pair_feat(
batch,
min_bin, max_bin, no_bins,
use_unit_vector=False,
eps=1e-20, inf=1e8
):
template_mask = batch["template_pseudo_beta_mask"]
template_mask_2d = template_mask[..., None] * template_mask[..., None, :]
# Compute distogram (this seems to differ slightly from Alg. 5)
tpb = batch["template_pseudo_beta"]
dgram = torch.sum(
(tpb[..., None, :] - tpb[..., None, :, :]) ** 2, dim=-1, keepdim=True
)
lower = torch.linspace(min_bin, max_bin, no_bins, device=tpb.device) ** 2
upper = torch.cat([lower[1:], lower.new_tensor([inf])], dim=-1)
dgram = ((dgram > lower) * (dgram < upper)).type(dgram.dtype)
to_concat = [dgram, template_mask_2d[..., None]]
aatype_one_hot = nn.functional.one_hot(
batch["template_aatype"],
rc.restype_num + 2,
)
n_res = batch["template_aatype"].shape[-1]
to_concat.append(
aatype_one_hot[..., None, :, :].expand(
*aatype_one_hot.shape[:-2], n_res, -1, -1
)
)
to_concat.append(
aatype_one_hot[..., None, :].expand(
*aatype_one_hot.shape[:-2], -1, n_res, -1
)
)
n, ca, c = [rc.atom_order[a] for a in ["N", "CA", "C"]]
| # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks):
is_gly = aatype == rc.restype_order["G"]
ca_idx = rc.atom_order["CA"]
cb_idx = rc.atom_order["CB"]
pseudo_beta = torch.where(
is_gly[..., None].expand(*((-1,) * len(is_gly.shape)), 3),
all_atom_positions[..., ca_idx, :],
all_atom_positions[..., cb_idx, :],
)
if all_atom_masks is not None:
pseudo_beta_mask = torch.where(
is_gly,
all_atom_masks[..., ca_idx],
all_atom_masks[..., cb_idx],
)
return pseudo_beta, pseudo_beta_mask
else:
return pseudo_beta
def atom14_to_atom37(atom14, batch):
atom37_data = batched_gather(
atom14,
batch["residx_atom37_to_atom14"],
dim=-2,
no_batch_dims=len(atom14.shape[:-2]),
)
atom37_data = atom37_data * batch["atom37_atom_exists"][..., None]
return atom37_data
def build_template_angle_feat(template_feats):
template_aatype = template_feats["template_aatype"]
torsion_angles_sin_cos = template_feats["template_torsion_angles_sin_cos"]
alt_torsion_angles_sin_cos = template_feats[
"template_alt_torsion_angles_sin_cos"
]
torsion_angles_mask = template_feats["template_torsion_angles_mask"]
template_angle_feat = torch.cat(
[
nn.functional.one_hot(template_aatype, 22),
torsion_angles_sin_cos.reshape(
*torsion_angles_sin_cos.shape[:-2], 14
),
alt_torsion_angles_sin_cos.reshape(
*alt_torsion_angles_sin_cos.shape[:-2], 14
),
torsion_angles_mask,
],
dim=-1,
)
return template_angle_feat
def build_template_pair_feat(
batch,
min_bin, max_bin, no_bins,
use_unit_vector=False,
eps=1e-20, inf=1e8
):
template_mask = batch["template_pseudo_beta_mask"]
template_mask_2d = template_mask[..., None] * template_mask[..., None, :]
# Compute distogram (this seems to differ slightly from Alg. 5)
tpb = batch["template_pseudo_beta"]
dgram = torch.sum(
(tpb[..., None, :] - tpb[..., None, :, :]) ** 2, dim=-1, keepdim=True
)
lower = torch.linspace(min_bin, max_bin, no_bins, device=tpb.device) ** 2
upper = torch.cat([lower[1:], lower.new_tensor([inf])], dim=-1)
dgram = ((dgram > lower) * (dgram < upper)).type(dgram.dtype)
to_concat = [dgram, template_mask_2d[..., None]]
aatype_one_hot = nn.functional.one_hot(
batch["template_aatype"],
rc.restype_num + 2,
)
n_res = batch["template_aatype"].shape[-1]
to_concat.append(
aatype_one_hot[..., None, :, :].expand(
*aatype_one_hot.shape[:-2], n_res, -1, -1
)
)
to_concat.append(
aatype_one_hot[..., None, :].expand(
*aatype_one_hot.shape[:-2], -1, n_res, -1
)
)
n, ca, c = [rc.atom_order[a] for a in ["N", "CA", "C"]] | rigids = Rigid.make_transform_from_reference( | 2 | 2023-12-25 09:29:36+00:00 | 12k |
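The row above ends exactly where build_template_pair_feat is about to construct per-residue backbone frames (its next_line column is `rigids = Rigid.make_transform_from_reference(`). The lines below are a minimal usage sketch of the Rotation / Rigid API from the frame2seq/openfold rigid_utils snippet shown in that row's context; the tensor shapes and random coordinates are illustrative assumptions and are not part of the dataset row.

import torch
from frame2seq.openfold.utils.rigid_utils import Rotation, Rigid

# Backbone frames from N / CA / C coordinates, as the next_line of the row does via
# Rigid.make_transform_from_reference(...).
n_xyz, ca_xyz, c_xyz = torch.randn(8, 3), torch.randn(8, 3), torch.randn(8, 3)
rigids = Rigid.make_transform_from_reference(n_xyz=n_xyz, ca_xyz=ca_xyz, c_xyz=c_xyz, eps=1e-20)

print(rigids.shape)                   # torch.Size([8]) -- one transform per residue
origins = rigids.get_trans()          # [8, 3] frame origins (the CA coordinates)
local = rigids.invert_apply(origins)  # express the origins in their own frames -> ~zeros

# Rotations round-trip between quaternion and rotation-matrix form.
rot = Rotation.identity((8,), fmt="quat")
rot_mats = rot.get_rot_mats()                   # [8, 3, 3]
composed = rigids.compose(Rigid(rot, origins))  # Rigid composition, still virtual shape [8]

Both classes behave like batched tensors of transformations, which is why the snippet's distogram and unit-vector features can be built with ordinary broadcasting over the residue dimension.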
wwxu21/CUT | finetune_unlikelihood.py | [
{
"identifier": "LlamaForCausalLM",
"path": "modeling_llama_unlikelihood.py",
"snippet": "class LlamaForCausalLM(LlamaPreTrainedModel):\n _tied_weights_keys = [\"lm_head.weight\"]\n\n def __init__(self, config, threshold):\n super().__init__(config)\n self.model = LlamaModel(config)\n self.vocab_size = config.vocab_size\n self.threshold = threshold\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.model.embed_tokens\n\n def set_input_embeddings(self, value):\n self.model.embed_tokens = value\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def set_decoder(self, decoder):\n self.model = decoder\n\n def get_decoder(self):\n return self.model\n\n @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n weight_like: Optional[torch.Tensor] = None,\n weight_unlike: Optional[torch.Tensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n input_ids_neg=None,\n attention_mask_neg=None,\n labels_neg=None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n r\"\"\"\n Args:\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, LlamaForCausalLM\n\n >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)\n >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)\n\n >>> prompt = \"Hey, are you conscious? Can you talk to me?\"\n >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n \"Hey, are you conscious? 
Can you talk to me?\\nI'm not conscious, but I can talk to you.\"\n ```\"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = outputs[0]\n if self.config.pretraining_tp > 1:\n lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)\n logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]\n logits = torch.cat(logits, dim=-1)\n else:\n logits = self.lm_head(hidden_states)\n logits = logits.float()\n probs = torch.softmax(logits,dim=2)\n batch_size2, seq_length, hidden_size = probs.size()\n batch_size = batch_size2 // 2\n \n loss = None\n unlike_mask = weight_unlike.ne(-1).view(-1).to(probs.device)\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_probs_pos = probs[:batch_size][..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = NLLLoss()\n shift_probs_pos = shift_probs_pos.view(-1, self.config.vocab_size)\n shift_logits = torch.log(shift_probs_pos)\n shift_labels = shift_labels.view(-1)\n # Enable model parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels)\n \n loss = loss\n if unlike_mask.any():\n loss_unlike = self.unlikelihood(probs, labels, labels_neg, weight_unlike, unlike_mask)\n loss = (loss_unlike + loss) / 2 \n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n def unlikelihood(self, probs, labels, labels_neg, weight_unlike, unlike_mask):\n labels = labels.to(probs.device)\n labels_neg = labels_neg.to(probs.device)\n weight_unlike = weight_unlike.to(probs.device)\n shift_probs = probs[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n shift_labels_neg = labels_neg[..., 1:].contiguous()\n valid_indices = shift_labels[unlike_mask] != -100\n valid_indices_neg = shift_labels_neg[unlike_mask] != -100\n # assert (valid_indices == valid_indices_neg).all()\n batch_size2, seq_length, hidden_size = shift_probs.size()\n batch_size = batch_size2 // 2\n device = probs.device\n label_clamped = torch.clamp(shift_labels, min=0, max=hidden_size - 1) \n label_clamped_neg = torch.clamp(shift_labels_neg, min=0, max=hidden_size - 1)\n rows, cols = torch.meshgrid(torch.arange(batch_size, device=device), torch.arange(seq_length, device=device))\n probs_out = shift_probs[:batch_size][rows, cols, label_clamped][unlike_mask]\n probs_out_neg = shift_probs[batch_size:][rows, cols, label_clamped_neg][unlike_mask]\n valid_prob = probs_out[valid_indices]\n valid_prob_neg = probs_out_neg[valid_indices_neg]\n scale = (valid_prob / valid_prob_neg).detach()\n 
unlike_indices = scale > self.threshold # give some margins\n valid_prob_neg[unlike_indices] = 1 - valid_prob_neg[unlike_indices]\n valid_prob_neg[valid_prob_neg == 0] += 1e-5 # avoid 0\n valid_lprob_neg = torch.log(valid_prob_neg)\n valid_lprob_neg[unlike_indices] = weight_unlike[unlike_mask][0][0] * valid_lprob_neg[unlike_indices]\n valid_lprob_neg[~unlike_indices] = valid_lprob_neg[~unlike_indices]\n loss_unlike = -torch.sum(valid_lprob_neg)/ valid_lprob_neg.size(0)\n return loss_unlike\n\n \n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n if past_key_values:\n input_ids = input_ids[:, -1:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"position_ids\": position_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n }\n )\n return model_inputs\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),\n )\n return reordered_past"
},
{
"identifier": "PeftModelForCausalLM",
"path": "modeling_llama_unlikelihood.py",
"snippet": "class PeftModelForCausalLM(PeftModel):\n \"\"\"\n Peft model for causal language modeling.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): Base transformer model.\n peft_config ([`PeftConfig`]): Peft config.\n\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForCausalLM\n >>> from peft import PeftModelForCausalLM, get_peft_config\n\n >>> config = {\n ... \"peft_type\": \"PREFIX_TUNING\",\n ... \"task_type\": \"CAUSAL_LM\",\n ... \"inference_mode\": False,\n ... \"num_virtual_tokens\": 20,\n ... \"token_dim\": 1280,\n ... \"num_transformer_submodules\": 1,\n ... \"num_attention_heads\": 20,\n ... \"num_layers\": 36,\n ... \"encoder_hidden_size\": 1280,\n ... \"prefix_projection\": False,\n ... \"postprocess_past_key_value_function\": None,\n ... }\n\n >>> peft_config = get_peft_config(config)\n >>> model = AutoModelForCausalLM.from_pretrained(\"gpt2-large\")\n >>> peft_model = PeftModelForCausalLM(model, peft_config)\n >>> peft_model.print_trainable_parameters()\n trainable params: 1843200 || all params: 775873280 || trainable%: 0.23756456724479544\n ```\n \"\"\"\n\n def __init__(self, model, peft_config: PeftConfig, adapter_name=\"default\"):\n super().__init__(model, peft_config, adapter_name)\n self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n input_ids_neg=None,\n attention_mask_neg=None,\n labels_neg=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n weight_like=None,\n weight_unlike=None,\n **kwargs,\n ):\n peft_config = self.active_peft_config\n kwargs.update({'weight_like':weight_like, 'weight_unlike':weight_unlike, \"labels_neg\": labels_neg})\n input_ids = torch.cat([input_ids, input_ids_neg], dim=0)\n attention_mask = torch.cat([attention_mask, attention_mask_neg], dim=0)\n if not peft_config.is_prompt_learning:\n if self.base_model.config.model_type == \"mpt\":\n if inputs_embeds is not None:\n raise AssertionError(\"forward in MPTForCausalLM does not support inputs_embeds\")\n return self.base_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n labels=labels,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n\n return self.base_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n labels=labels,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n batch_size = _get_batch_size(input_ids, inputs_embeds)\n if attention_mask is not None:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)\n attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)\n\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\"Position ids are not supported for parameter efficient tuning. Ignoring position ids.\")\n kwargs[\"position_ids\"] = None\n if kwargs.get(\"token_type_ids\", None) is not None:\n warnings.warn(\"Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids\")\n kwargs[\"token_type_ids\"] = None\n kwargs.update(\n {\n \"attention_mask\": attention_mask,\n \"labels\": labels,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n }\n )\n\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n past_key_values = self.get_prompt(batch_size)\n return self.base_model(\n input_ids=input_ids, inputs_embeds=inputs_embeds, past_key_values=past_key_values, **kwargs\n )\n else:\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n # concat prompt labels\n if labels is not None:\n prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device)\n kwargs[\"labels\"] = torch.cat((prefix_labels, labels), dim=1)\n prompts = self.get_prompt(batch_size=batch_size)\n prompts = prompts.to(inputs_embeds.dtype)\n inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)\n return self.base_model(inputs_embeds=inputs_embeds, **kwargs)\n\n def generate(self, **kwargs):\n self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation\n if hasattr(self.base_model, \"model\"):\n self.base_model.model.generation_config = self.generation_config\n else:\n self.base_model.generation_config = self.generation_config\n try:\n outputs = self.base_model.generate(**kwargs)\n except:\n self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation\n raise\n else:\n self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation\n return outputs\n\n def prepare_inputs_for_generation(self, *args, **kwargs):\n peft_config = self.active_peft_config\n model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)\n if peft_config.is_prompt_learning:\n if model_kwargs.get(\"attention_mask\", None) is not None:\n prefix_attention_mask = torch.ones(\n model_kwargs[\"input_ids\"].shape[0], peft_config.num_virtual_tokens\n ).to(model_kwargs[\"input_ids\"].device)\n model_kwargs[\"attention_mask\"] = torch.cat(\n (prefix_attention_mask, model_kwargs[\"attention_mask\"]), dim=1\n )\n\n if model_kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\"Position ids are not supported for parameter efficient tuning. Ignoring position ids.\")\n model_kwargs[\"position_ids\"] = None\n\n if kwargs.get(\"token_type_ids\", None) is not None:\n warnings.warn(\n \"Token type ids are not supported for parameter efficient tuning. Ignoring token type ids\"\n )\n kwargs[\"token_type_ids\"] = None\n\n if model_kwargs[\"past_key_values\"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING:\n past_key_values = self.get_prompt(batch_size=model_kwargs[\"input_ids\"].shape[0])\n model_kwargs[\"past_key_values\"] = past_key_values\n else:\n if model_kwargs[\"past_key_values\"] is None:\n inputs_embeds = self.word_embeddings(model_kwargs[\"input_ids\"])\n prompts = self.get_prompt(batch_size=model_kwargs[\"input_ids\"].shape[0])\n prompts = prompts.to(inputs_embeds.dtype)\n model_kwargs[\"inputs_embeds\"] = torch.cat((prompts, inputs_embeds), dim=1)\n model_kwargs[\"input_ids\"] = None\n\n return model_kwargs"
},
{
"identifier": "Prompter",
"path": "prompter.py",
"snippet": "class Prompter(object):\n __slots__ = (\"template\", \"_verbose\")\n\n def __init__(self, template_name: str = \"\", verbose: bool = False):\n self._verbose = verbose\n if not template_name:\n # Enforce the default here, so the constructor can be called with '' and will not break.\n template_name = \"alpaca\"\n file_name = osp.join(\"templates\", f\"{template_name}.json\")\n if not osp.exists(file_name):\n raise ValueError(f\"Can't read {file_name}\")\n with open(file_name) as fp:\n self.template = json.load(fp)\n if self._verbose:\n print(\n f\"Using prompt template {template_name}: {self.template['description']}\"\n )\n\n def generate_prompt(\n self,\n data_point,\n output=False,\n ) -> str:\n # returns the full prompt from instruction and optional input\n # if a label (=response, =output) is provided, it's also appended.\n instruction = data_point['instruction']\n label = data_point['output']\n res = instruction\n if output:\n res = f\"{res}{label}\"\n if self._verbose:\n print(res)\n return res\n\n def get_response(self, output: str) -> str:\n return output.split(self.template[\"response_split\"])[1].strip()"
}
] | import os
import sys
import json
import fire
import torch
import transformers
import numpy as np
import random
from typing import List
from torch.utils.data import DataLoader
from datasets import load_dataset, concatenate_datasets, Dataset
from transformers import TrainerCallback, TrainingArguments, TrainerState, TrainerControl
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
from peft import (
LoraConfig,
prepare_model_for_int8_training,
set_peft_model_state_dict,
MODEL_TYPE_TO_PEFT_MODEL_MAPPING,
PeftModel,
)
from peft.utils import _prepare_prompt_learning_config
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.utils import PaddingStrategy
from transformers import LlamaTokenizer, LlamaConfig
from modeling_llama_unlikelihood import LlamaForCausalLM, PeftModelForCausalLM
from prompter import Prompter
from typing import Optional, Union, Any
from dataclasses import dataclass | 7,333 | args: TrainingArguments,
state: TrainerState,
control: TrainerControl,
**kwargs,
):
checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")
kwargs["model"].save_pretrained(checkpoint_folder)
pytorch_model_path = os.path.join(checkpoint_folder, "pytorch_model.bin")
torch.save({}, pytorch_model_path)
return control
class LoadBestPeftModelCallback(TrainerCallback):
def on_train_end(
self,
args: TrainingArguments,
state: TrainerState,
control: TrainerControl,
**kwargs,
):
print(f"Loading best peft model from {state.best_model_checkpoint} (score: {state.best_metric}).")
best_model_path = os.path.join(state.best_model_checkpoint, "adapter_model.bin")
adapters_weights = torch.load(best_model_path)
model = kwargs["model"]
set_peft_model_state_dict(model, adapters_weights)
return control
def get_peft_model(model, peft_config, adapter_name: str = "default"):
"""
Returns a Peft model object from a model and a config.
Args:
model ([`transformers.PreTrainedModel`]): Model to be wrapped.
peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model.
"""
model_config = getattr(model, "config", {"model_type": "custom"})
if hasattr(model_config, "to_dict"):
model_config = model_config.to_dict()
peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None)
if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not peft_config.is_prompt_learning:
return PeftModel(model, peft_config, adapter_name=adapter_name)
if peft_config.is_prompt_learning:
peft_config = _prepare_prompt_learning_config(peft_config, model_config)
return PeftModelForCausalLM(model, peft_config, adapter_name=adapter_name)
def train(
# model/data params
base_model: str = "",
data_path: str = "",
output_dir: str = "",
# training hyperparams
batch_size: int = 128,
micro_batch_size: int = 8,
num_epochs: int = 1,
learning_rate: float = 3e-4,
cutoff_len: int = 4096,
val_set_size: int = 0,
lr_scheduler: str = "cosine",
warmup_steps: int = 100,
# lora hyperparams
lora_r: int = 16,
lora_alpha: int = 16,
lora_dropout: float = 0.05,
# from peft docs: ["q_proj", "k_proj", "v_proj", "o_proj", "fc_in", "fc_out", "wte", "gate_proj", "down_proj", "up_proj"]
lora_target_modules: List[str] = ["gate_proj", "down_proj", "up_proj"],
# llm hyperparams
train_on_inputs: bool = False, # if False, masks out inputs in loss
add_eos_token: bool = False,
group_by_length: bool = False, # faster, but produces an odd training loss curve
# wandb params
wandb_project: str = "",
wandb_run_name: str = "",
wandb_watch: str = "", # options: false | gradients | all
wandb_log_model: str = "", # options: false | true
resume_from_checkpoint: str = None, # either training checkpoint or final adapter
prompt_template_name: str = "alpaca",
weight_unlike: float = 0.1,
threshold: float = 1.1,
downsample: float = -1,
debug: bool = False,
):
if int(os.environ.get("LOCAL_RANK", 0)) == 0:
print(
f"Params using prompt template {prompt_template_name}\n"
f"the unlikelihood weight for the incorrect token in the incorrect response: {weight_unlike}\n"
f"the threshold to determine the unlikelihood token: {threshold}\n"
f"downssample rate for Hindsight-P: {downsample}\n"
f"base_model: {base_model}\n"
f"data_path: {data_path}\n"
f"output_dir: {output_dir}\n"
f"batch_size: {batch_size}\n"
f"micro_batch_size: {micro_batch_size}\n"
f"num_epochs: {num_epochs}\n"
f"learning_rate: {learning_rate}\n"
f"cutoff_len: {cutoff_len}\n"
f"val_set_size: {val_set_size}\n"
f"lr_scheduler: {lr_scheduler}\n"
f"warmup_steps: {warmup_steps}\n"
f"lora_r: {lora_r}\n"
f"lora_alpha: {lora_alpha}\n"
f"lora_dropout: {lora_dropout}\n"
f"lora_target_modules: {lora_target_modules}\n"
f"train_on_inputs: {train_on_inputs}\n"
f"add_eos_token: {add_eos_token}\n"
f"group_by_length: {group_by_length}\n"
f"wandb_project: {wandb_project}\n"
f"wandb_run_name: {wandb_run_name}\n"
f"wandb_watch: {wandb_watch}\n"
f"wandb_log_model: {wandb_log_model}\n"
f"resume_from_checkpoint: {resume_from_checkpoint or False}\n"
)
assert (
base_model
), "Please specify a --base_model, e.g. --base_model='huggyllama/llama-7b'"
gradient_accumulation_steps = batch_size // micro_batch_size
| seed = 42
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
@dataclass
class MyDataCollator:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
model ([`PreTrainedModel`]):
The model that is being trained. If set and has the *prepare_decoder_input_ids_from_labels*, use it to
prepare the *decoder_input_ids*
This is useful when using *label_smoothing* to avoid calculating loss twice.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (`int`, *optional*, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", "pt" and "tf".
"""
tokenizer: PreTrainedTokenizerBase
model: Optional[Any] = None
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
return_tensors: str = "pt"
def __call__(self, features, return_tensors=None):
if return_tensors is None:
return_tensors = self.return_tensors
labels = [feature["labels"] for feature in features] if "labels" in features[0].keys() else None
labels_neg = [feature["labels_neg"] for feature in features] if "labels_neg" in features[0].keys() else None
# We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the
# same length to return tensors.
if labels is not None:
max_label_length = max(len(l) for l in labels)
if labels_neg is not None:
max_label_length_neg = max(len(l) for l in labels_neg)
max_label_length = max(max_label_length, max_label_length_neg)
if self.pad_to_multiple_of is not None:
max_label_length = (
(max_label_length + self.pad_to_multiple_of - 1)
// self.pad_to_multiple_of
* self.pad_to_multiple_of
)
# self.tokenizer.padding_side = "left"
padding_side = self.tokenizer.padding_side
for feature in features:
feature['weight_like'] = [feature['weight_like']]
feature['weight_unlike'] = [feature['weight_unlike']]
remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"]))
remainder_length = max_label_length - len(feature["labels_neg"])
remainder_label = [self.label_pad_token_id] * remainder_length
remainder_ids = [self.tokenizer.pad_token_id] * remainder_length
remainder_mask = [0] * remainder_length
if isinstance(feature["labels"], list):
feature["labels"] = (
feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"]
)
feature["labels_neg"] = (
feature["labels_neg"] + remainder_label if padding_side == "right" else remainder_label + feature["labels_neg"]
)
feature["input_ids_neg"] = (
feature["input_ids_neg"] + remainder_ids if padding_side == "right" else remainder_ids + feature["input_ids_neg"]
)
feature["attention_mask_neg"] = (
feature["attention_mask_neg"] + remainder_mask if padding_side == "right" else remainder_mask + feature["attention_mask_neg"]
)
elif padding_side == "right":
feature["labels"] = np.concatenate([feature["labels"], remainder]).astype(np.int64)
feature["labels_neg"] = np.concatenate([feature["labels_neg"], remainder_label]).astype(np.int64)
feature["input_ids_neg"] = np.concatenate([feature["input_ids_neg"], remainder_ids]).astype(np.int64)
feature["attention_mask_neg"] = np.concatenate([feature["attention_mask_neg"], remainder_mask]).astype(np.int64)
else:
feature["labels"] = np.concatenate([remainder, feature["labels"]]).astype(np.int64)
feature["labels_neg"] = np.concatenate([remainder_label, feature["labels_neg"]]).astype(np.int64)
feature["input_ids_neg"] = np.concatenate([remainder_ids, feature["input_ids_neg"]]).astype(np.int64)
feature["attention_mask_neg"] = np.concatenate([remainder_mask, feature["attention_mask_neg"]]).astype(np.int64)
features = self.tokenizer.pad(
features,
padding=self.padding,
max_length=max_label_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=return_tensors,
)
# prepare decoder_input_ids
if (
labels is not None
and self.model is not None
and hasattr(self.model, "prepare_decoder_input_ids_from_labels")
):
decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=features["labels"])
features["decoder_input_ids"] = decoder_input_ids
return features
class SavePeftModelCallback(TrainerCallback):
def on_save(
self,
args: TrainingArguments,
state: TrainerState,
control: TrainerControl,
**kwargs,
):
checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")
kwargs["model"].save_pretrained(checkpoint_folder)
pytorch_model_path = os.path.join(checkpoint_folder, "pytorch_model.bin")
torch.save({}, pytorch_model_path)
return control
class LoadBestPeftModelCallback(TrainerCallback):
def on_train_end(
self,
args: TrainingArguments,
state: TrainerState,
control: TrainerControl,
**kwargs,
):
print(f"Loading best peft model from {state.best_model_checkpoint} (score: {state.best_metric}).")
best_model_path = os.path.join(state.best_model_checkpoint, "adapter_model.bin")
adapters_weights = torch.load(best_model_path)
model = kwargs["model"]
set_peft_model_state_dict(model, adapters_weights)
return control
def get_peft_model(model, peft_config, adapter_name: str = "default"):
"""
Returns a Peft model object from a model and a config.
Args:
model ([`transformers.PreTrainedModel`]): Model to be wrapped.
peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model.
"""
model_config = getattr(model, "config", {"model_type": "custom"})
if hasattr(model_config, "to_dict"):
model_config = model_config.to_dict()
peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None)
if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not peft_config.is_prompt_learning:
return PeftModel(model, peft_config, adapter_name=adapter_name)
if peft_config.is_prompt_learning:
peft_config = _prepare_prompt_learning_config(peft_config, model_config)
return PeftModelForCausalLM(model, peft_config, adapter_name=adapter_name)
def train(
# model/data params
base_model: str = "",
data_path: str = "",
output_dir: str = "",
# training hyperparams
batch_size: int = 128,
micro_batch_size: int = 8,
num_epochs: int = 1,
learning_rate: float = 3e-4,
cutoff_len: int = 4096,
val_set_size: int = 0,
lr_scheduler: str = "cosine",
warmup_steps: int = 100,
# lora hyperparams
lora_r: int = 16,
lora_alpha: int = 16,
lora_dropout: float = 0.05,
# from peft docs: ["q_proj", "k_proj", "v_proj", "o_proj", "fc_in", "fc_out", "wte", "gate_proj", "down_proj", "up_proj"]
lora_target_modules: List[str] = ["gate_proj", "down_proj", "up_proj"],
# llm hyperparams
train_on_inputs: bool = False, # if False, masks out inputs in loss
add_eos_token: bool = False,
group_by_length: bool = False, # faster, but produces an odd training loss curve
# wandb params
wandb_project: str = "",
wandb_run_name: str = "",
wandb_watch: str = "", # options: false | gradients | all
wandb_log_model: str = "", # options: false | true
resume_from_checkpoint: str = None, # either training checkpoint or final adapter
prompt_template_name: str = "alpaca",
weight_unlike: float = 0.1,
threshold: float = 1.1,
downsample: float = -1,
debug: bool = False,
):
if int(os.environ.get("LOCAL_RANK", 0)) == 0:
print(
f"Params using prompt template {prompt_template_name}\n"
f"the unlikelihood weight for the incorrect token in the incorrect response: {weight_unlike}\n"
f"the threshold to determine the unlikelihood token: {threshold}\n"
f"downssample rate for Hindsight-P: {downsample}\n"
f"base_model: {base_model}\n"
f"data_path: {data_path}\n"
f"output_dir: {output_dir}\n"
f"batch_size: {batch_size}\n"
f"micro_batch_size: {micro_batch_size}\n"
f"num_epochs: {num_epochs}\n"
f"learning_rate: {learning_rate}\n"
f"cutoff_len: {cutoff_len}\n"
f"val_set_size: {val_set_size}\n"
f"lr_scheduler: {lr_scheduler}\n"
f"warmup_steps: {warmup_steps}\n"
f"lora_r: {lora_r}\n"
f"lora_alpha: {lora_alpha}\n"
f"lora_dropout: {lora_dropout}\n"
f"lora_target_modules: {lora_target_modules}\n"
f"train_on_inputs: {train_on_inputs}\n"
f"add_eos_token: {add_eos_token}\n"
f"group_by_length: {group_by_length}\n"
f"wandb_project: {wandb_project}\n"
f"wandb_run_name: {wandb_run_name}\n"
f"wandb_watch: {wandb_watch}\n"
f"wandb_log_model: {wandb_log_model}\n"
f"resume_from_checkpoint: {resume_from_checkpoint or False}\n"
)
assert (
base_model
), "Please specify a --base_model, e.g. --base_model='huggyllama/llama-7b'"
gradient_accumulation_steps = batch_size // micro_batch_size
| prompter = Prompter(prompt_template_name) | 2 | 2023-12-22 07:32:19+00:00 | 12k |
usail-hkust/LLMTSCS | utils/oneline.py | [
{
"identifier": "DIC_AGENTS",
"path": "utils/config.py",
"snippet": "DIC_AGENTS = {\n \"Random\": RandomAgent,\n \"Fixedtime\": FixedtimeAgent,\n \"MaxPressure\": MaxPressureAgent,\n \"EfficientMaxPressure\": EfficientMaxPressureAgent,\n \"AdvancedMaxPressure\": AdvancedMaxPressureAgent,\n\n \"EfficientPressLight\": PressLightAgentOne,\n \"EfficientColight\": CoLightAgent,\n \"EfficientMPLight\": MPLightAgent,\n \"MPLight\": MPLightAgent,\n \"Colight\": CoLightAgent,\n\n \"AdvancedMPLight\": AdvancedMPLightAgent,\n \"AdvancedColight\": CoLightAgent,\n \"AdvancedDQN\": SimpleDQNAgentOne,\n \"Attend\": AttendLightAgent,\n \"ChatGPTTLCSWaitTimeForecast\": ChatGPTTLCS_Wait_Time_Forecast,\n \"ChatGPTTLCSCommonsense\": ChatGPTTLCS_Commonsense,\n \"ChatGPTTLCSCommonsenseFlowCoordination\": ChatGPTTLCS_Commonsense_Flow_Coordination,\n \"ChatGPTTLCSWaitTimeForecastCode\": ChatGPTTLCS_Wait_Time_Forecast_Code,\n \"ChatGPTTLCSCommonsenseCode\": ChatGPTTLCS_Commonsense_Code,\n \"ChatGPTTLCSCommonsenseFlowCoordinationCode\": ChatGPTTLCS_Commonsense_Flow_Coordination_Code,\n \"ChatGPTTLCSZeroKnowledge\": ChatGPTTLCS_Zero_Knowledge,\n \"ChatGPTTLCSZeroKnowledgeCode\": ChatGPTTLCS_Zero_Knowledge_Code,\n \"LLMTLCSWaitTimeForecast\": LLM_TLCS_Wait_Time_Forecast,\n \"LLMTLCSCommonsense\": LLM_TLCS_Commonsense,\n}"
},
{
"identifier": "merge",
"path": "utils/my_utils.py",
"snippet": "def merge(dic_tmp, dic_to_change):\r\ndef load_json(file):\r\ndef dump_json(data, file, indent=None):\r\ndef calculate_road_length(road_points):\r\ndef get_state(roads, env):\r\ndef get_state_detail(roads, env):\r\ndef get_state_three_segment(roads, env):\r\ndef trans_prompt_llama(message, chat_history, system_prompt):\r"
},
{
"identifier": "CityFlowEnv",
"path": "utils/cityflow_env.py",
"snippet": "class CityFlowEnv:\n\n def __init__(self, path_to_log, path_to_work_directory, dic_traffic_env_conf, dic_path):\n self.path_to_log = path_to_log\n self.path_to_work_directory = path_to_work_directory\n self.dic_traffic_env_conf = dic_traffic_env_conf\n self.dic_path = dic_path\n\n self.current_time = None\n self.id_to_index = None\n self.traffic_light_node_dict = None\n self.intersection_dict = None\n self.eng = None\n self.list_intersection = None\n self.list_inter_log = None\n self.list_lanes = None\n self.system_states = None\n self.lane_length = None\n self.waiting_vehicle_list = {}\n\n # check min action time\n if self.dic_traffic_env_conf[\"MIN_ACTION_TIME\"] <= self.dic_traffic_env_conf[\"YELLOW_TIME\"]:\n \"\"\" include the yellow time in action time \"\"\"\n print(\"MIN_ACTION_TIME should include YELLOW_TIME\")\n sys.exit()\n\n # touch new inter_{}.pkl (if exists, remove)\n for inter_ind in range(self.dic_traffic_env_conf[\"NUM_INTERSECTIONS\"]):\n path_to_log_file = os.path.join(self.path_to_log, \"inter_{0}.pkl\".format(inter_ind))\n f = open(path_to_log_file, \"wb\")\n f.close()\n\n def reset(self):\n print(\" ============= self.eng.reset() to be implemented ==========\")\n if not os.path.isdir(\"./frontend/web\"):\n os.mkdir(\"./frontend/web\")\n cityflow_config = {\n \"interval\": self.dic_traffic_env_conf[\"INTERVAL\"],\n \"seed\": int(np.random.randint(0, 100)),\n \"laneChange\": True,\n \"dir\": self.path_to_work_directory+\"/\",\n \"roadnetFile\": self.dic_traffic_env_conf[\"ROADNET_FILE\"],\n \"flowFile\": self.dic_traffic_env_conf[\"TRAFFIC_FILE\"],\n \"rlTrafficLight\": True,\n \"saveReplay\": True, # if \"GPT\" in self.dic_traffic_env_conf[\"MODEL_NAME\"] or \"llm\" in self.dic_traffic_env_conf[\"MODEL_NAME\"] else False,\n \"roadnetLogFile\": f\"../../../frontend/web/{self.dic_traffic_env_conf['ROADNET_FILE']}-{self.dic_traffic_env_conf['TRAFFIC_FILE']}-{self.dic_traffic_env_conf['MODEL_NAME']}-{len(self.dic_traffic_env_conf['PHASE'])}_Phases-roadnetLogFile.json\",\n \"replayLogFile\": f\"../../../frontend/web/{self.dic_traffic_env_conf['ROADNET_FILE']}-{self.dic_traffic_env_conf['TRAFFIC_FILE']}-{self.dic_traffic_env_conf['MODEL_NAME']}-{len(self.dic_traffic_env_conf['PHASE'])}_Phases-replayLogFile.txt\"\n }\n # print(cityflow_config)\n with open(os.path.join(self.path_to_work_directory, \"cityflow.config\"), \"w\") as json_file:\n json.dump(cityflow_config, json_file)\n\n self.eng = engine.Engine(os.path.join(self.path_to_work_directory, \"cityflow.config\"), thread_num=1)\n\n # get adjacency\n self.traffic_light_node_dict = self._adjacency_extraction()\n\n # get lane length\n _, self.lane_length = self.get_lane_length()\n\n # initialize intersections (grid)\n self.list_intersection = [Intersection((i+1, j+1), self.dic_traffic_env_conf, self.eng,\n self.traffic_light_node_dict[\"intersection_{0}_{1}\".format(i+1, j+1)],\n self.path_to_log,\n self.lane_length)\n for i in range(self.dic_traffic_env_conf[\"NUM_COL\"])\n for j in range(self.dic_traffic_env_conf[\"NUM_ROW\"])]\n self.list_inter_log = [[] for _ in range(self.dic_traffic_env_conf[\"NUM_COL\"] *\n self.dic_traffic_env_conf[\"NUM_ROW\"])]\n\n self.id_to_index = {}\n count = 0\n for i in range(self.dic_traffic_env_conf[\"NUM_COL\"]):\n for j in range(self.dic_traffic_env_conf[\"NUM_ROW\"]):\n self.id_to_index[\"intersection_{0}_{1}\".format(i+1, j+1)] = count\n count += 1\n\n self.list_lanes = []\n for inter in self.list_intersection:\n self.list_lanes += inter.list_lanes\n self.list_lanes = 
np.unique(self.list_lanes).tolist()\n\n # get new measurements\n self.system_states = {\"get_lane_vehicles\": self.eng.get_lane_vehicles(),\n \"get_lane_waiting_vehicle_count\": self.eng.get_lane_waiting_vehicle_count(),\n \"get_vehicle_speed\": self.eng.get_vehicle_speed(),\n \"get_vehicle_distance\": self.eng.get_vehicle_distance(),\n }\n\n for inter in self.list_intersection:\n inter.update_current_measurements(self.system_states)\n state, done = self.get_state()\n\n # create roadnet dict\n if self.intersection_dict is None:\n self.create_intersection_dict()\n\n return state\n\n\n def create_intersection_dict(self):\n roadnet = load_json(f'./{self.dic_path[\"PATH_TO_DATA\"]}/{self.dic_traffic_env_conf[\"ROADNET_FILE\"]}')\n\n intersections_raw = roadnet[\"intersections\"]\n roads_raw = roadnet[\"roads\"]\n\n agent_intersections = {}\n\n # init agent intersections\n for i, inter in enumerate(intersections_raw):\n inter_id = inter[\"id\"]\n intersection = None\n for env_inter in self.list_intersection:\n if env_inter.inter_name == inter_id:\n intersection = env_inter\n break\n\n if len(inter['roadLinks']) > 0:\n # collect yellow allowed road links\n yellow_time = None\n phases = inter['trafficLight']['lightphases']\n all_sets = []\n yellow_phase_idx = None\n for p_i, p in enumerate(phases):\n all_sets.append(set(p['availableRoadLinks']))\n if p[\"time\"] < 30:\n yellow_phase_idx = p_i\n yellow_time = p[\"time\"]\n yellow_allowed_links = reduce(lambda x, y: x & y, all_sets)\n\n # init intersection\n agent_intersections[inter_id] = {\"phases\": {\"Y\": {\"time\": yellow_time, \"idx\": yellow_phase_idx}},\n \"roads\": {}}\n\n # init roads\n roads = {}\n for r in inter[\"roads\"]:\n roads[r] = {\"location\": None, \"type\": \"incoming\", \"go_straight\": None, \"turn_left\": None,\n \"turn_right\": None, \"length\": None, \"max_speed\": None,\n \"lanes\": {\"go_straight\": [], \"turn_left\": [], \"turn_right\": []}}\n\n # collect road length speed info & init road location\n road_links = inter[\"roadLinks\"]\n for r in roads_raw:\n r_id = r[\"id\"]\n if r_id in roads:\n roads[r_id][\"length\"] = calculate_road_length(r[\"points\"])\n roads[r_id][\"max_speed\"] = r[\"lanes\"][0][\"maxSpeed\"]\n for env_road_location in intersection.dic_entering_approach_to_edge:\n if intersection.dic_entering_approach_to_edge[env_road_location] == r_id:\n roads[r_id][\"location\"] = location_dict_reverse[env_road_location]\n break\n for env_road_location in intersection.dic_exiting_approach_to_edge:\n if intersection.dic_exiting_approach_to_edge[env_road_location] == r_id:\n roads[r_id][\"location\"] = location_dict_reverse[env_road_location]\n break\n\n # collect signal phase info\n for p_idx, p in enumerate(phases):\n other_allowed_links = set(p['availableRoadLinks']) - yellow_allowed_links\n if len(other_allowed_links) > 0:\n allowed_directions = []\n for l_idx in other_allowed_links:\n link = road_links[l_idx]\n location = roads[link[\"startRoad\"]][\"location\"]\n direction = link[\"type\"]\n allowed_directions.append(f\"{location_dict[location]}{direction_dict[direction]}\")\n allowed_directions = sorted(allowed_directions)\n allowed_directions = f\"{allowed_directions[0]}{allowed_directions[1]}\"\n agent_intersections[inter_id][\"phases\"][allowed_directions] = {\"time\": p[\"time\"], \"idx\": p_idx}\n\n # collect location type direction info\n for r_link in road_links:\n start = r_link['startRoad']\n end = r_link['endRoad']\n lane_links = r_link['laneLinks']\n\n for r in roads:\n if r != start:\n 
continue\n # collect type\n roads[r][\"type\"] = \"outgoing\"\n\n # collect directions\n if r_link[\"type\"] == \"go_straight\":\n roads[r][\"go_straight\"] = end\n\n # collect lane info\n for l_link in lane_links:\n lane_id = l_link['startLaneIndex']\n if lane_id not in roads[r][\"lanes\"][\"go_straight\"]:\n roads[r][\"lanes\"][\"go_straight\"].append(lane_id)\n\n elif r_link[\"type\"] == \"turn_left\":\n roads[r][\"turn_left\"] = end\n\n # collect lane info\n for l_link in lane_links:\n lane_id = l_link['startLaneIndex']\n if lane_id not in roads[r][\"lanes\"][\"turn_left\"]:\n roads[r][\"lanes\"][\"turn_left\"].append(lane_id)\n\n elif r_link[\"type\"] == \"turn_right\":\n roads[r][\"turn_right\"] = end\n\n # collect lane info\n for l_link in lane_links:\n lane_id = l_link['startLaneIndex']\n if lane_id not in roads[r][\"lanes\"][\"turn_right\"]:\n roads[r][\"lanes\"][\"turn_right\"].append(lane_id)\n\n agent_intersections[inter_id][\"roads\"] = roads\n\n self.intersection_dict = agent_intersections\n\n def step(self, action):\n\n step_start_time = time.time()\n\n list_action_in_sec = [action]\n list_action_in_sec_display = [action]\n for i in range(self.dic_traffic_env_conf[\"MIN_ACTION_TIME\"]-1):\n if self.dic_traffic_env_conf[\"ACTION_PATTERN\"] == \"switch\":\n list_action_in_sec.append(np.zeros_like(action).tolist())\n elif self.dic_traffic_env_conf[\"ACTION_PATTERN\"] == \"set\":\n list_action_in_sec.append(np.copy(action).tolist())\n list_action_in_sec_display.append(np.full_like(action, fill_value=-1).tolist())\n\n average_reward_action_list = [0]*len(action)\n for i in range(self.dic_traffic_env_conf[\"MIN_ACTION_TIME\"]):\n\n action_in_sec = list_action_in_sec[i]\n action_in_sec_display = list_action_in_sec_display[i]\n\n instant_time = self.get_current_time()\n self.current_time = self.get_current_time()\n\n before_action_feature = self.get_feature()\n # state = self.get_state()\n\n if i == 0:\n print(\"time: {0}\".format(instant_time))\n \n self._inner_step(action_in_sec)\n\n # get reward\n reward = self.get_reward()\n for j in range(len(reward)):\n average_reward_action_list[j] = (average_reward_action_list[j] * i + reward[j]) / (i + 1)\n self.log(cur_time=instant_time, before_action_feature=before_action_feature, action=action_in_sec_display)\n next_state, done = self.get_state()\n\n print(\"Step time: \", time.time() - step_start_time)\n return next_state, reward, done, average_reward_action_list\n\n def _inner_step(self, action):\n # copy current measurements to previous measurements\n for inter in self.list_intersection:\n inter.update_previous_measurements()\n # set signals\n # multi_intersection decided by action {inter_id: phase}\n for inter_ind, inter in enumerate(self.list_intersection):\n inter.set_signal(\n action=action[inter_ind],\n action_pattern=self.dic_traffic_env_conf[\"ACTION_PATTERN\"],\n yellow_time=self.dic_traffic_env_conf[\"YELLOW_TIME\"],\n path_to_log=self.path_to_log\n )\n\n # run one step\n for i in range(int(1/self.dic_traffic_env_conf[\"INTERVAL\"])):\n self.eng.next_step()\n\n # update queuing vehicle info\n vehicle_ids = self.eng.get_vehicles(include_waiting=False)\n for v_id in vehicle_ids:\n v_info = self.eng.get_vehicle_info(v_id)\n speed = float(v_info[\"speed\"])\n if speed < 0.1:\n if v_id not in self.waiting_vehicle_list:\n self.waiting_vehicle_list[v_id] = {\"time\": None, \"link\": None}\n self.waiting_vehicle_list[v_id][\"time\"] = self.dic_traffic_env_conf[\"INTERVAL\"]\n self.waiting_vehicle_list[v_id][\"link\"] = 
v_info['drivable']\n else:\n if self.waiting_vehicle_list[v_id][\"link\"] != v_info['drivable']:\n self.waiting_vehicle_list[v_id] = {\"time\": None, \"link\": None}\n self.waiting_vehicle_list[v_id][\"time\"] = self.dic_traffic_env_conf[\"INTERVAL\"]\n self.waiting_vehicle_list[v_id][\"link\"] = v_info['drivable']\n else:\n self.waiting_vehicle_list[v_id][\"time\"] += self.dic_traffic_env_conf[\"INTERVAL\"]\n else:\n if v_id in self.waiting_vehicle_list:\n self.waiting_vehicle_list.pop(v_id)\n\n if v_id in self.waiting_vehicle_list and self.waiting_vehicle_list[v_id][\"link\"] != v_info['drivable']:\n self.waiting_vehicle_list.pop(v_id)\n\n self.system_states = {\"get_lane_vehicles\": self.eng.get_lane_vehicles(),\n \"get_lane_waiting_vehicle_count\": self.eng.get_lane_waiting_vehicle_count(),\n \"get_vehicle_speed\": self.eng.get_vehicle_speed(),\n \"get_vehicle_distance\": self.eng.get_vehicle_distance()\n }\n\n for inter in self.list_intersection:\n inter.update_current_measurements(self.system_states)\n\n def get_feature(self):\n list_feature = [inter.get_feature() for inter in self.list_intersection]\n return list_feature\n\n def get_state(self, list_state_feature=None):\n if list_state_feature is not None:\n list_state = [inter.get_state(list_state_feature) for inter in self.list_intersection]\n done = False\n else:\n list_state = [inter.get_state(self.dic_traffic_env_conf[\"LIST_STATE_FEATURE\"]) for inter in self.list_intersection]\n done = False\n return list_state, done\n\n def get_reward(self):\n list_reward = [inter.get_reward(self.dic_traffic_env_conf[\"DIC_REWARD_INFO\"]) for inter in self.list_intersection]\n return list_reward\n\n def get_current_time(self):\n return self.eng.get_current_time()\n\n def log(self, cur_time, before_action_feature, action):\n\n for inter_ind in range(len(self.list_intersection)):\n self.list_inter_log[inter_ind].append({\"time\": cur_time,\n \"state\": before_action_feature[inter_ind],\n \"action\": action[inter_ind]})\n\n def batch_log_2(self):\n \"\"\"\n Used for model test, only log the vehicle_inter_.csv\n \"\"\"\n for inter_ind in range(self.dic_traffic_env_conf[\"NUM_INTERSECTIONS\"]):\n # changed from origin\n if int(inter_ind) % 100 == 0:\n print(\"Batch log for inter \", inter_ind)\n path_to_log_file = os.path.join(self.path_to_log, \"vehicle_inter_{0}.csv\".format(inter_ind))\n dic_vehicle = self.list_intersection[inter_ind].get_dic_vehicle_arrive_leave_time()\n df = pd.DataFrame.from_dict(dic_vehicle, orient=\"index\")\n df.to_csv(path_to_log_file, na_rep=\"nan\")\n\n def batch_log(self, start, stop):\n for inter_ind in range(start, stop):\n # changed from origin\n if int(inter_ind) % 100 == 0:\n print(\"Batch log for inter \", inter_ind)\n path_to_log_file = os.path.join(self.path_to_log, \"vehicle_inter_{0}.csv\".format(inter_ind))\n dic_vehicle = self.list_intersection[inter_ind].get_dic_vehicle_arrive_leave_time()\n df = pd.DataFrame.from_dict(dic_vehicle, orient=\"index\")\n df.to_csv(path_to_log_file, na_rep=\"nan\")\n \n path_to_log_file = os.path.join(self.path_to_log, \"inter_{0}.pkl\".format(inter_ind))\n f = open(path_to_log_file, \"wb\")\n pickle.dump(self.list_inter_log[inter_ind], f)\n f.close()\n\n def bulk_log_multi_process(self, batch_size=100):\n assert len(self.list_intersection) == len(self.list_inter_log)\n if batch_size > len(self.list_intersection):\n batch_size_run = len(self.list_intersection)\n else:\n batch_size_run = batch_size\n process_list = []\n for batch in range(0, len(self.list_intersection), 
batch_size_run):\n start = batch\n stop = min(batch + batch_size, len(self.list_intersection))\n p = Process(target=self.batch_log, args=(start, stop))\n print(\"before\")\n p.start()\n print(\"end\")\n process_list.append(p)\n print(\"before join\")\n\n for t in process_list:\n t.join()\n print(\"end join\")\n\n def _adjacency_extraction(self):\n traffic_light_node_dict = {}\n file = os.path.join(self.path_to_work_directory, self.dic_traffic_env_conf[\"ROADNET_FILE\"])\n with open(\"{0}\".format(file)) as json_data:\n net = json.load(json_data)\n for inter in net[\"intersections\"]:\n if not inter[\"virtual\"]:\n traffic_light_node_dict[inter[\"id\"]] = {\"location\": {\"x\": float(inter[\"point\"][\"x\"]),\n \"y\": float(inter[\"point\"][\"y\"])},\n \"total_inter_num\": None, \"adjacency_row\": None,\n \"inter_id_to_index\": None,\n \"neighbor_ENWS\": None}\n\n top_k = self.dic_traffic_env_conf[\"TOP_K_ADJACENCY\"]\n total_inter_num = len(traffic_light_node_dict.keys())\n inter_id_to_index = {}\n\n edge_id_dict = {}\n for road in net[\"roads\"]:\n if road[\"id\"] not in edge_id_dict.keys():\n edge_id_dict[road[\"id\"]] = {}\n edge_id_dict[road[\"id\"]][\"from\"] = road[\"startIntersection\"]\n edge_id_dict[road[\"id\"]][\"to\"] = road[\"endIntersection\"]\n\n index = 0\n for i in traffic_light_node_dict.keys():\n inter_id_to_index[i] = index\n index += 1\n\n for i in traffic_light_node_dict.keys():\n location_1 = traffic_light_node_dict[i][\"location\"]\n\n row = np.array([0]*total_inter_num)\n # row = np.zeros((self.dic_traffic_env_conf[\"NUM_ROW\"],self.dic_traffic_env_conf[\"NUM_col\"]))\n for j in traffic_light_node_dict.keys():\n location_2 = traffic_light_node_dict[j][\"location\"]\n dist = self._cal_distance(location_1, location_2)\n row[inter_id_to_index[j]] = dist\n if len(row) == top_k:\n adjacency_row_unsorted = np.argpartition(row, -1)[:top_k].tolist()\n elif len(row) > top_k:\n adjacency_row_unsorted = np.argpartition(row, top_k)[:top_k].tolist()\n else:\n adjacency_row_unsorted = [k for k in range(total_inter_num)]\n adjacency_row_unsorted.remove(inter_id_to_index[i])\n traffic_light_node_dict[i][\"adjacency_row\"] = [inter_id_to_index[i]]+adjacency_row_unsorted\n traffic_light_node_dict[i][\"total_inter_num\"] = total_inter_num\n\n for i in traffic_light_node_dict.keys():\n traffic_light_node_dict[i][\"total_inter_num\"] = inter_id_to_index\n traffic_light_node_dict[i][\"neighbor_ENWS\"] = []\n for j in range(4):\n road_id = i.replace(\"intersection\", \"road\")+\"_\"+str(j)\n if edge_id_dict[road_id][\"to\"] not in traffic_light_node_dict.keys():\n traffic_light_node_dict[i][\"neighbor_ENWS\"].append(None)\n else:\n traffic_light_node_dict[i][\"neighbor_ENWS\"].append(edge_id_dict[road_id][\"to\"])\n\n return traffic_light_node_dict\n\n @staticmethod\n def _cal_distance(loc_dict1, loc_dict2):\n a = np.array((loc_dict1[\"x\"], loc_dict1[\"y\"]))\n b = np.array((loc_dict2[\"x\"], loc_dict2[\"y\"]))\n return np.sqrt(np.sum((a-b)**2))\n\n @staticmethod\n def end_cityflow():\n print(\"============== cityflow process end ===============\")\n\n def get_lane_length(self):\n \"\"\"\n newly added part for get lane length\n Read the road net file\n Return: dict{lanes} normalized with the min lane length\n \"\"\"\n file = os.path.join(self.path_to_work_directory, self.dic_traffic_env_conf[\"ROADNET_FILE\"])\n with open(file) as json_data:\n net = json.load(json_data)\n roads = net['roads']\n lanes_length_dict = {}\n lane_normalize_factor = {}\n\n for road in roads:\n points = 
road[\"points\"]\n road_length = abs(points[0]['x'] + points[0]['y'] - points[1]['x'] - points[1]['y'])\n for i in range(3):\n lane_id = road['id'] + \"_{0}\".format(i)\n lanes_length_dict[lane_id] = road_length\n min_length = min(lanes_length_dict.values())\n\n for key, value in lanes_length_dict.items():\n lane_normalize_factor[key] = value / min_length\n return lane_normalize_factor, lanes_length_dict"
},
{
"identifier": "path_check",
"path": "utils/pipeline.py",
"snippet": "def path_check(dic_path):\n if os.path.exists(dic_path[\"PATH_TO_WORK_DIRECTORY\"]):\n if dic_path[\"PATH_TO_WORK_DIRECTORY\"] != \"records/default\":\n raise FileExistsError\n else:\n pass\n else:\n os.makedirs(dic_path[\"PATH_TO_WORK_DIRECTORY\"])\n if os.path.exists(dic_path[\"PATH_TO_MODEL\"]):\n if dic_path[\"PATH_TO_MODEL\"] != \"model/default\":\n raise FileExistsError\n else:\n pass\n else:\n os.makedirs(dic_path[\"PATH_TO_MODEL\"])"
},
{
"identifier": "copy_cityflow_file",
"path": "utils/pipeline.py",
"snippet": "def copy_cityflow_file(dic_path, dic_traffic_env_conf, path=None):\n if path is None:\n path = dic_path[\"PATH_TO_WORK_DIRECTORY\"]\n shutil.copy(os.path.join(dic_path[\"PATH_TO_DATA\"], dic_traffic_env_conf[\"TRAFFIC_FILE\"]),\n os.path.join(path, dic_traffic_env_conf[\"TRAFFIC_FILE\"]))\n shutil.copy(os.path.join(dic_path[\"PATH_TO_DATA\"], dic_traffic_env_conf[\"ROADNET_FILE\"]),\n os.path.join(path, dic_traffic_env_conf[\"ROADNET_FILE\"]))"
},
{
"identifier": "copy_conf_file",
"path": "utils/pipeline.py",
"snippet": "def copy_conf_file(dic_path, dic_agent_conf, dic_traffic_env_conf, path=None):\n if path is None:\n path = dic_path[\"PATH_TO_WORK_DIRECTORY\"]\n json.dump(dic_agent_conf, open(os.path.join(path, \"agent.conf\"), \"w\"), indent=4)\n json.dump(dic_traffic_env_conf, open(os.path.join(path, \"traffic_env.conf\"), \"w\"), indent=4)"
}
] | from .config import DIC_AGENTS
from .my_utils import merge, get_state, get_state_detail, eight_phase_list, dump_json
from copy import deepcopy
from .cityflow_env import CityFlowEnv
from .pipeline import path_check, copy_cityflow_file, copy_conf_file
from tqdm import tqdm
import os
import time
import numpy as np
import wandb
import threading | 8,027 | print("end reset")
current_time = self.env.get_current_time() # in seconds
all_config = merge(merge(self.dic_agent_conf, self.dic_path), self.dic_traffic_env_conf)
logger = wandb.init(
project=self.dic_traffic_env_conf['PROJECT_NAME'],
group=f"{self.dic_traffic_env_conf['MODEL_NAME']}-{self.roadnet}-{self.trafficflow}-{len(self.dic_traffic_env_conf['PHASE'])}_Phases",
name=f"round_{round}",
config=all_config,
)
start_time = time.time()
state_action_log = [[] for _ in range(len(state))]
while not done and current_time < total_run_cnt:
action_list = []
threads = []
for i in range(len(state)):
# log statistic state
intersection = self.env.intersection_dict[self.env.list_intersection[i].inter_name]
roads = deepcopy(intersection["roads"])
statistic_state, statistic_state_incoming, mean_speed = get_state_detail(roads, self.env)
state_action_log[i].append({"state": statistic_state, "state_incoming": statistic_state_incoming, "approaching_speed": mean_speed})
one_state = state[i]
count = step_num
if "ChatGPT" in self.dic_traffic_env_conf["MODEL_NAME"] or "open_llm" in self.dic_traffic_env_conf["MODEL_NAME"]:
thread = threading.Thread(target=self.agents[i].choose_action, args=(self.env,))
threads.append(thread)
else:
action = self.agents[i].choose_action(count, one_state)
action_list.append(action)
# multi-thread
if "ChatGPT" in self.dic_traffic_env_conf["MODEL_NAME"]:
for thread in threads:
thread.start()
for thread in tqdm(threads):
thread.join()
for i in range(len(state)):
action = self.agents[i].temp_action_logger
action_list.append(action)
# multi-thread
if "open_llm" in self.dic_traffic_env_conf["MODEL_NAME"]:
started_thread_id = []
thread_num = self.dic_traffic_env_conf["LLM_API_THREAD_NUM"] if not self.dic_agent_conf["WITH_EXTERNAL_API"] else 2
for i, thread in enumerate(tqdm(threads)):
thread.start()
started_thread_id.append(i)
if (i + 1) % thread_num == 0:
for t_id in started_thread_id:
threads[t_id].join()
started_thread_id = []
for i in range(len(state)):
action = self.agents[i].temp_action_logger
action_list.append(action)
next_state, reward, done, _ = self.env.step(action_list)
# log action
for i in range(len(state)):
state_action_log[i][-1]["action"] = eight_phase_list[action_list[i]]
f_memory = open(file_name_memory, "a")
            # write the step record to the memory log file
memory_str = 'time = {0}\taction = {1}\tcurrent_phase = {2}\treward = {3}'.\
format(current_time, str(action_list), str([state[i]["cur_phase"][0] for i in range(len(state))]),
str(reward),)
f_memory.write(memory_str + "\n")
f_memory.close()
current_time = self.env.get_current_time() # in seconds
state = next_state
step_num += 1
# calculate logger results
total_reward += sum(reward)
queue_length_inter = []
for inter in self.env.list_intersection:
queue_length_inter.append(sum(inter.dic_feature['lane_num_waiting_vehicle_in']))
queue_length_episode.append(sum(queue_length_inter))
# waiting time
waiting_times = []
for veh in self.env.waiting_vehicle_list:
waiting_times.append(self.env.waiting_vehicle_list[veh]['time'])
waiting_time_episode.append(np.mean(waiting_times) if len(waiting_times) > 0 else 0.0)
# wandb logger
vehicle_travel_times = {}
for inter in self.env.list_intersection:
arrive_left_times = inter.dic_vehicle_arrive_leave_time
for veh in arrive_left_times:
if "shadow" in veh:
continue
enter_time = arrive_left_times[veh]["enter_time"]
leave_time = arrive_left_times[veh]["leave_time"]
if not np.isnan(enter_time):
leave_time = leave_time if not np.isnan(leave_time) else self.dic_traffic_env_conf["RUN_COUNTS"]
if veh not in vehicle_travel_times:
vehicle_travel_times[veh] = [leave_time - enter_time]
else:
vehicle_travel_times[veh].append(leave_time - enter_time)
total_travel_time = np.mean([sum(vehicle_travel_times[veh]) for veh in vehicle_travel_times])
results = {
"test_reward_over": total_reward,
"test_avg_queue_len_over": np.mean(queue_length_episode) if len(queue_length_episode) > 0 else 0,
"test_queuing_vehicle_num_over": np.sum(queue_length_episode) if len(queue_length_episode) > 0 else 0,
"test_avg_waiting_time_over": np.mean(waiting_time_episode) if len(queue_length_episode) > 0 else 0,
"test_avg_travel_time_over": total_travel_time}
logger.log(results)
print(results)
f_state_action = os.path.join(self.dic_path["PATH_TO_WORK_DIRECTORY"], "state_action.json")
|
class OneLine:
def __init__(self, dic_agent_conf, dic_traffic_env_conf, dic_path, roadnet, trafficflow):
self.dic_agent_conf = dic_agent_conf
self.dic_traffic_env_conf = dic_traffic_env_conf
self.dic_path = dic_path
self.agents = []
self.env = None
self.roadnet = roadnet
self.trafficflow = trafficflow
self.models = []
self.initialize()
def initialize(self):
path_check(self.dic_path)
copy_conf_file(self.dic_path, self.dic_agent_conf, self.dic_traffic_env_conf)
copy_cityflow_file(self.dic_path, self.dic_traffic_env_conf)
self.env = CityFlowEnv(
path_to_log=self.dic_path["PATH_TO_WORK_DIRECTORY"],
path_to_work_directory=self.dic_path["PATH_TO_WORK_DIRECTORY"],
dic_traffic_env_conf=self.dic_traffic_env_conf,
dic_path=self.dic_path
)
self.env.reset()
agent_name = self.dic_traffic_env_conf["MODEL_NAME"]
for i in range(self.dic_traffic_env_conf['NUM_INTERSECTIONS']):
if "ChatGPT" in agent_name:
agent = DIC_AGENTS[agent_name.split("-")[0]](
GPT_version=self.dic_agent_conf["GPT_VERSION"],
intersection=self.env.intersection_dict[self.env.list_intersection[i].inter_name],
inter_name=self.env.list_intersection[i].inter_name,
phase_num=len(self.env.list_intersection[i].list_phases),
log_dir=self.dic_agent_conf["LOG_DIR"],
dataset=f"{self.roadnet}-{self.trafficflow}"
)
elif "open_llm" in agent_name:
agent = DIC_AGENTS[agent_name.split("-")[0]](
ex_api=self.dic_agent_conf["WITH_EXTERNAL_API"],
model=agent_name.split("-")[1],
intersection=self.env.intersection_dict[self.env.list_intersection[i].inter_name],
inter_name=self.env.list_intersection[i].inter_name,
phase_num=len(self.env.list_intersection[i].list_phases),
log_dir=self.dic_agent_conf["LOG_DIR"],
dataset=f"{self.roadnet}-{self.trafficflow}"
)
else:
agent = DIC_AGENTS[agent_name](
dic_agent_conf=self.dic_agent_conf,
dic_traffic_env_conf=self.dic_traffic_env_conf,
dic_path=self.dic_path,
cnt_round=0,
intersection_id=str(i)
)
self.agents.append(agent)
def train(self, round):
print("================ start train ================")
total_run_cnt = self.dic_traffic_env_conf["RUN_COUNTS"]
# initialize output streams
file_name_memory = os.path.join(self.dic_path["PATH_TO_WORK_DIRECTORY"], "memories.txt")
done = False
state = self.env.reset()
total_reward = 0.0
queue_length_episode = []
waiting_time_episode = []
step_num = 0
print("end reset")
current_time = self.env.get_current_time() # in seconds
all_config = merge(merge(self.dic_agent_conf, self.dic_path), self.dic_traffic_env_conf)
logger = wandb.init(
project=self.dic_traffic_env_conf['PROJECT_NAME'],
group=f"{self.dic_traffic_env_conf['MODEL_NAME']}-{self.roadnet}-{self.trafficflow}-{len(self.dic_traffic_env_conf['PHASE'])}_Phases",
name=f"round_{round}",
config=all_config,
)
start_time = time.time()
state_action_log = [[] for _ in range(len(state))]
while not done and current_time < total_run_cnt:
action_list = []
threads = []
for i in range(len(state)):
# log statistic state
intersection = self.env.intersection_dict[self.env.list_intersection[i].inter_name]
roads = deepcopy(intersection["roads"])
statistic_state, statistic_state_incoming, mean_speed = get_state_detail(roads, self.env)
state_action_log[i].append({"state": statistic_state, "state_incoming": statistic_state_incoming, "approaching_speed": mean_speed})
one_state = state[i]
count = step_num
if "ChatGPT" in self.dic_traffic_env_conf["MODEL_NAME"] or "open_llm" in self.dic_traffic_env_conf["MODEL_NAME"]:
thread = threading.Thread(target=self.agents[i].choose_action, args=(self.env,))
threads.append(thread)
else:
action = self.agents[i].choose_action(count, one_state)
action_list.append(action)
# multi-thread
if "ChatGPT" in self.dic_traffic_env_conf["MODEL_NAME"]:
for thread in threads:
thread.start()
for thread in tqdm(threads):
thread.join()
for i in range(len(state)):
action = self.agents[i].temp_action_logger
action_list.append(action)
# multi-thread
if "open_llm" in self.dic_traffic_env_conf["MODEL_NAME"]:
started_thread_id = []
thread_num = self.dic_traffic_env_conf["LLM_API_THREAD_NUM"] if not self.dic_agent_conf["WITH_EXTERNAL_API"] else 2
for i, thread in enumerate(tqdm(threads)):
thread.start()
started_thread_id.append(i)
if (i + 1) % thread_num == 0:
for t_id in started_thread_id:
threads[t_id].join()
started_thread_id = []
for i in range(len(state)):
action = self.agents[i].temp_action_logger
action_list.append(action)
next_state, reward, done, _ = self.env.step(action_list)
# log action
for i in range(len(state)):
state_action_log[i][-1]["action"] = eight_phase_list[action_list[i]]
f_memory = open(file_name_memory, "a")
            # write the step record to the memory log file
memory_str = 'time = {0}\taction = {1}\tcurrent_phase = {2}\treward = {3}'.\
format(current_time, str(action_list), str([state[i]["cur_phase"][0] for i in range(len(state))]),
str(reward),)
f_memory.write(memory_str + "\n")
f_memory.close()
current_time = self.env.get_current_time() # in seconds
state = next_state
step_num += 1
# calculate logger results
total_reward += sum(reward)
queue_length_inter = []
for inter in self.env.list_intersection:
queue_length_inter.append(sum(inter.dic_feature['lane_num_waiting_vehicle_in']))
queue_length_episode.append(sum(queue_length_inter))
# waiting time
waiting_times = []
for veh in self.env.waiting_vehicle_list:
waiting_times.append(self.env.waiting_vehicle_list[veh]['time'])
waiting_time_episode.append(np.mean(waiting_times) if len(waiting_times) > 0 else 0.0)
# wandb logger
vehicle_travel_times = {}
for inter in self.env.list_intersection:
arrive_left_times = inter.dic_vehicle_arrive_leave_time
for veh in arrive_left_times:
if "shadow" in veh:
continue
enter_time = arrive_left_times[veh]["enter_time"]
leave_time = arrive_left_times[veh]["leave_time"]
if not np.isnan(enter_time):
leave_time = leave_time if not np.isnan(leave_time) else self.dic_traffic_env_conf["RUN_COUNTS"]
if veh not in vehicle_travel_times:
vehicle_travel_times[veh] = [leave_time - enter_time]
else:
vehicle_travel_times[veh].append(leave_time - enter_time)
total_travel_time = np.mean([sum(vehicle_travel_times[veh]) for veh in vehicle_travel_times])
results = {
"test_reward_over": total_reward,
"test_avg_queue_len_over": np.mean(queue_length_episode) if len(queue_length_episode) > 0 else 0,
"test_queuing_vehicle_num_over": np.sum(queue_length_episode) if len(queue_length_episode) > 0 else 0,
"test_avg_waiting_time_over": np.mean(waiting_time_episode) if len(queue_length_episode) > 0 else 0,
"test_avg_travel_time_over": total_travel_time}
logger.log(results)
print(results)
f_state_action = os.path.join(self.dic_path["PATH_TO_WORK_DIRECTORY"], "state_action.json") | dump_json(state_action_log, f_state_action) | 1 | 2023-12-26 08:31:47+00:00 | 12k |
KyanChen/TTP | tests/test_datasets/test_transform.py | [
{
"identifier": "LoadBiomedicalData",
"path": "mmseg/datasets/transforms/loading.py",
"snippet": "class LoadBiomedicalData(BaseTransform):\n \"\"\"Load an biomedical image and annotation from file.\n\n The loading data format is as the following:\n\n .. code-block:: python\n\n {\n 'img': np.ndarray data[:-1, X, Y, Z]\n 'seg_map': np.ndarray data[-1, X, Y, Z]\n }\n\n\n Required Keys:\n\n - img_path\n\n Added Keys:\n\n - img (np.ndarray): Biomedical image with shape (N, Z, Y, X) by default,\n N is the number of modalities.\n - gt_seg_map (np.ndarray, optional): Biomedical seg map with shape\n (Z, Y, X) by default.\n - img_shape\n - ori_shape\n\n Args:\n with_seg (bool): Whether to parse and load the semantic segmentation\n annotation. Defaults to False.\n decode_backend (str): The data decoding backend type. Options are\n 'numpy'and 'nifti', and there is a convention that when backend is\n 'nifti' the axis of data loaded is XYZ, and when backend is\n 'numpy', the the axis is ZYX. The data will be transposed if the\n backend is 'nifti'. Defaults to 'nifti'.\n to_xyz (bool): Whether transpose data from Z, Y, X to X, Y, Z.\n Defaults to False.\n backend_args (dict, Optional): Arguments to instantiate a file backend.\n See https://mmengine.readthedocs.io/en/latest/api/fileio.htm\n for details. Defaults to None.\n Notes: mmcv>=2.0.0rc4, mmengine>=0.2.0 required.\n \"\"\"\n\n def __init__(self,\n with_seg=False,\n decode_backend: str = 'numpy',\n to_xyz: bool = False,\n backend_args: Optional[dict] = None) -> None: # noqa\n self.with_seg = with_seg\n self.decode_backend = decode_backend\n self.to_xyz = to_xyz\n self.backend_args = backend_args.copy() if backend_args else None\n\n def transform(self, results: Dict) -> Dict:\n \"\"\"Functions to load image.\n\n Args:\n results (dict): Result dict from :obj:``mmcv.BaseDataset``.\n\n Returns:\n dict: The dict contains loaded image and meta information.\n \"\"\"\n data_bytes = fileio.get(results['img_path'], self.backend_args)\n data = datafrombytes(data_bytes, backend=self.decode_backend)\n # img is 4D data (N, X, Y, Z), N is the number of protocol\n img = data[:-1, :]\n\n if self.decode_backend == 'nifti':\n img = img.transpose(0, 3, 2, 1)\n\n if self.to_xyz:\n img = img.transpose(0, 3, 2, 1)\n\n results['img'] = img\n results['img_shape'] = img.shape[1:]\n results['ori_shape'] = img.shape[1:]\n\n if self.with_seg:\n gt_seg_map = data[-1, :]\n if self.decode_backend == 'nifti':\n gt_seg_map = gt_seg_map.transpose(2, 1, 0)\n\n if self.to_xyz:\n gt_seg_map = gt_seg_map.transpose(2, 1, 0)\n results['gt_seg_map'] = gt_seg_map\n return results\n\n def __repr__(self) -> str:\n repr_str = (f'{self.__class__.__name__}('\n f'with_seg={self.with_seg}, '\n f\"decode_backend='{self.decode_backend}', \"\n f'to_xyz={self.to_xyz}, '\n f'backend_args={self.backend_args})')\n return repr_str"
},
{
"identifier": "LoadBiomedicalImageFromFile",
"path": "mmseg/datasets/transforms/loading.py",
"snippet": "class LoadBiomedicalImageFromFile(BaseTransform):\n \"\"\"Load an biomedical mage from file.\n\n Required Keys:\n\n - img_path\n\n Added Keys:\n\n - img (np.ndarray): Biomedical image with shape (N, Z, Y, X) by default,\n N is the number of modalities, and data type is float32\n if set to_float32 = True, or float64 if decode_backend is 'nifti' and\n to_float32 is False.\n - img_shape\n - ori_shape\n\n Args:\n decode_backend (str): The data decoding backend type. Options are\n 'numpy'and 'nifti', and there is a convention that when backend is\n 'nifti' the axis of data loaded is XYZ, and when backend is\n 'numpy', the the axis is ZYX. The data will be transposed if the\n backend is 'nifti'. Defaults to 'nifti'.\n to_xyz (bool): Whether transpose data from Z, Y, X to X, Y, Z.\n Defaults to False.\n to_float32 (bool): Whether to convert the loaded image to a float32\n numpy array. If set to False, the loaded image is an float64 array.\n Defaults to True.\n backend_args (dict, Optional): Arguments to instantiate a file backend.\n See https://mmengine.readthedocs.io/en/latest/api/fileio.htm\n for details. Defaults to None.\n Notes: mmcv>=2.0.0rc4, mmengine>=0.2.0 required.\n \"\"\"\n\n def __init__(self,\n decode_backend: str = 'nifti',\n to_xyz: bool = False,\n to_float32: bool = True,\n backend_args: Optional[dict] = None) -> None:\n self.decode_backend = decode_backend\n self.to_xyz = to_xyz\n self.to_float32 = to_float32\n self.backend_args = backend_args.copy() if backend_args else None\n\n def transform(self, results: Dict) -> Dict:\n \"\"\"Functions to load image.\n\n Args:\n results (dict): Result dict from :obj:``mmcv.BaseDataset``.\n\n Returns:\n dict: The dict contains loaded image and meta information.\n \"\"\"\n\n filename = results['img_path']\n\n data_bytes = fileio.get(filename, self.backend_args)\n img = datafrombytes(data_bytes, backend=self.decode_backend)\n\n if self.to_float32:\n img = img.astype(np.float32)\n\n if len(img.shape) == 3:\n img = img[None, ...]\n\n if self.decode_backend == 'nifti':\n img = img.transpose(0, 3, 2, 1)\n\n if self.to_xyz:\n img = img.transpose(0, 3, 2, 1)\n\n results['img'] = img\n results['img_shape'] = img.shape[1:]\n results['ori_shape'] = img.shape[1:]\n return results\n\n def __repr__(self):\n repr_str = (f'{self.__class__.__name__}('\n f\"decode_backend='{self.decode_backend}', \"\n f'to_xyz={self.to_xyz}, '\n f'to_float32={self.to_float32}, '\n f'backend_args={self.backend_args})')\n return repr_str"
},
{
"identifier": "PhotoMetricDistortion",
"path": "mmseg/datasets/transforms/transforms.py",
"snippet": "class PhotoMetricDistortion(BaseTransform):\n \"\"\"Apply photometric distortion to image sequentially, every transformation\n is applied with a probability of 0.5. The position of random contrast is in\n second or second to last.\n\n 1. random brightness\n 2. random contrast (mode 0)\n 3. convert color from BGR to HSV\n 4. random saturation\n 5. random hue\n 6. convert color from HSV to BGR\n 7. random contrast (mode 1)\n\n Required Keys:\n\n - img\n\n Modified Keys:\n\n - img\n\n Args:\n brightness_delta (int): delta of brightness.\n contrast_range (tuple): range of contrast.\n saturation_range (tuple): range of saturation.\n hue_delta (int): delta of hue.\n \"\"\"\n\n def __init__(self,\n brightness_delta: int = 32,\n contrast_range: Sequence[float] = (0.5, 1.5),\n saturation_range: Sequence[float] = (0.5, 1.5),\n hue_delta: int = 18):\n self.brightness_delta = brightness_delta\n self.contrast_lower, self.contrast_upper = contrast_range\n self.saturation_lower, self.saturation_upper = saturation_range\n self.hue_delta = hue_delta\n\n def convert(self,\n img: np.ndarray,\n alpha: int = 1,\n beta: int = 0) -> np.ndarray:\n \"\"\"Multiple with alpha and add beat with clip.\n\n Args:\n img (np.ndarray): The input image.\n alpha (int): Image weights, change the contrast/saturation\n of the image. Default: 1\n beta (int): Image bias, change the brightness of the\n image. Default: 0\n\n Returns:\n np.ndarray: The transformed image.\n \"\"\"\n\n img = img.astype(np.float32) * alpha + beta\n img = np.clip(img, 0, 255)\n return img.astype(np.uint8)\n\n def brightness(self, img: np.ndarray) -> np.ndarray:\n \"\"\"Brightness distortion.\n\n Args:\n img (np.ndarray): The input image.\n Returns:\n np.ndarray: Image after brightness change.\n \"\"\"\n\n if random.randint(2):\n return self.convert(\n img,\n beta=random.uniform(-self.brightness_delta,\n self.brightness_delta))\n return img\n\n def contrast(self, img: np.ndarray) -> np.ndarray:\n \"\"\"Contrast distortion.\n\n Args:\n img (np.ndarray): The input image.\n Returns:\n np.ndarray: Image after contrast change.\n \"\"\"\n\n if random.randint(2):\n return self.convert(\n img,\n alpha=random.uniform(self.contrast_lower, self.contrast_upper))\n return img\n\n def saturation(self, img: np.ndarray) -> np.ndarray:\n \"\"\"Saturation distortion.\n\n Args:\n img (np.ndarray): The input image.\n Returns:\n np.ndarray: Image after saturation change.\n \"\"\"\n\n if random.randint(2):\n img = mmcv.bgr2hsv(img)\n img[:, :, 1] = self.convert(\n img[:, :, 1],\n alpha=random.uniform(self.saturation_lower,\n self.saturation_upper))\n img = mmcv.hsv2bgr(img)\n return img\n\n def hue(self, img: np.ndarray) -> np.ndarray:\n \"\"\"Hue distortion.\n\n Args:\n img (np.ndarray): The input image.\n Returns:\n np.ndarray: Image after hue change.\n \"\"\"\n\n if random.randint(2):\n img = mmcv.bgr2hsv(img)\n img[:, :,\n 0] = (img[:, :, 0].astype(int) +\n random.randint(-self.hue_delta, self.hue_delta)) % 180\n img = mmcv.hsv2bgr(img)\n return img\n\n def transform(self, results: dict) -> dict:\n \"\"\"Transform function to perform photometric distortion on images.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Result dict with images distorted.\n \"\"\"\n\n img = results['img']\n # random brightness\n img = self.brightness(img)\n\n # mode == 0 --> do random contrast first\n # mode == 1 --> do random contrast last\n mode = random.randint(2)\n if mode == 1:\n img = self.contrast(img)\n\n # random saturation\n img = 
self.saturation(img)\n\n # random hue\n img = self.hue(img)\n\n # random contrast\n if mode == 0:\n img = self.contrast(img)\n\n results['img'] = img\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += (f'(brightness_delta={self.brightness_delta}, '\n f'contrast_range=({self.contrast_lower}, '\n f'{self.contrast_upper}), '\n f'saturation_range=({self.saturation_lower}, '\n f'{self.saturation_upper}), '\n f'hue_delta={self.hue_delta})')\n return repr_str"
},
{
"identifier": "RandomCrop",
"path": "mmseg/datasets/transforms/transforms.py",
"snippet": "class RandomCrop(BaseTransform):\n \"\"\"Random crop the image & seg.\n\n Required Keys:\n\n - img\n - gt_seg_map\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_seg_map\n\n\n Args:\n crop_size (Union[int, Tuple[int, int]]): Expected size after cropping\n with the format of (h, w). If set to an integer, then cropping\n width and height are equal to this integer.\n cat_max_ratio (float): The maximum ratio that single category could\n occupy.\n ignore_index (int): The label index to be ignored. Default: 255\n \"\"\"\n\n def __init__(self,\n crop_size: Union[int, Tuple[int, int]],\n cat_max_ratio: float = 1.,\n ignore_index: int = 255):\n super().__init__()\n assert isinstance(crop_size, int) or (\n isinstance(crop_size, tuple) and len(crop_size) == 2\n ), 'The expected crop_size is an integer, or a tuple containing two '\n 'intergers'\n\n if isinstance(crop_size, int):\n crop_size = (crop_size, crop_size)\n assert crop_size[0] > 0 and crop_size[1] > 0\n self.crop_size = crop_size\n self.cat_max_ratio = cat_max_ratio\n self.ignore_index = ignore_index\n\n @cache_randomness\n def crop_bbox(self, results: dict) -> tuple:\n \"\"\"get a crop bounding box.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n tuple: Coordinates of the cropped image.\n \"\"\"\n\n def generate_crop_bbox(img: np.ndarray) -> tuple:\n \"\"\"Randomly get a crop bounding box.\n\n Args:\n img (np.ndarray): Original input image.\n\n Returns:\n tuple: Coordinates of the cropped image.\n \"\"\"\n\n margin_h = max(img.shape[0] - self.crop_size[0], 0)\n margin_w = max(img.shape[1] - self.crop_size[1], 0)\n offset_h = np.random.randint(0, margin_h + 1)\n offset_w = np.random.randint(0, margin_w + 1)\n crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0]\n crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1]\n\n return crop_y1, crop_y2, crop_x1, crop_x2\n\n img = results['img']\n crop_bbox = generate_crop_bbox(img)\n if self.cat_max_ratio < 1.:\n # Repeat 10 times\n for _ in range(10):\n seg_temp = self.crop(results['gt_seg_map'], crop_bbox)\n labels, cnt = np.unique(seg_temp, return_counts=True)\n cnt = cnt[labels != self.ignore_index]\n if len(cnt) > 1 and np.max(cnt) / np.sum(\n cnt) < self.cat_max_ratio:\n break\n crop_bbox = generate_crop_bbox(img)\n\n return crop_bbox\n\n def crop(self, img: np.ndarray, crop_bbox: tuple) -> np.ndarray:\n \"\"\"Crop from ``img``\n\n Args:\n img (np.ndarray): Original input image.\n crop_bbox (tuple): Coordinates of the cropped image.\n\n Returns:\n np.ndarray: The cropped image.\n \"\"\"\n\n crop_y1, crop_y2, crop_x1, crop_x2 = crop_bbox\n img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]\n return img\n\n def transform(self, results: dict) -> dict:\n \"\"\"Transform function to randomly crop images, semantic segmentation\n maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Randomly cropped results, 'img_shape' key in result dict is\n updated according to crop size.\n \"\"\"\n\n img = results['img']\n crop_bbox = self.crop_bbox(results)\n\n # crop the image\n img = self.crop(img, crop_bbox)\n\n # crop semantic seg\n for key in results.get('seg_fields', []):\n results[key] = self.crop(results[key], crop_bbox)\n\n results['img'] = img\n results['img_shape'] = img.shape[:2]\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(crop_size={self.crop_size})'"
},
{
"identifier": "RandomDepthMix",
"path": "mmseg/datasets/transforms/transforms.py",
"snippet": "class RandomDepthMix(BaseTransform):\n \"\"\"This class implements the RandomDepthMix transform.\n\n Args:\n prob (float): Probability of applying the transformation.\n Defaults to 0.25.\n mix_scale_ratio (float): Ratio to scale the mix width.\n Defaults to 0.75.\n \"\"\"\n\n def __init__(\n self,\n prob: float = 0.25,\n mix_scale_ratio: float = 0.75,\n ):\n super().__init__()\n\n self.prob = prob\n self.mix_scale_ratio = mix_scale_ratio\n\n def transform(self, results: dict) -> dict:\n if random.random() > self.prob:\n return results\n\n h, w = results['img_shape'][:2]\n left = int(w * random.random())\n width_ratio = self.mix_scale_ratio * random.random()\n width = int(max(1, (w - left) * width_ratio))\n\n img = results['img']\n depth_rescale_factor = results.get('depth_rescale_factor', 1)\n depth_map = results['gt_depth_map'] / depth_rescale_factor\n\n if img.ndim == 3:\n for c in range(img.shape[-1]):\n img[:, left:left + width, c] = depth_map[:, left:left + width]\n elif img.ndim == 2:\n img[:, left:left + width] = depth_map[:, left:left + width]\n else:\n raise ValueError(f'Invalid image shape ({img.shape})')\n\n results['img'] = img\n return results"
},
{
"identifier": "TRANSFORMS",
"path": "mmseg/registry/registry.py",
"snippet": "TRANSFORMS = Registry(\n 'transform',\n parent=MMENGINE_TRANSFORMS,\n locations=['mmseg.datasets.transforms'])"
}
] | import copy
import os.path as osp
import mmcv
import numpy as np
import pytest
from unittest import TestCase
from mmengine.registry import init_default_scope
from PIL import Image
from mmseg.datasets.transforms import * # noqa
from mmseg.datasets.transforms import (LoadBiomedicalData,
LoadBiomedicalImageFromFile,
PhotoMetricDistortion, RandomCrop,
RandomDepthMix)
from mmseg.registry import TRANSFORMS
from mmseg.datasets.transforms import (LoadBiomedicalAnnotation,
LoadBiomedicalImageFromFile)
from mmseg.datasets.transforms import LoadBiomedicalImageFromFile
from mmseg.datasets.transforms import LoadBiomedicalImageFromFile
from mmseg.datasets.transforms import LoadBiomedicalImageFromFile | 7,544 | results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
mean = np.array(img_norm_cfg['mean'])
std = np.array(img_norm_cfg['std'])
converted_img = (original_img[..., ::-1] - mean) / std
assert np.allclose(results['img'], converted_img)
def test_random_crop():
# test assertion for invalid random crop
with pytest.raises(AssertionError):
RandomCrop(crop_size=(-1, 0))
results = dict()
img = mmcv.imread(osp.join('tests/data/color.jpg'), 'color')
seg = np.array(Image.open(osp.join('tests/data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
h, w, _ = img.shape
pipeline = RandomCrop(crop_size=(h - 20, w - 20))
results = pipeline(results)
assert results['img'].shape[:2] == (h - 20, w - 20)
assert results['img_shape'] == (h - 20, w - 20)
assert results['gt_semantic_seg'].shape[:2] == (h - 20, w - 20)
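# --- Illustrative sketch (editor's addition, not part of the original tests) ---
# RandomCrop can also be built through the TRANSFORMS registry with a
# cat_max_ratio constraint, which re-samples the crop box (up to 10 times) until
# no single category dominates the crop; the values below are illustrative only.
def _example_build_random_crop():
    return TRANSFORMS.build(
        dict(type='RandomCrop', crop_size=(256, 256), cat_max_ratio=0.75))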
def test_rgb2gray():
# test assertion out_channels should be greater than 0
with pytest.raises(AssertionError):
transform = dict(type='RGB2Gray', out_channels=-1)
TRANSFORMS.build(transform)
# test assertion weights should be tuple[float]
with pytest.raises(AssertionError):
transform = dict(type='RGB2Gray', out_channels=1, weights=1.1)
TRANSFORMS.build(transform)
# test out_channels is None
transform = dict(type='RGB2Gray')
transform = TRANSFORMS.build(transform)
assert str(transform) == f'RGB2Gray(' \
f'out_channels={None}, ' \
f'weights={(0.299, 0.587, 0.114)})'
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
h, w, c = img.shape
seg = np.array(
Image.open(osp.join(osp.dirname(__file__), '../data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
assert results['img'].shape == (h, w, c)
assert results['img_shape'] == (h, w, c)
assert results['ori_shape'] == (h, w, c)
# test out_channels = 2
transform = dict(type='RGB2Gray', out_channels=2)
transform = TRANSFORMS.build(transform)
assert str(transform) == f'RGB2Gray(' \
f'out_channels={2}, ' \
f'weights={(0.299, 0.587, 0.114)})'
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
h, w, c = img.shape
seg = np.array(
Image.open(osp.join(osp.dirname(__file__), '../data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
assert results['img'].shape == (h, w, 2)
assert results['img_shape'] == (h, w, 2)
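# --- Illustrative sketch (editor's addition, not part of the original tests) ---
# PhotoMetricDistortion can also be instantiated directly with custom ranges;
# each sub-distortion is applied with probability 0.5. Values are illustrative.
def _example_build_photo_metric_distortion():
    return PhotoMetricDistortion(
        brightness_delta=16,
        contrast_range=(0.8, 1.2),
        saturation_range=(0.8, 1.2),
        hue_delta=9)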
def test_photo_metric_distortion():
results = dict()
img = mmcv.imread(osp.join('tests/data/color.jpg'), 'color')
seg = np.array(Image.open(osp.join('tests/data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
| # Copyright (c) OpenMMLab. All rights reserved.
init_default_scope('mmseg')
def test_resize():
    # Test `Resize`, `RandomResize` and `RandomChoiceResize` from
    # MMCV transforms. Note: `RandomChoiceResize` takes the arg `scales`, while
    # `Resize` and `RandomResize` take the arg `scale`.
transform = dict(type='Resize', scale=(1333, 800), keep_ratio=True)
resize_module = TRANSFORMS.build(transform)
results = dict()
# (288, 512, 3)
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
resized_results = resize_module(results.copy())
# img_shape = results['img'].shape[:2] in ``MMCV resize`` function
# so right now it is (750, 1333) rather than (750, 1333, 3)
assert resized_results['img_shape'] == (750, 1333)
# test keep_ratio=False
transform = dict(
type='RandomResize',
scale=(1280, 800),
ratio_range=(1.0, 1.0),
resize_type='Resize',
keep_ratio=False)
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert resized_results['img_shape'] == (800, 1280)
    # test `RandomResize` with a scale range, which corresponds to the older
    # mmsegmentation `Resize` with multiscale_mode='range'
transform = dict(type='RandomResize', scale=[(1333, 400), (1333, 1200)])
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert max(resized_results['img_shape'][:2]) <= 1333
assert min(resized_results['img_shape'][:2]) >= 400
assert min(resized_results['img_shape'][:2]) <= 1200
    # test `RandomChoiceResize`, which corresponds to the older mmsegmentation
    # `Resize` with multiscale_mode='value'
transform = dict(
type='RandomChoiceResize',
scales=[(1333, 800), (1333, 400)],
resize_type='Resize',
keep_ratio=False)
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert resized_results['img_shape'] in [(800, 1333), (400, 1333)]
transform = dict(type='Resize', scale_factor=(0.9, 1.1), keep_ratio=True)
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert max(resized_results['img_shape'][:2]) <= 1333 * 1.1
# test RandomChoiceResize, which `resize_type` is `ResizeShortestEdge`
transform = dict(
type='RandomChoiceResize',
scales=[128, 256, 512],
resize_type='ResizeShortestEdge',
max_size=1333)
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert resized_results['img_shape'][0] in [128, 256, 512]
transform = dict(
type='RandomChoiceResize',
scales=[512],
resize_type='ResizeShortestEdge',
max_size=512)
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert resized_results['img_shape'][1] == 512
transform = dict(
type='RandomChoiceResize',
scales=[(128, 256), (256, 512), (512, 1024)],
resize_type='ResizeShortestEdge',
max_size=1333)
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert resized_results['img_shape'][0] in [128, 256, 512]
    # test scale=None with scale_factor given as a tuple.
    # img shape: (288, 512, 3)
transform = dict(
type='Resize', scale=None, scale_factor=(0.5, 2.0), keep_ratio=True)
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert int(288 * 0.5) <= resized_results['img_shape'][0] <= 288 * 2.0
assert int(512 * 0.5) <= resized_results['img_shape'][1] <= 512 * 2.0
# test minimum resized image shape is 640
transform = dict(type='Resize', scale=(2560, 640), keep_ratio=True)
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert resized_results['img_shape'] == (640, 1138)
    # test that the minimum resized edge is 640 when img_scale=(512, 640),
    # which with the new MMCV ``Resize`` must be expressed via `scale_factor`.
min_size_ratio = max(640 / img.shape[0], 640 / img.shape[1])
transform = dict(
type='Resize', scale_factor=min_size_ratio, keep_ratio=True)
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert resized_results['img_shape'] == (640, 1138)
# test h > w
img = np.random.randn(512, 288, 3)
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
min_size_ratio = max(640 / img.shape[0], 640 / img.shape[1])
transform = dict(
type='Resize',
scale=(2560, 640),
scale_factor=min_size_ratio,
keep_ratio=True)
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert resized_results['img_shape'] == (1138, 640)
def test_flip():
# test assertion for invalid prob
with pytest.raises(AssertionError):
transform = dict(type='RandomFlip', prob=1.5)
TRANSFORMS.build(transform)
# test assertion for invalid direction
with pytest.raises(AssertionError):
transform = dict(type='RandomFlip', prob=1.0, direction='horizonta')
TRANSFORMS.build(transform)
transform = dict(type='RandomFlip', prob=1.0)
flip_module = TRANSFORMS.build(transform)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
seg = np.array(
Image.open(osp.join(osp.dirname(__file__), '../data/seg.png')))
original_seg = copy.deepcopy(seg)
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
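    # apply the deterministic flip (prob=1.0) twice; data should match the original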
results = flip_module(results)
flip_module = TRANSFORMS.build(transform)
results = flip_module(results)
assert np.equal(original_img, results['img']).all()
assert np.equal(original_seg, results['gt_semantic_seg']).all()
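    # repeat the double-flip check with a depth map registered in seg_fields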
results['gt_depth_map'] = seg
results['seg_fields'] = ['gt_depth_map']
results = flip_module(results)
flip_module = TRANSFORMS.build(transform)
results = flip_module(results)
assert np.equal(original_img, results['img']).all()
assert np.equal(original_seg, results['gt_depth_map']).all()
def test_random_rotate_flip():
with pytest.raises(AssertionError):
transform = dict(type='RandomRotFlip', flip_prob=1.5)
TRANSFORMS.build(transform)
with pytest.raises(AssertionError):
transform = dict(type='RandomRotFlip', rotate_prob=1.5)
TRANSFORMS.build(transform)
with pytest.raises(AssertionError):
transform = dict(type='RandomRotFlip', degree=[20, 20, 20])
TRANSFORMS.build(transform)
with pytest.raises(AssertionError):
transform = dict(type='RandomRotFlip', degree=-20)
TRANSFORMS.build(transform)
transform = dict(
type='RandomRotFlip', flip_prob=1.0, rotate_prob=0, degree=20)
rot_flip_module = TRANSFORMS.build(transform)
results = dict()
img = mmcv.imread(
osp.join(
osp.dirname(__file__),
'../data/pseudo_synapse_dataset/img_dir/case0005_slice000.jpg'),
'color')
original_img = copy.deepcopy(img)
seg = np.array(
Image.open(
osp.join(
osp.dirname(__file__),
'../data/pseudo_synapse_dataset/ann_dir/case0005_slice000.png')
))
original_seg = copy.deepcopy(seg)
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
result_flip = rot_flip_module(results)
assert original_img.shape == result_flip['img'].shape
assert original_seg.shape == result_flip['gt_semantic_seg'].shape
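    # rotation only (flip_prob=0, rotate_prob=1.0); shapes should be preserved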
transform = dict(
type='RandomRotFlip', flip_prob=0, rotate_prob=1.0, degree=20)
rot_flip_module = TRANSFORMS.build(transform)
result_rotate = rot_flip_module(results)
assert original_img.shape == result_rotate['img'].shape
assert original_seg.shape == result_rotate['gt_semantic_seg'].shape
assert str(transform) == "{'type': 'RandomRotFlip'," \
" 'flip_prob': 0," \
" 'rotate_prob': 1.0," \
" 'degree': 20}"
def test_pad():
    # test assertion when both size and size_divisor are None
with pytest.raises(AssertionError):
transform = dict(type='Pad')
TRANSFORMS.build(transform)
transform = dict(type='Pad', size_divisor=32)
transform = TRANSFORMS.build(transform)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
# original img already divisible by 32
assert np.equal(results['img'], original_img).all()
img_shape = results['img'].shape
assert img_shape[0] % 32 == 0
assert img_shape[1] % 32 == 0
def test_normalize():
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True)
transform = dict(type='Normalize', **img_norm_cfg)
transform = TRANSFORMS.build(transform)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
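    # expected result: channels reversed (to_rgb=True), then (img - mean) / std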
mean = np.array(img_norm_cfg['mean'])
std = np.array(img_norm_cfg['std'])
converted_img = (original_img[..., ::-1] - mean) / std
assert np.allclose(results['img'], converted_img)
def test_random_crop():
# test assertion for invalid random crop
with pytest.raises(AssertionError):
RandomCrop(crop_size=(-1, 0))
results = dict()
img = mmcv.imread(osp.join('tests/data/color.jpg'), 'color')
seg = np.array(Image.open(osp.join('tests/data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
h, w, _ = img.shape
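    # crop 20 pixels off each spatial dimension and verify the output shapes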
pipeline = RandomCrop(crop_size=(h - 20, w - 20))
results = pipeline(results)
assert results['img'].shape[:2] == (h - 20, w - 20)
assert results['img_shape'] == (h - 20, w - 20)
assert results['gt_semantic_seg'].shape[:2] == (h - 20, w - 20)
def test_rgb2gray():
    # test assertion that out_channels should be greater than 0
with pytest.raises(AssertionError):
transform = dict(type='RGB2Gray', out_channels=-1)
TRANSFORMS.build(transform)
    # test assertion that weights should be a tuple of floats
with pytest.raises(AssertionError):
transform = dict(type='RGB2Gray', out_channels=1, weights=1.1)
TRANSFORMS.build(transform)
# test out_channels is None
transform = dict(type='RGB2Gray')
transform = TRANSFORMS.build(transform)
assert str(transform) == f'RGB2Gray(' \
f'out_channels={None}, ' \
f'weights={(0.299, 0.587, 0.114)})'
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
h, w, c = img.shape
seg = np.array(
Image.open(osp.join(osp.dirname(__file__), '../data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
assert results['img'].shape == (h, w, c)
assert results['img_shape'] == (h, w, c)
assert results['ori_shape'] == (h, w, c)
# test out_channels = 2
transform = dict(type='RGB2Gray', out_channels=2)
transform = TRANSFORMS.build(transform)
assert str(transform) == f'RGB2Gray(' \
f'out_channels={2}, ' \
f'weights={(0.299, 0.587, 0.114)})'
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
h, w, c = img.shape
seg = np.array(
Image.open(osp.join(osp.dirname(__file__), '../data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
assert results['img'].shape == (h, w, 2)
assert results['img_shape'] == (h, w, 2)
def test_photo_metric_distortion():
results = dict()
img = mmcv.imread(osp.join('tests/data/color.jpg'), 'color')
seg = np.array(Image.open(osp.join('tests/data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
| pipeline = PhotoMetricDistortion(saturation_range=(1., 1.)) | 2 | 2023-12-23 08:36:47+00:00 | 12k |
SkierProjects/MultiLabelImageClassificationPytorch | src/utils/training/train_model.py | [
{
"identifier": "LoggerFactory",
"path": "src/utils/logging/loggerfactory.py",
"snippet": "class LoggerFactory:\n DEFAULT_LOG_LEVEL = logging.INFO\n LOG_FILE_MAX_BYTES = 10 * 1024 * 1024 # 10 MB\n LOG_FILE_BACKUP_COUNT = 5 # Keep 5 backup files\n LONG_LOG_FORMAT = \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n SHORT_LOG_FORMAT = \"%(levelname)s: %(message)s\"\n DATE_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n\n @staticmethod\n def setup_logging(loggername, log_file=None, level=None, config=config):\n \"\"\"\n Set up logging configuration for a logger with the specified name.\n\n Parameters:\n logger_name (str): The name of the logger to set up.\n log_file (str): The path to the log file. If None, logs to stdout.\n level (int): The logging level. If None, defaults to the level specified in config.\n config (module): The configuration module with a 'log_level' attribute.\n\n Returns:\n logging.Logger: Configured logger instance.\n \"\"\"\n if level is None:\n level = getattr(logging, config.log_level, LoggerFactory.DEFAULT_LOG_LEVEL)\n \n # Since we are setting up handlers individually, we don't use basicConfig\n logger = logging.getLogger(loggername)\n logger.setLevel(level)\n\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(logging.Formatter(LoggerFactory.SHORT_LOG_FORMAT))\n logger.addHandler(console_handler)\n\n if log_file is not None:\n os.makedirs(os.path.dirname(log_file), exist_ok=True)\n file_handler = logging.handlers.RotatingFileHandler(\n log_file, maxBytes=LoggerFactory.LOG_FILE_MAX_BYTES, backupCount=LoggerFactory.LOG_FILE_BACKUP_COUNT)\n file_handler.setFormatter(logging.Formatter(LoggerFactory.LONG_LOG_FORMAT, LoggerFactory.DATE_FORMAT))\n logger.addHandler(file_handler)\n\n return logger\n\n @staticmethod\n def get_logger(name):\n \"\"\"\n Get a logger with the specified name.\n\n Parameters:\n name (str): The name of the logger to retrieve.\n\n Returns:\n logging.Logger: The logger instance with the given name.\n \"\"\"\n return logging.getLogger(name)"
},
{
"identifier": "ModelTrainer",
"path": "src/utils/training/modeltrainer.py",
"snippet": "class ModelTrainer():\n def __init__(self, device, trainloader, validloader, testloader, config=config):\n \"\"\"\n Initializes the ModelTrainer with the given datasets, device, and configuration.\n\n Parameters:\n device (torch.device): The device on which to train the model.\n trainloader (DataLoader): DataLoader for the training dataset.\n validloader (DataLoader): DataLoader for the validation dataset.\n testloader (DataLoader): DataLoader for the test dataset.\n config (module): Configuration module with necessary attributes.\n \"\"\"\n self.config = config\n self.device = device\n self.trainloader = trainloader\n self.validloader = validloader\n self.testloader = testloader\n self.model = modelfactory.create_model(self.config).to(device)\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.config.learning_rate)\n\n # Compute label frequencies and create weights for the loss function\n #self.label_freqs = self.compute_label_frequencies()\n #self.pos_weight = self.compute_loss_weights(self.label_freqs).to(device)\n self.criterion = nn.BCEWithLogitsLoss()#pos_weight=self.pos_weight)\n self.epochs = self.config.num_epochs\n self.lr_scheduler = modelutils.get_learningRate_scheduler(self.optimizer, config)\n self.last_train_loss = 10000\n self.last_valid_loss = 10000\n self.last_valid_f1 = 0\n self.current_lr = self.config.learning_rate\n # Initialize TensorBoard writer\n self.tensorBoardWriter = TensorBoardWriter(config)\n\n modelToLoadPath = pathutils.get_model_to_load_path(self.config)\n if self.config.continue_training and os.path.exists(modelToLoadPath):\n logger.info(\"Loading the best model...\") \n if self.config.embedding_layer_enabled or self.config.gcn_enabled and self.config.model_to_load_raw_weights != \"\":\n self.model, modelData = modelloadingutils.load_pretrained_weights_exclude_classifier(self.model, self.config, False)\n modelData[\"f1_score\"] = 0.0\n else:\n modelData = modelloadingutils.load_model(modelToLoadPath, self.config)\n self.model.load_state_dict(modelData['model_state_dict'])\n self.optimizer.load_state_dict(modelData['optimizer_state_dict'])\n\n self.best_f1_score = modelData[\"f1_score\"]\n self.start_epoch = modelData[\"epoch\"] + 1\n self.epochs = self.epochs + self.start_epoch\n self.best_model_state = {\n 'epoch': modelData[\"epoch\"],\n 'model_state_dict': self.model.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n 'loss': self.criterion,\n 'f1_score': self.best_f1_score,\n 'model_name': self.config.model_name,\n 'image_size': self.config.image_size,\n 'requires_grad': self.config.model_requires_grad,\n 'num_classes': self.config.num_classes,\n 'dropout': self.config.model_dropout_prob,\n 'embedding_layer': self.config.embedding_layer_enabled,\n 'gcn_enabled': self.config.gcn_enabled,\n 'batch_size': self.config.batch_size,\n 'optimizer': 'Adam',\n 'loss_function': 'BCEWithLogitsLoss'\n }\n else:\n self.best_f1_score = 0.0\n self.start_epoch = 0\n self.best_model_state = None\n self.current_epoch = self.start_epoch - 1\n self.best_f1_score_at_last_reset = 0\n self.patience_counter = 0\n self.patience = self.config.early_stopping_patience\n \n def __enter__(self):\n \"\"\"\n Enter the runtime context for the ModelTrainer object.\n Allows the ModelTrainer to be used with the 'with' statement, ensuring resources are managed properly.\n\n Returns:\n ModelTrainer: The instance with which the context was entered.\n \"\"\"\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"\n Exit the 
runtime context for the ModelTrainer object.\n This method is called after the 'with' block is executed, and it ensures that the TensorBoard writer is closed.\n\n Parameters:\n exc_type: Exception type, if any exception was raised within the 'with' block.\n exc_value: Exception value, the exception instance raised.\n traceback: Traceback object with details of where the exception occurred.\n \"\"\"\n self.tensorBoardWriter.close_writer()\n del self.model\n del self.optimizer\n torch.cuda.empty_cache()\n gc.collect()\n \n def train(self):\n \"\"\"\n Train the model for one epoch using the provided training dataset.\n :return: The average training loss for the epoch.\n \"\"\"\n self.current_epoch += 1\n logger.info('Training')\n self.model.train()\n train_running_loss = 0.0\n for data in tqdm(self.trainloader, total=len(self.trainloader)):\n images, targets = data['image'].to(self.device), data['label'].to(self.device).float()\n self.optimizer.zero_grad()\n\n if (self.config.embedding_layer_enabled or self.config.gcn_enabled):\n label_dropout_rate = 0.3\n use_labels = random.random() > label_dropout_rate\n if use_labels:\n outputs = self.model(images, targets)\n else:\n outputs = self.model(images)\n else:\n outputs = self.model(images)\n loss = self.criterion(outputs, targets)\n train_running_loss += loss.item()\n loss.backward()\n self.optimizer.step()\n \n train_loss = train_running_loss / len(self.trainloader.dataset)\n self.last_train_loss = train_loss\n return train_loss\n \n def validate(self, modelEvaluator, threshold=None):\n \"\"\"\n Validate the model on the validation dataset using a model evaluator.\n\n Parameters:\n modelEvaluator: An instance of the model evaluator class with an 'evaluate' method.\n threshold (Optional[float]): Threshold value for converting probabilities to class labels.\n\n Returns:\n tuple: A tuple containing the average validation loss and the micro-averaged F1 score.\n \"\"\"\n logger.info(\"Validating\")\n valid_loss, valid_f1, _, _ = modelEvaluator.evaluate(self.validloader, self.current_epoch, \"Validation\", threshold=threshold)\n self.last_valid_loss = valid_loss\n self.last_valid_f1 = valid_f1\n self.log_train_validation_results()\n return valid_loss, valid_f1\n \n def learningRateScheduler_check(self):\n \"\"\"\n Check and update the learning rate based on the validation loss. 
Log the updated learning rate to TensorBoard.\n \"\"\"\n self.lr_scheduler.step(self.last_valid_loss)\n self.current_lr = self.optimizer.param_groups[0]['lr']\n self.tensorBoardWriter.add_scalar('Learning Rate', self.current_lr, self.current_epoch)\n\n def log_train_validation_results(self):\n \"\"\"\n Log training and validation results to the logger and TensorBoard.\n Includes the train loss, validation loss, and validation F1 score for the current epoch.\n \"\"\"\n logger.info(f\"Train Loss: {self.last_train_loss:.4f}\")\n logger.info(f'Validation Loss: {self.last_valid_loss:.4f}')\n logger.info(f'Validation F1 Score: {self.last_valid_f1:.4f}')\n \n self.tensorBoardWriter.add_scalar('Loss/Train', self.last_train_loss, self.current_epoch)\n self.tensorBoardWriter.add_scalar('Loss/Validation', self.last_valid_loss, self.current_epoch)\n self.tensorBoardWriter.add_scalar('F1/Validation', self.last_valid_f1, self.current_epoch)\n\n def log_hparam_results(self, test_loss, test_f1):\n \"\"\"\n Log the hyperparameters and test metrics to TensorBoard.\n This method is used for visualizing the relationship between hyperparameters and the model's performance.\n\n Parameters:\n test_loss (float): The loss on the test dataset.\n test_f1 (float): The F1 score on the test dataset.\n \"\"\"\n hparams = metricutils.filter_dict_for_hparams(self.best_model_state)\n metrics = {\n 'best_val_f1_score': self.best_f1_score,\n 'final_train_loss': self.last_train_loss if self.last_train_loss else 0,\n 'final_valid_loss': self.last_valid_loss if self.last_valid_loss else 0,\n 'test_f1_score': test_f1,\n 'test_loss': test_loss\n }\n self.tensorBoardWriter.add_hparams(hparams, metrics)\n\n def log_gradients(self):\n \"\"\"\n Log the gradients of model parameters to TensorBoard.\n This is done periodically based on the current epoch to monitor training progress and diagnose issues.\n \"\"\"\n if self.current_epoch % 5 == 0: # Choose an interval that makes sense for your training regimen.\n for name, param in self.model.named_parameters():\n self.tensorBoardWriter.add_histogram(f'Parameters/{name}', param, self.current_epoch)\n if param.grad is not None:\n self.tensorBoardWriter.add_histogram(f'Gradients/{name}', param.grad, self.current_epoch)\n \n def check_early_stopping(self):\n \"\"\"\n Check if early stopping criteria are met based on the validation F1 score.\n If the score has not improved by a certain proportion over the patience window,\n trigger early stopping.\n\n Returns:\n bool: True if early stopping is triggered, False otherwise.\n \"\"\"\n improvement_threshold = self.config.early_stopping_threshold\n significant_improvement = False\n if self.last_valid_f1 > self.best_f1_score:\n logger.info(f\"Validation F1 Score improved from {self.best_f1_score:.4f} to {self.last_valid_f1:.4f}\")\n self.best_f1_score = self.last_valid_f1\n self.best_model_state = {\n 'epoch': self.current_epoch,\n 'model_state_dict': self.model.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n 'loss': self.criterion,\n 'f1_score': self.best_f1_score,\n 'model_name': self.config.model_name,\n 'requires_grad': self.config.model_requires_grad,\n 'num_classes': self.config.num_classes,\n 'dropout': self.config.model_dropout_prob,\n 'embedding_layer': self.config.embedding_layer_enabled,\n 'gcn_enabled': self.config.gcn_enabled,\n 'batch_size': self.config.batch_size,\n 'optimizer': 'Adam',\n 'loss_function': 'BCEWithLogitsLoss'\n }\n\n modelloadingutils.save_best_model(self.best_model_state)\n\n # Check for 
significant improvement since the last reset of the patience counter\n if self.last_valid_f1 - self.best_f1_score_at_last_reset >= improvement_threshold:\n logger.info(f\"Significant cumulative improvement of {self.last_valid_f1 - self.best_f1_score_at_last_reset:.4f} has been achieved since the last reset.\")\n significant_improvement = True\n self.best_f1_score_at_last_reset = self.last_valid_f1\n self.patience_counter = 0\n \n # Increment patience counter if no significant improvement\n if not significant_improvement:\n self.patience_counter += 1\n\n # If there hasn't been significant improvement over the patience window, trigger early stopping\n if self.patience_counter >= self.patience:\n logger.info(f\"Early stopping triggered after {self.patience} epochs without significant cumulative improvement.\")\n return True\n\n \n def save_final_model(self):\n \"\"\"\n Save the state of the model that achieved the best validation F1 score during training.\n The model state is saved to a file defined by the configuration.\n \"\"\"\n state_to_save = copy.deepcopy(self.best_model_state)\n modelloadingutils.save_final_model(self.best_model_state, self.best_f1_score, self.config)\n self.model.load_state_dict(state_to_save['model_state_dict'])\n\n def compute_label_frequencies(self):\n \"\"\"\n Computes the frequency of each label in the dataset.\n \n Returns:\n label_freqs (torch.Tensor): Tensor containing the frequency of each label.\n \"\"\"\n # Initialize a tensor to hold the frequency of each label.\n # This assumes that the number of labels is known and stored in `self.config.num_classes`.\n label_freqs = torch.zeros(self.config.num_classes, dtype=torch.float)\n\n # Iterate over the dataset and sum the one-hot encoded labels.\n for batch in tqdm(self.trainloader, total=len(self.trainloader)):\n labels = batch[\"label\"]\n label_freqs += labels.sum(dim=0) # Sum along the batch dimension.\n\n # Ensure that there's at least one count for each label to avoid division by zero.\n label_freqs = label_freqs.clamp(min=1) \n return label_freqs\n \n def compute_loss_weights(self, label_freqs):\n \"\"\"\n Computes the weights for each label to be used in the loss function.\n \n Parameters:\n label_freqs (torch.Tensor): Tensor containing the frequency of each label.\n \n Returns:\n weights (torch.Tensor): Tensor containing the weight for each label.\n \"\"\"\n # Compute the inverse frequency weights\n total_counts = label_freqs.sum()\n weights = total_counts / label_freqs\n \n # Normalize weights to prevent them from scaling the loss too much\n weights = weights / weights.mean()\n\n #weights = weights.view(-1) # Ensure it is a 1D tensor with shape [num_classes]\n #assert weights.shape[0] == self.config.num_classes, \"pos_weight must have the same size as num_classes\"\n \n return weights"
},
{
"identifier": "ModelEvaluator",
"path": "src/utils/evaluation/modelevaluator.py",
"snippet": "class ModelEvaluator:\n def __init__(self, model, criterion, device, tensorBoardWriter=None, config=config, model_data=None):\n \"\"\"\n Initializes the ModelEvaluator with a given model, loss criterion, device,\n optional TensorBoard writer, and configuration.\n\n Parameters:\n model (torch.nn.Module): The model to evaluate.\n criterion (function): The loss function.\n device (torch.device): The device to run evaluation on (CPU or GPU).\n tensorBoardWriter (TensorBoardWriter, optional): Writer for TensorBoard logging.\n config (object): An immutable configuration object with necessary parameters.\n \"\"\"\n self.model = model\n self.config = config\n self.criterion = criterion\n self.device = device\n self.num_classes = config.num_classes\n self.tensorBoardWriter = tensorBoardWriter\n self.model_data = model_data\n\n def __enter__(self):\n \"\"\"\n Context management method to use with 'with' statements.\n \"\"\"\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"\n Context management method to close the TensorBoard writer upon exiting the 'with' block.\n \"\"\"\n if self.tensorBoardWriter:\n self.tensorBoardWriter.close_writer()\n del self.model\n torch.cuda.empty_cache()\n gc.collect()\n\n @classmethod\n def from_trainer(cls, model_trainer):\n \"\"\"\n Creates a ModelEvaluator instance from a ModelTrainer instance by extracting\n the relevant attributes.\n\n Parameters:\n model_trainer (ModelTrainer): The trainer instance to extract attributes from.\n\n Returns:\n ModelEvaluator: A new instance of ModelEvaluator.\n \"\"\"\n return cls(\n model=model_trainer.model,\n criterion=model_trainer.criterion,\n device=model_trainer.device,\n config=model_trainer.config,\n tensorBoardWriter=model_trainer.tensorBoardWriter,\n model_data=model_trainer.best_model_state\n )\n \n @classmethod\n def from_file(cls, device, thisconfig, tensorBoardWriter=None):\n \"\"\"\n Creates a ModelEvaluator instance from a model file by loading in the model and preparing it\n to be run.\n\n Parameters:\n device (torch.device): The device to run evaluation on (CPU or GPU).\n tensorBoardWriter (TensorBoardWriter, optional): Writer for TensorBoard logging.\n config (object): An immutable configuration object with necessary parameters.\n \"\"\"\n \n model = modelfactory.create_model(thisconfig).to(device)\n criterion = nn.BCEWithLogitsLoss()\n\n modelToLoadPath = pathutils.get_model_to_load_path(thisconfig)\n if os.path.exists(modelToLoadPath):\n logger.info(\"Loading the best model...\") \n modelData = modelloadingutils.load_model(modelToLoadPath, thisconfig)\n model.load_state_dict(modelData['model_state_dict'])\n else:\n logger.error(f\"Could not find a model at path: {modelToLoadPath}\")\n raise ValueError(f\"Could not find a model at path: {modelToLoadPath}. 
Check to ensure the config/json value for model_name_to_load is correct!\")\n \n return cls(\n model=model,\n criterion=criterion,\n device=device,\n config=thisconfig,\n tensorBoardWriter=tensorBoardWriter,\n model_data=modelData\n )\n \n def single_image_prediction(self, preprocessed_image, threshold=None):\n \"\"\"Run a prediction for a single preprocessed image.\"\"\"\n self.model.eval() # Set the model to evaluation mode\n \n # Move the preprocessed image to the same device as the model\n preprocessed_image = preprocessed_image.to(self.device)\n \n with torch.no_grad():\n # Add a batch dimension to the image tensor\n image_batch = preprocessed_image.unsqueeze(0)\n outputs = self.model(image_batch)\n if threshold is not None:\n # Move the outputs to the CPU and convert to NumPy before thresholding\n outputs_np = outputs.cpu().numpy()\n outputs_np = metricutils.getpredictions_with_threshold(outputs_np, threshold)\n # Wrap the NumPy array back into a PyTorch tensor if necessary\n outputs = torch.from_numpy(outputs_np)\n # Remove the batch dimension from the outputs before returning\n outputs = outputs.squeeze(0)\n return outputs\n \n def predict(self, data_loader, return_true_labels=True, threshold=None):\n \"\"\"\n Perform inference on the given data_loader and return raw predictions.\n\n Parameters:\n data_loader (DataLoader): DataLoader for inference.\n return_true_labels (bool): If true, return true labels. Otherwise, skip label processing.\n\n Returns:\n prediction_labels (numpy.ndarray): Raw model outputs.\n true_labels (numpy.ndarray, optional): Corresponding true labels, if available and requested.\n avg_loss (float, optional): Average loss over dataset, if labels are available.\n \"\"\"\n self.model.eval() # Set the model to evaluation mode\n prediction_outputs = [] # List to store all raw model outputs\n true_labels = [] # List to store all labels if they are available\n image_paths = [] # List to store all image paths if they are available\n frame_counts = [] # List to store all frame counts if they are available\n total_loss = 0.0 # Initialize total loss\n\n with torch.no_grad(): # Disable gradient calculation for efficiency\n for batch in tqdm(data_loader, total=len(data_loader)):\n images = batch['image'].to(self.device)\n outputs = self.model(images)\n prediction_outputs.append(outputs.cpu().numpy()) # Store raw model outputs\n \n # Process labels if they are available and requested\n if return_true_labels and 'label' in batch:\n labels = batch['label'].to(self.device)\n loss = self.criterion(outputs, labels.float()) # Calculate loss\n total_loss += loss.item() # Accumulate loss\n true_labels.append(labels.cpu().numpy()) # Store labels\n elif not return_true_labels and 'image_path' in batch:\n image_paths.append(batch['image_path'])\n elif not return_true_labels and 'frame_count' in batch:\n frame_counts.append(batch['frame_count'])\n\n # Concatenate all raw outputs and optionally labels from all batches\n prediction_outputs = np.vstack(prediction_outputs)\n results = {'predictions': prediction_outputs}\n \n if return_true_labels and true_labels:\n true_labels = np.vstack(true_labels)\n avg_loss = total_loss / len(data_loader.dataset)\n results['true_labels'] = true_labels\n results['avg_loss'] = avg_loss\n\n if image_paths:\n results['image_paths'] = image_paths\n\n if frame_counts:\n results['frame_counts'] = frame_counts\n\n if threshold != None:\n predictions_binary = metricutils.getpredictions_with_threshold(prediction_outputs, threshold)\n results['predictions'] = 
predictions_binary\n\n return results\n\n def evaluate_predictions(self, data_loader, prediction_outputs, true_labels, epoch, average, datasetSubset=None, metricMode=None, threshold=None):\n \"\"\"\n Evaluate the model on the given data_loader.\n\n Parameters:\n data_loader (DataLoader): DataLoader for evaluation.\n prediction_outputs (numpy.ndarray): Raw model outputs.\n true_labels (numpy.ndarray): Corresponding true labels.\n epoch (int): The current epoch number, used for TensorBoard logging.\n datasetSubset (str): Indicates the subset of data evaluated (e.g., 'test', 'validation').\n average (str): Indicates the type of averaging to perform when computing metrics. Use None to get per-class metrics.\n metricMode (str, optional): Indicates from where this is being evaluated from (e.g., 'Train', 'Test').\n threshold (float, optional): The threshold value for binary predictions.\n\n Returns:\n f1_score (float): The F1 score of the model on the dataset.\n precision (float): The precision of the model on the dataset.\n recall (float): The recall of the model on the dataset.\n \"\"\"\n\n predictions_binary = metricutils.getpredictions_with_threshold(prediction_outputs, threshold)\n # Compute evaluation metrics\n precision, recall, f1 = metricutils.compute_metrics(true_labels, predictions_binary, average=average)\n # Log images with predictions to TensorBoard for a random batch, if configured\n if metricMode is not None and self.tensorBoardWriter is not None and datasetSubset is not None:\n random_batch_index = random.randint(0, len(data_loader) - 1)\n batch_dict = next(itertools.islice(data_loader, random_batch_index, None))\n images = batch_dict['image'] # Assuming the device transfer happens elsewhere if needed\n labels = batch_dict['label']\n \n start_index = random_batch_index * data_loader.batch_size\n end_index = min((random_batch_index + 1) * data_loader.batch_size, len(predictions_binary))\n\n selected_predictions = predictions_binary[start_index:end_index]\n selected_predictions_tensor = torch.tensor(selected_predictions, device=self.device, dtype=torch.float32)\n self.tensorBoardWriter.write_image_test_results(images, labels, selected_predictions_tensor, epoch, metricMode, datasetSubset)\n\n # Return the average loss and computed metrics\n return f1, precision, recall\n\n def evaluate(self, data_loader, epoch, datasetSubset, metricMode=None, average='micro', threshold=None):\n \"\"\"\n Evaluate the model on the given data_loader.\n\n Parameters:\n data_loader (DataLoader): DataLoader for evaluation.\n epoch (int): The current epoch number, used for TensorBoard logging.\n datasetSubset (str): Indicates the subset of data being evaluated (e.g., 'test', 'validation').\n average (str): Indicates the type of averaging to perform when computing metrics. 
Use None to get per-class metrics.\n metricMode (str, optional): Indicates from where this is being evaluated from (e.g., 'Train', 'Test').\n threshold (float, optional): The threshold value for binary predictions.\n\n Returns:\n avg_loss (float): The average loss over the dataset.\n f1_score (float): The F1 score of the model on the dataset.\n precision (float): The precision of the model on the dataset.\n recall (float): The recall of the model on the dataset.\n \"\"\"\n # Perform inference and get raw outputs\n prediction_results = self.predict(data_loader)\n all_outputs, all_labels, avg_loss = prediction_results['predictions'], prediction_results['true_labels'], prediction_results['avg_loss']\n\n f1, precision, recall = self.evaluate_predictions(data_loader, all_outputs, all_labels, epoch, average, datasetSubset, metricMode, threshold)\n\n # Return the average loss and computed metrics\n return avg_loss, f1, precision, recall"
},
{
"identifier": "evaluate_model",
"path": "src/utils/evaluation/test_model.py",
"snippet": "def evaluate_model(this_config=config):\n # initialize the computation device\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n test_loader = datasetutils.get_data_loader_by_name(\"test\", config=this_config)\n valid_loader = datasetutils.get_data_loader_by_name(\"valid\", config=this_config)\n valid_test_loader = datasetutils.get_data_loader_by_name(\"valid+test\", config=this_config, shuffle=True)\n\n # intialize the model\n with ModelEvaluator.from_file(device, this_config, TensorBoardWriter(config=this_config)) as modelEvaluator:\n epochs = modelEvaluator.model_data[\"epoch\"]\n\n valid_start_time = time.time()\n valid_results = modelEvaluator.predict(valid_loader)\n valid_end_time = time.time()\n\n test_start_time = time.time()\n test_results = modelEvaluator.predict(test_loader)\n test_end_time = time.time()\n\n valid_test_start_time = time.time() \n validtest_results = modelEvaluator.predict(valid_test_loader)\n valid_test_end_time = time.time()\n\n\n valid_predictions, valid_correct_labels, valid_loss = valid_results['predictions'], valid_results['true_labels'], valid_results['avg_loss']\n test_predictions, test_correct_labels, test_loss = test_results['predictions'], test_results['true_labels'], test_results['avg_loss']\n validtest_predictions, validtest_correct_labels, validtest_loss = validtest_results['predictions'], validtest_results['true_labels'], validtest_results['avg_loss']\n\n valid_elapsed_time = valid_end_time - valid_start_time\n test_elapsed_time = test_end_time - test_start_time\n valid_test_elapsed_time = valid_test_end_time - valid_test_start_time\n\n valid_num_images = len(valid_loader.dataset)\n test_num_images = len(test_loader.dataset)\n valid_test_num_images = len(valid_test_loader.dataset)\n\n valid_images_per_second = valid_num_images / valid_elapsed_time\n test_images_per_second = test_num_images / test_elapsed_time\n valid_test_images_per_second = valid_test_num_images / valid_test_elapsed_time\n\n avg_images_per_second = (valid_images_per_second + test_images_per_second + valid_test_images_per_second) / 3\n\n logger.info(f\"Validation Img/sec: {valid_images_per_second}\")\n logger.info(f\"Test Img/sec: {test_images_per_second}\")\n logger.info(f\"Validation+Test Img/sec: {valid_test_images_per_second}\")\n logger.info(f\"Avg Img/sec: {avg_images_per_second}\")\n\n logger.info(f\"Validation Loss: {valid_loss}\")\n logger.info(f\"Test Loss: {test_loss}\")\n logger.info(f\"Validation+Test Loss: {validtest_loss}\")\n\n val_f1_default, val_precision_default, val_recall_default = modelEvaluator.evaluate_predictions(valid_loader, valid_predictions, valid_correct_labels, epochs, threshold=0.5, average=\"micro\")\n test_f1_default, test_precision_default, test_recall_default = modelEvaluator.evaluate_predictions(test_loader, test_predictions, test_correct_labels, epochs, threshold=0.5, average=\"micro\")\n validtest_f1_default, validtest_precision_default, validtest_recall_default = modelEvaluator.evaluate_predictions(valid_test_loader, validtest_predictions, validtest_correct_labels, epochs, threshold=0.5, average=\"micro\")\n\n logger.info(f\"Validation Default F1: F1: {val_f1_default}, Precision: {val_precision_default}, Recall: {val_recall_default} at Threshold: 0.5\")\n logger.info(f\"Test Default F1: F1: {test_f1_default}, Precision: {test_precision_default}, Recall: {test_recall_default} at Threshold: 0.5\")\n logger.info(f\"Valid+Test Default F1: F1: {validtest_f1_default}, Precision: {validtest_precision_default}, 
Recall: {validtest_recall_default} at Threshold: 0.5\")\n\n\n val_best_f1_threshold, val_f1_valoptimized, val_precision_valoptimized, val_recall_valoptimized = metricutils.find_best_threshold(valid_predictions, valid_correct_labels, \"f1\")\n logger.info(f\"Validation Best F1: F1: {val_f1_valoptimized}, Precision: {val_precision_valoptimized}, Recall: {val_recall_valoptimized} at Threshold:{val_best_f1_threshold}\")\n test_f1_valoptimized, test_precision_valoptimized, test_recall_valoptimized = modelEvaluator.evaluate_predictions(test_loader, test_predictions, test_correct_labels, epochs, threshold=val_best_f1_threshold, average=\"micro\", datasetSubset=\"Test\", metricMode=\"Test\")\n validtest_f1_valoptimized, validtest_precision_valoptimized, validtest_recall_valoptimized = modelEvaluator.evaluate_predictions(valid_test_loader, validtest_predictions, validtest_correct_labels, epochs, threshold=val_best_f1_threshold, average=\"micro\")\n logger.info(f\"Test Best F1 (measured from Val): F1: {test_f1_valoptimized}, Precision: {test_precision_valoptimized}, Recall: {test_recall_valoptimized} at Threshold:{val_best_f1_threshold}\")\n logger.info(f\"Valid+Test Best F1 (measured from Val): F1: {validtest_f1_valoptimized}, Precision: {validtest_precision_valoptimized}, Recall: {validtest_recall_valoptimized} at Threshold:{val_best_f1_threshold}\")\n\n best_f1_thresholds_per_class = metricutils.find_best_thresholds_per_class(valid_predictions, valid_correct_labels)\n test_f1_valoptimizedperclass, test_precision_valoptimizedperclass, test_recall_valoptimizedperclass = modelEvaluator.evaluate_predictions(test_loader, test_predictions, test_correct_labels, epochs, threshold=best_f1_thresholds_per_class, average=\"micro\")\n logger.info(f\"Test Best F1 Per Class (Val Optimized): F1: {test_f1_valoptimizedperclass}, Precision: {test_precision_valoptimizedperclass}, Recall: {test_recall_valoptimizedperclass} at Threshold:{best_f1_thresholds_per_class}\")\n\n hparams = metricutils.filter_dict_for_hparams(modelEvaluator.model_data)\n final_metrics = {\n 'F1/Default/Validation': val_f1_default,\n 'F1/Default/Test': test_f1_default,\n 'F1/Default/Valid+Test': validtest_f1_default,\n 'F1/ValOptimizedThreshold/Validation': val_f1_valoptimized,\n 'F1/ValOptimizedThreshold/Test': test_f1_valoptimized,\n 'F1/ValOptimizedThreshold/Valid+Test': validtest_f1_valoptimized,\n 'Precision/Default/Validation': val_precision_default,\n 'Precision/Default/Test': test_precision_default,\n 'Precision/Default/Valid+Test': validtest_precision_default,\n 'Precision/ValOptimizedThreshold/Validation': val_precision_valoptimized,\n 'Precision/ValOptimizedThreshold/Test': test_precision_valoptimized,\n 'Precision/ValOptimizedThreshold/Valid+Test': validtest_precision_valoptimized,\n 'Recall/Default/Validation': val_recall_default,\n 'Recall/Default/Test': test_recall_default,\n 'Recall/Default/Valid+Test': validtest_recall_default,\n 'Recall/ValOptimizedThreshold/Validation': val_recall_valoptimized,\n 'Recall/ValOptimizedThreshold/Test': test_recall_valoptimized,\n 'Recall/ValOptimizedThreshold/Valid+Test': validtest_recall_valoptimized,\n 'F1/ValOptimizedThresholdPerClass/Test': test_f1_valoptimizedperclass,\n 'Precision/ValOptimizedThresholdPerClass/Test': test_precision_valoptimizedperclass,\n 'Recall/ValOptimizedThresholdPerClass/Test': test_recall_valoptimizedperclass,\n 'ImagesPerSecond/Validation': valid_images_per_second,\n 'ImagesPerSecond/Test': test_images_per_second,\n 'ImagesPerSecond/Valid+Test': 
valid_test_images_per_second,\n 'ImagesPerSecond/Average': avg_images_per_second\n }\n modelEvaluator.tensorBoardWriter.add_scalars_from_dict(final_metrics, epochs)\n modelEvaluator.tensorBoardWriter.add_hparams(hparams, final_metrics)\n\n test_f1s_per_class, _, _ = modelEvaluator.evaluate_predictions(test_loader, test_predictions, test_correct_labels, epochs, threshold=val_best_f1_threshold, average=None)\n tagmappings = datasetutils.get_index_to_tag_mapping()\n for class_index in range(this_config.num_classes):\n modelEvaluator.tensorBoardWriter.add_scalar(f'F1_Class_{tagmappings[class_index]}/ValOptimizedThreshold/Test', test_f1s_per_class[class_index], epochs)"
}
] | from config import config
from src.utils.logging.loggerfactory import LoggerFactory
from src.utils.training.modeltrainer import ModelTrainer
from src.utils.evaluation.modelevaluator import ModelEvaluator
from src.utils.evaluation.test_model import evaluate_model
import torch
import utils.dataset.datasetutils as datasetutils | 8,344 | logger = LoggerFactory.get_logger(f"logger.{__name__}")
def train_model(config=config):
"""
Train a model based on the provided configuration.
Parameters:
config: Configuration module with necessary attributes.
"""
# Initialize the computation device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Get train, validation, and test dataset loaders
train_loader, valid_loader, test_loader = datasetutils.get_train_valid_test_loaders(config=config)
# Initialize the model trainer
| logger = LoggerFactory.get_logger(f"logger.{__name__}")
def train_model(config=config):
"""
Train a model based on the provided configuration.
Parameters:
config: Configuration module with necessary attributes.
"""
# Initialize the computation device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Get train, validation, and test dataset loaders
train_loader, valid_loader, test_loader = datasetutils.get_train_valid_test_loaders(config=config)
# Initialize the model trainer | with ModelTrainer(device, train_loader, valid_loader, test_loader, config=config) as modelTrainer, ModelEvaluator.from_trainer(modelTrainer) as modelEvaluator: | 2 | 2023-12-25 18:45:52+00:00 | 12k |
jpivarski/ragged | src/ragged/_spec_searching_functions.py | [
{
"identifier": "_box",
"path": "src/ragged/_spec_array_object.py",
"snippet": "def _box(\n cls: type[array],\n output: ak.Array | np.number | SupportsDLPack,\n *,\n dtype: None | Dtype = None,\n) -> array:\n if isinstance(output, ak.Array):\n impl = output\n shape, dtype_observed = _shape_dtype(output.layout)\n if dtype is not None and dtype != dtype_observed:\n impl = ak.values_astype(impl, dtype)\n else:\n dtype = dtype_observed\n device = ak.backend(output)\n\n elif isinstance(output, np.number):\n impl = np.array(output)\n shape = output.shape\n dtype_observed = output.dtype\n if dtype is not None and dtype != dtype_observed:\n impl = impl.astype(dtype)\n else:\n dtype = dtype_observed\n device = \"cpu\"\n\n else:\n impl = output\n shape = output.shape # type: ignore[union-attr]\n dtype_observed = output.dtype # type: ignore[union-attr]\n if dtype is not None and dtype != dtype_observed:\n impl = impl.astype(dtype)\n else:\n dtype = dtype_observed\n device = \"cpu\" if isinstance(output, np.ndarray) else \"cuda\"\n\n return cls._new(impl, shape, dtype, device) # pylint: disable=W0212"
},
{
"identifier": "_unbox",
"path": "src/ragged/_spec_array_object.py",
"snippet": "def _unbox(*inputs: array) -> tuple[ak.Array | SupportsDLPack, ...]:\n if len(inputs) > 1 and any(type(inputs[0]) is not type(x) for x in inputs):\n types = \"\\n\".join(f\"{type(x).__module__}.{type(x).__name__}\" for x in inputs)\n msg = f\"mixed array types: {types}\"\n raise TypeError(msg)\n\n return tuple(x._impl for x in inputs) # pylint: disable=W0212"
},
{
"identifier": "array",
"path": "src/ragged/_spec_array_object.py",
"snippet": "class array: # pylint: disable=C0103\n \"\"\"\n Ragged array class and constructor.\n\n https://data-apis.org/array-api/latest/API_specification/array_object.html\n \"\"\"\n\n # Constructors, internal functions, and other methods that are unbound by\n # the Array API specification.\n\n _impl: ak.Array | SupportsDLPack # ndim > 0 ak.Array or ndim == 0 NumPy or CuPy\n _shape: Shape\n _dtype: Dtype\n _device: Device\n\n @classmethod\n def _new(cls, impl: ak.Array, shape: Shape, dtype: Dtype, device: Device) -> array:\n \"\"\"\n Simple/fast array constructor for internal code.\n \"\"\"\n\n out = cls.__new__(cls)\n out._impl = impl\n out._shape = shape\n out._dtype = dtype\n out._device = device\n return out\n\n def __init__(\n self,\n obj: (\n array\n | ak.Array\n | bool\n | int\n | float\n | complex\n | NestedSequence[bool | int | float | complex]\n | SupportsBufferProtocol\n | SupportsDLPack\n ),\n dtype: None | Dtype | type | str = None,\n device: None | Device = None,\n copy: None | bool = None,\n ):\n \"\"\"\n Primary array constructor, same as `ragged.asarray`.\n\n Args:\n obj: Object to be converted to an array. May be a Python scalar, a\n (possibly nested) sequence of Python scalars, or an object\n supporting the Python buffer protocol or DLPack.\n dtype: Output array data type. If `dtype` is `None`, the output\n array data type is inferred from the data type(s) in `obj`.\n If all input values are Python scalars, then, in order of\n precedence,\n - if all values are of type `bool`, the output data type is\n `bool`.\n - if all values are of type `int` or are a mixture of `bool`\n and `int`, the output data type is `np.int64`.\n - if one or more values are `complex` numbers, the output\n data type is `np.complex128`.\n - if one or more values are `float`s, the output data type\n is `np.float64`.\n device: Device on which to place the created array. If device is\n `None` and `obj` is an array, the output array device is\n inferred from `obj`. If `\"cpu\"`, the array is backed by NumPy\n and resides in main memory; if `\"cuda\"`, the array is backed by\n CuPy and resides in CUDA global memory.\n copy: Boolean indicating whether or not to copy the input. If `True`,\n this function always copies. If `False`, the function never\n copies for input which supports the buffer protocol and raises\n a ValueError in case a copy would be necessary. 
If `None`, the\n function reuses the existing memory buffer if possible and\n copies otherwise.\n \"\"\"\n\n if isinstance(obj, array):\n self._impl = obj._impl\n self._shape, self._dtype = obj._shape, obj._dtype\n\n elif isinstance(obj, ak.Array):\n self._impl = obj\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n\n elif hasattr(obj, \"__dlpack_device__\") and getattr(obj, \"shape\", None) == ():\n device_type, _ = obj.__dlpack_device__()\n if (\n isinstance(device_type, enum.Enum) and device_type.value == 1\n ) or device_type == 1:\n self._impl = np.array(obj)\n self._shape, self._dtype = (), self._impl.dtype\n elif (\n isinstance(device_type, enum.Enum) and device_type.value == 2\n ) or device_type == 2:\n cp = _import.cupy()\n self._impl = cp.array(obj)\n self._shape, self._dtype = (), self._impl.dtype\n else:\n msg = f\"unsupported __dlpack_device__ type: {device_type}\"\n raise TypeError(msg)\n\n elif isinstance(obj, (bool, numbers.Complex)):\n self._impl = np.array(obj)\n self._shape, self._dtype = (), self._impl.dtype\n\n else:\n self._impl = ak.Array(obj)\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n\n if dtype is not None and not isinstance(dtype, np.dtype):\n dtype = np.dtype(dtype)\n\n if dtype is not None and dtype != self._dtype:\n if isinstance(self._impl, ak.Array):\n self._impl = ak.values_astype(self._impl, dtype)\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._impl = np.array(obj, dtype=dtype)\n self._dtype = dtype\n\n if self._dtype.fields is not None:\n msg = f\"dtype must not have fields: dtype.fields = {self._dtype.fields}\"\n raise TypeError(msg)\n\n if self._dtype.shape != ():\n msg = f\"dtype must not have a shape: dtype.shape = {self._dtype.shape}\"\n raise TypeError(msg)\n\n if self._dtype.type not in numeric_types:\n msg = f\"dtype must be numeric (bool, [u]int*, float*, complex*): dtype.type = {self._dtype.type}\"\n raise TypeError(msg)\n\n if device is not None:\n if isinstance(self._impl, ak.Array) and device != ak.backend(self._impl):\n self._impl = ak.to_backend(self._impl, device)\n elif isinstance(self._impl, np.ndarray) and device == \"cuda\":\n cp = _import.cupy()\n self._impl = cp.array(self._impl)\n\n assert copy is None, \"TODO\"\n\n def __str__(self) -> str:\n \"\"\"\n String representation of the array.\n \"\"\"\n\n if len(self._shape) == 0:\n return f\"{self._impl}\"\n elif len(self._shape) == 1:\n return f\"{ak._prettyprint.valuestr(self._impl, 1, 80)}\"\n else:\n prep = ak._prettyprint.valuestr(self._impl, 20, 80 - 4)[1:-1].replace(\n \"\\n \", \"\\n \"\n )\n return f\"[\\n {prep}\\n]\"\n\n def __repr__(self) -> str:\n \"\"\"\n REPL-string representation of the array.\n \"\"\"\n\n if len(self._shape) == 0:\n return f\"ragged.array({self._impl})\"\n elif len(self._shape) == 1:\n return f\"ragged.array({ak._prettyprint.valuestr(self._impl, 1, 80 - 14)})\"\n else:\n prep = ak._prettyprint.valuestr(self._impl, 20, 80 - 4)[1:-1].replace(\n \"\\n \", \"\\n \"\n )\n return f\"ragged.array([\\n {prep}\\n])\"\n\n def tolist(\n self,\n ) -> bool | int | float | complex | NestedSequence[bool | int | float | complex]:\n return self._impl.tolist() # type: ignore[no-any-return,union-attr]\n\n # Attributes: https://data-apis.org/array-api/latest/API_specification/array_object.html#attributes\n\n @property\n def dtype(self) -> Dtype:\n \"\"\"\n Data type of the array elements.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.dtype.html\n \"\"\"\n\n return 
self._dtype\n\n @property\n def device(self) -> Device:\n \"\"\"\n Hardware device the array data resides on.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.device.html\n \"\"\"\n\n return self._device\n\n @property\n def mT(self) -> array:\n \"\"\"\n Transpose of a matrix (or a stack of matrices).\n\n Raises:\n ValueError: If any ragged dimension's lists are not sorted from longest\n to shortest, which is the only way that left-aligned ragged\n transposition is possible.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.mT.html\n \"\"\"\n\n assert False, \"TODO 1\"\n\n @property\n def ndim(self) -> int:\n \"\"\"\n Number of array dimensions (axes).\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.ndim.html\n \"\"\"\n\n return len(self._shape)\n\n @property\n def shape(self) -> Shape:\n \"\"\"\n Array dimensions.\n\n Regular dimensions are represented by `int` values in the `shape` and\n irregular (ragged) dimensions are represented by `None`.\n\n According to the specification, \"An array dimension must be `None` if\n and only if a dimension is unknown,\" which is a different\n interpretation than we are making here.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.shape.html\n \"\"\"\n\n return self._shape\n\n @property\n def size(self) -> None | int:\n \"\"\"\n Number of elements in an array.\n\n This property never returns `None` because we do not consider\n dimensions to be unknown, and numerical values within ragged\n lists can be counted.\n\n Example:\n An array like `ragged.array([[1.1, 2.2, 3.3], [], [4.4, 5.5]])` has\n a size of 5 because it contains 5 numerical values.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.size.html\n \"\"\"\n\n if len(self._shape) == 0:\n return 1\n else:\n return int(ak.count(self._impl))\n\n @property\n def T(self) -> array:\n \"\"\"\n Transpose of the array.\n\n Raises:\n ValueError: If any ragged dimension's lists are not sorted from longest\n to shortest, which is the only way that left-aligned ragged\n transposition is possible.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.T.html\n \"\"\"\n\n assert False, \"TODO 2\"\n\n # methods: https://data-apis.org/array-api/latest/API_specification/array_object.html#methods\n\n def __abs__(self) -> array:\n \"\"\"\n Calculates the absolute value for each element of an array instance.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__abs__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n return ns.abs(self)\n\n def __add__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates the sum for each element of an array instance with the\n respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__add__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.add(self, other)\n\n def __and__(self, other: int | bool | array, /) -> array:\n \"\"\"\n Evaluates `self_i & other_i` for each element of an array instance with\n the respective element of the array other.\n\n 
https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__and__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.bitwise_and(self, other)\n\n def __array_namespace__(self, *, api_version: None | str = None) -> Any:\n \"\"\"\n Returns an object that has all the array API functions on it.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__array_namespace__.html\n \"\"\"\n\n import ragged # pylint: disable=C0415,R0401\n\n if api_version is not None and api_version != ragged.__array_api_version__:\n msg = f\"api_version {api_version!r} is not implemented; {ragged.__array_api_version__ = }\"\n raise NotImplementedError(msg)\n\n return ragged\n\n def __bool__(self) -> bool: # FIXME pylint: disable=E0304\n \"\"\"\n Converts a zero-dimensional array to a Python `bool` object.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__bool__.html\n \"\"\"\n\n return bool(self._impl)\n\n def __complex__(self) -> complex:\n \"\"\"\n Converts a zero-dimensional array to a Python `complex` object.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__complex__.html\n \"\"\"\n\n return complex(self._impl) # type: ignore[arg-type]\n\n def __dlpack__(self, *, stream: None | int | Any = None) -> PyCapsule:\n \"\"\"\n Exports the array for consumption by `from_dlpack()` as a DLPack\n capsule.\n\n Args:\n stream: CuPy Stream object (https://docs.cupy.dev/en/stable/reference/generated/cupy.cuda.Stream.html)\n if not `None`.\n\n Raises:\n ValueError: If any dimensions are ragged.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__dlpack__.html\n \"\"\"\n\n assert stream, \"TODO\"\n assert False, \"TODO 9\"\n\n def __dlpack_device__(self) -> tuple[enum.Enum, int]:\n \"\"\"\n Returns device type and device ID in DLPack format.\n\n Raises:\n ValueError: If any dimensions are ragged.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__dlpack_device__.html\n \"\"\"\n\n assert False, \"TODO 10\"\n\n def __eq__(self, other: int | float | bool | array, /) -> array: # type: ignore[override]\n \"\"\"\n Computes the truth value of `self_i == other_i` for each element of an\n array instance with the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__eq__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.equal(self, other)\n\n def __float__(self) -> float:\n \"\"\"\n Converts a zero-dimensional array to a Python `float` object.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__float__.html\n \"\"\"\n\n return float(self._impl) # type: ignore[arg-type]\n\n def __floordiv__(self, other: int | float | array, /) -> array:\n \"\"\"\n Evaluates `self_i // other_i` for each element of an array instance\n with the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__floordiv__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = 
array(other, device=self._device)\n\n return ns.floor_divide(self, other)\n\n def __ge__(self, other: int | float | array, /) -> array:\n \"\"\"\n Computes the truth value of `self_i >= other_i` for each element of an\n array instance with the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__ge__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.greater_equal(self, other)\n\n def __getitem__(self, key: GetSliceKey, /) -> array:\n \"\"\"\n Returns self[key].\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__getitem__.html\n \"\"\"\n\n assert False, \"TODO 15\"\n\n def __gt__(self, other: int | float | array, /) -> array:\n \"\"\"\n Computes the truth value of `self_i > other_i` for each element of an\n array instance with the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__gt__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.greater(self, other)\n\n def __index__(self) -> int: # FIXME pylint: disable=E0305\n \"\"\"\n Converts a zero-dimensional integer array to a Python `int` object.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__index__.html\n \"\"\"\n\n return self._impl.__index__() # type: ignore[no-any-return, union-attr]\n\n def __int__(self) -> int:\n \"\"\"\n Converts a zero-dimensional array to a Python `int` object.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__int__.html\n \"\"\"\n\n return int(self._impl) # type: ignore[arg-type]\n\n def __invert__(self) -> array:\n \"\"\"\n Evaluates `~self_i` for each element of an array instance.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__invert__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n return ns.bitwise_invert(self)\n\n def __le__(self, other: int | float | array, /) -> array:\n \"\"\"\n Computes the truth value of `self_i <= other_i` for each element of an\n array instance with the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__le__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.less_equal(self, other)\n\n def __lshift__(self, other: int | array, /) -> array:\n \"\"\"\n Evaluates `self_i << other_i` for each element of an array instance\n with the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__lshift__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.bitwise_left_shift(self, other)\n\n def __lt__(self, other: int | float | array, /) -> array:\n \"\"\"\n Computes the truth value of `self_i < other_i` for each element of an\n array instance with the respective element of the 
array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__lt__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.less(self, other)\n\n def __matmul__(self, other: array, /) -> array:\n \"\"\"\n Computes the matrix product.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__matmul__.html\n \"\"\"\n\n assert False, \"TODO 22\"\n\n def __mod__(self, other: int | float | array, /) -> array:\n \"\"\"\n Evaluates `self_i % other_i` for each element of an array instance with\n the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__mod__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.remainder(self, other)\n\n def __mul__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates the product for each element of an array instance with the\n respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__mul__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.multiply(self, other)\n\n def __ne__(self, other: int | float | bool | array, /) -> array: # type: ignore[override]\n \"\"\"\n Computes the truth value of `self_i != other_i` for each element of an\n array instance with the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__ne__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.not_equal(self, other)\n\n def __neg__(self) -> array:\n \"\"\"\n Evaluates `-self_i` for each element of an array instance.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__neg__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n return ns.negative(self)\n\n def __or__(self, other: int | bool | array, /) -> array:\n \"\"\"\n Evaluates `self_i | other_i` for each element of an array instance with\n the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__or__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.bitwise_or(self, other)\n\n def __pos__(self) -> array:\n \"\"\"\n Evaluates `+self_i` for each element of an array instance.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__pos__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n return ns.positive(self)\n\n def __pow__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates an implementation-dependent approximation of exponentiation\n by raising each element (the base) of an array instance to the power of\n 
`other_i` (the exponent), where `other_i` is the corresponding element\n of the array `other`.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__pow__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.pow(self, other)\n\n def __rshift__(self, other: int | array, /) -> array:\n \"\"\"\n Evaluates `self_i >> other_i` for each element of an array instance\n with the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__rshift__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.bitwise_right_shift(self, other)\n\n def __setitem__(\n self, key: SetSliceKey, value: int | float | bool | array, /\n ) -> None:\n \"\"\"\n Sets `self[key]` to value.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__setitem__.html\n \"\"\"\n\n assert False, \"TODO 31\"\n\n def __sub__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates the difference for each element of an array instance with\n the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__sub__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.subtract(self, other)\n\n def __truediv__(self, other: int | float | array, /) -> array:\n \"\"\"\n Evaluates `self_i / other_i` for each element of an array instance with\n the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__truediv__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.divide(self, other)\n\n def __xor__(self, other: int | bool | array, /) -> array:\n \"\"\"\n Evaluates `self_i ^ other_i` for each element of an array instance with\n the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__xor__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.bitwise_xor(self, other)\n\n def to_device(self, device: Device, /, *, stream: None | int | Any = None) -> array:\n \"\"\"\n Copy the array from the device on which it currently resides to the\n specified device.\n\n Args:\n device: If `\"cpu\"`, the array is backed by NumPy and resides in\n main memory; if `\"cuda\"`, the array is backed by CuPy and\n resides in CUDA global memory.\n stream: CuPy Stream object (https://docs.cupy.dev/en/stable/reference/generated/cupy.cuda.Stream.html)\n for `device=\"cuda\"`.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.to_device.html\n \"\"\"\n\n if isinstance(self._impl, ak.Array):\n if device != ak.backend(self._impl):\n assert stream is None, \"TODO\"\n impl = ak.to_backend(self._impl, device)\n else:\n impl = self._impl\n\n elif 
isinstance(self._impl, np.ndarray):\n # self._impl is a NumPy 0-dimensional array\n if device == \"cuda\":\n assert stream is None, \"TODO\"\n cp = _import.cupy()\n impl = cp.array(self._impl)\n else:\n impl = self._impl\n\n else:\n # self._impl is a CuPy 0-dimensional array\n impl = self._impl.get() if device == \"cpu\" else self._impl # type: ignore[union-attr]\n\n return self._new(impl, self._shape, self._dtype, device)\n\n # in-place operators: https://data-apis.org/array-api/2022.12/API_specification/array_object.html#in-place-operators\n\n def __iadd__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates `self = self + other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self + other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __isub__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates `self = self - other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self - other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __imul__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates `self = self * other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self * other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __itruediv__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates `self = self / other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self / other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __ifloordiv__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates `self = self // other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self // other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __ipow__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates `self = self ** other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self**other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: 
ignore[union-attr]\n return self\n\n def __imod__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates `self = self % other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self % other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __imatmul__(self, other: array, /) -> array:\n \"\"\"\n Calculates `self = self @ other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self @ other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __iand__(self, other: int | bool | array, /) -> array:\n \"\"\"\n Calculates `self = self & other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self & other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __ior__(self, other: int | bool | array, /) -> array:\n \"\"\"\n Calculates `self = self | other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self | other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __ixor__(self, other: int | bool | array, /) -> array:\n \"\"\"\n Calculates `self = self ^ other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self ^ other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __ilshift__(self, other: int | array, /) -> array:\n \"\"\"\n Calculates `self = self << other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self << other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __irshift__(self, other: int | array, /) -> array:\n \"\"\"\n Calculates `self = self >> other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self >> other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n # reflected 
operators: https://data-apis.org/array-api/2022.12/API_specification/array_object.html#reflected-operators\n\n __radd__ = __add__\n __rsub__ = __sub__\n __rmul__ = __mul__\n __rtruediv__ = __truediv__\n __rfloordiv__ = __floordiv__\n __rpow__ = __pow__\n __rmod__ = __mod__\n __rmatmul__ = __matmul__\n __rand__ = __and__\n __ror__ = __or__\n __rxor__ = __xor__\n __rlshift__ = __lshift__\n __rrshift__ = __rshift__"
}
] | import awkward as ak
import numpy as np
from ._spec_array_object import _box, _unbox, array | 10,499 | # BSD 3-Clause License; see https://github.com/scikit-hep/ragged/blob/main/LICENSE
"""
https://data-apis.org/array-api/latest/API_specification/searching_functions.html
"""
from __future__ import annotations
def _remove_optiontype(x: ak.contents.Content) -> ak.contents.Content:
if x.is_list:
return x.copy(content=_remove_optiontype(x.content))
elif x.is_option:
return x.content
else:
return x
def argmax(x: array, /, *, axis: None | int = None, keepdims: bool = False) -> array:
"""
Returns the indices of the maximum values along a specified axis.
When the maximum value occurs multiple times, only the indices
corresponding to the first occurrence are returned.
Args:
x: Input array.
axis: Axis along which to search. If `None`, the function returns the
index of the maximum value of the flattened array.
keepdims: If `True`, the reduced axes (dimensions) are included in the
result as singleton dimensions, and, accordingly, the result is
broadcastable with the input array. Otherwise, if `False`, the
reduced axes (dimensions) are not included in the result.
Returns:
If `axis` is `None`, a zero-dimensional array containing the index of
the first occurrence of the maximum value; otherwise, a
non-zero-dimensional array containing the indices of the maximum
values. The returned array has data type `np.int64`.
https://data-apis.org/array-api/latest/API_specification/generated/array_api.argmax.html
"""
out = np.argmax(*_unbox(x), axis=axis, keepdims=keepdims)
if out is None:
msg = "cannot compute argmax of an array with no data"
raise ValueError(msg)
if isinstance(out, ak.Array):
if ak.any(ak.is_none(out, axis=-1)):
msg = f"cannot compute argmax at axis={axis} because some lists at this depth have zero length"
raise ValueError(msg)
out = ak.Array(
_remove_optiontype(out.layout), behavior=out.behavior, attrs=out.attrs
)
| # BSD 3-Clause License; see https://github.com/scikit-hep/ragged/blob/main/LICENSE
"""
https://data-apis.org/array-api/latest/API_specification/searching_functions.html
"""
from __future__ import annotations
def _remove_optiontype(x: ak.contents.Content) -> ak.contents.Content:
if x.is_list:
return x.copy(content=_remove_optiontype(x.content))
elif x.is_option:
return x.content
else:
return x
def argmax(x: array, /, *, axis: None | int = None, keepdims: bool = False) -> array:
"""
Returns the indices of the maximum values along a specified axis.
When the maximum value occurs multiple times, only the indices
corresponding to the first occurrence are returned.
Args:
x: Input array.
axis: Axis along which to search. If `None`, the function returns the
index of the maximum value of the flattened array.
keepdims: If `True`, the reduced axes (dimensions) are included in the
result as singleton dimensions, and, accordingly, the result is
broadcastable with the input array. Otherwise, if `False`, the
reduced axes (dimensions) are not included in the result.
Returns:
If `axis` is `None`, a zero-dimensional array containing the index of
the first occurrence of the maximum value; otherwise, a
non-zero-dimensional array containing the indices of the maximum
values. The returned array has data type `np.int64`.
https://data-apis.org/array-api/latest/API_specification/generated/array_api.argmax.html
"""
out = np.argmax(*_unbox(x), axis=axis, keepdims=keepdims)
if out is None:
msg = "cannot compute argmax of an array with no data"
raise ValueError(msg)
if isinstance(out, ak.Array):
if ak.any(ak.is_none(out, axis=-1)):
msg = f"cannot compute argmax at axis={axis} because some lists at this depth have zero length"
raise ValueError(msg)
out = ak.Array(
_remove_optiontype(out.layout), behavior=out.behavior, attrs=out.attrs
)
| return _box(type(x), out) | 0 | 2023-12-26 10:53:35+00:00 | 12k |
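A minimal usage sketch of the argmax shown above (illustrative only; it assumes awkward and numpy are installed, and that np.argmax on an ak.Array dispatches to ak.argmax, which is the same dispatch the implementation relies on through _unbox):

import awkward as ak
import numpy as np

arr = ak.Array([[1.1, 2.2, 3.3], [4.4, 5.5]])  # a ragged array of two lists
idx = np.argmax(arr, axis=1)                   # per-list index of the maximum value
print(idx.tolist())                            # [2, 1]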
see2023/Bert-VITS2-ext | onnx_modules/V200/text/chinese.py | [
{
"identifier": "punctuation",
"path": "onnx_modules/V200/text/symbols.py",
"snippet": ""
},
{
"identifier": "ToneSandhi",
"path": "onnx_modules/V200/text/tone_sandhi.py",
"snippet": "class ToneSandhi:\n def __init__(self):\n self.must_neural_tone_words = {\n \"麻烦\",\n \"麻利\",\n \"鸳鸯\",\n \"高粱\",\n \"骨头\",\n \"骆驼\",\n \"马虎\",\n \"首饰\",\n \"馒头\",\n \"馄饨\",\n \"风筝\",\n \"难为\",\n \"队伍\",\n \"阔气\",\n \"闺女\",\n \"门道\",\n \"锄头\",\n \"铺盖\",\n \"铃铛\",\n \"铁匠\",\n \"钥匙\",\n \"里脊\",\n \"里头\",\n \"部分\",\n \"那么\",\n \"道士\",\n \"造化\",\n \"迷糊\",\n \"连累\",\n \"这么\",\n \"这个\",\n \"运气\",\n \"过去\",\n \"软和\",\n \"转悠\",\n \"踏实\",\n \"跳蚤\",\n \"跟头\",\n \"趔趄\",\n \"财主\",\n \"豆腐\",\n \"讲究\",\n \"记性\",\n \"记号\",\n \"认识\",\n \"规矩\",\n \"见识\",\n \"裁缝\",\n \"补丁\",\n \"衣裳\",\n \"衣服\",\n \"衙门\",\n \"街坊\",\n \"行李\",\n \"行当\",\n \"蛤蟆\",\n \"蘑菇\",\n \"薄荷\",\n \"葫芦\",\n \"葡萄\",\n \"萝卜\",\n \"荸荠\",\n \"苗条\",\n \"苗头\",\n \"苍蝇\",\n \"芝麻\",\n \"舒服\",\n \"舒坦\",\n \"舌头\",\n \"自在\",\n \"膏药\",\n \"脾气\",\n \"脑袋\",\n \"脊梁\",\n \"能耐\",\n \"胳膊\",\n \"胭脂\",\n \"胡萝\",\n \"胡琴\",\n \"胡同\",\n \"聪明\",\n \"耽误\",\n \"耽搁\",\n \"耷拉\",\n \"耳朵\",\n \"老爷\",\n \"老实\",\n \"老婆\",\n \"老头\",\n \"老太\",\n \"翻腾\",\n \"罗嗦\",\n \"罐头\",\n \"编辑\",\n \"结实\",\n \"红火\",\n \"累赘\",\n \"糨糊\",\n \"糊涂\",\n \"精神\",\n \"粮食\",\n \"簸箕\",\n \"篱笆\",\n \"算计\",\n \"算盘\",\n \"答应\",\n \"笤帚\",\n \"笑语\",\n \"笑话\",\n \"窟窿\",\n \"窝囊\",\n \"窗户\",\n \"稳当\",\n \"稀罕\",\n \"称呼\",\n \"秧歌\",\n \"秀气\",\n \"秀才\",\n \"福气\",\n \"祖宗\",\n \"砚台\",\n \"码头\",\n \"石榴\",\n \"石头\",\n \"石匠\",\n \"知识\",\n \"眼睛\",\n \"眯缝\",\n \"眨巴\",\n \"眉毛\",\n \"相声\",\n \"盘算\",\n \"白净\",\n \"痢疾\",\n \"痛快\",\n \"疟疾\",\n \"疙瘩\",\n \"疏忽\",\n \"畜生\",\n \"生意\",\n \"甘蔗\",\n \"琵琶\",\n \"琢磨\",\n \"琉璃\",\n \"玻璃\",\n \"玫瑰\",\n \"玄乎\",\n \"狐狸\",\n \"状元\",\n \"特务\",\n \"牲口\",\n \"牙碜\",\n \"牌楼\",\n \"爽快\",\n \"爱人\",\n \"热闹\",\n \"烧饼\",\n \"烟筒\",\n \"烂糊\",\n \"点心\",\n \"炊帚\",\n \"灯笼\",\n \"火候\",\n \"漂亮\",\n \"滑溜\",\n \"溜达\",\n \"温和\",\n \"清楚\",\n \"消息\",\n \"浪头\",\n \"活泼\",\n \"比方\",\n \"正经\",\n \"欺负\",\n \"模糊\",\n \"槟榔\",\n \"棺材\",\n \"棒槌\",\n \"棉花\",\n \"核桃\",\n \"栅栏\",\n \"柴火\",\n \"架势\",\n \"枕头\",\n \"枇杷\",\n \"机灵\",\n \"本事\",\n \"木头\",\n \"木匠\",\n \"朋友\",\n \"月饼\",\n \"月亮\",\n \"暖和\",\n \"明白\",\n \"时候\",\n \"新鲜\",\n \"故事\",\n \"收拾\",\n \"收成\",\n \"提防\",\n \"挖苦\",\n \"挑剔\",\n \"指甲\",\n \"指头\",\n \"拾掇\",\n \"拳头\",\n \"拨弄\",\n \"招牌\",\n \"招呼\",\n \"抬举\",\n \"护士\",\n \"折腾\",\n \"扫帚\",\n \"打量\",\n \"打算\",\n \"打点\",\n \"打扮\",\n \"打听\",\n \"打发\",\n \"扎实\",\n \"扁担\",\n \"戒指\",\n \"懒得\",\n \"意识\",\n \"意思\",\n \"情形\",\n \"悟性\",\n \"怪物\",\n \"思量\",\n \"怎么\",\n \"念头\",\n \"念叨\",\n \"快活\",\n \"忙活\",\n \"志气\",\n \"心思\",\n \"得罪\",\n \"张罗\",\n \"弟兄\",\n \"开通\",\n \"应酬\",\n \"庄稼\",\n \"干事\",\n \"帮手\",\n \"帐篷\",\n \"希罕\",\n \"师父\",\n \"师傅\",\n \"巴结\",\n \"巴掌\",\n \"差事\",\n \"工夫\",\n \"岁数\",\n \"屁股\",\n \"尾巴\",\n \"少爷\",\n \"小气\",\n \"小伙\",\n \"将就\",\n \"对头\",\n \"对付\",\n \"寡妇\",\n \"家伙\",\n \"客气\",\n \"实在\",\n \"官司\",\n \"学问\",\n \"学生\",\n \"字号\",\n \"嫁妆\",\n \"媳妇\",\n \"媒人\",\n \"婆家\",\n \"娘家\",\n \"委屈\",\n \"姑娘\",\n \"姐夫\",\n \"妯娌\",\n \"妥当\",\n \"妖精\",\n \"奴才\",\n \"女婿\",\n \"头发\",\n \"太阳\",\n \"大爷\",\n \"大方\",\n \"大意\",\n \"大夫\",\n \"多少\",\n \"多么\",\n \"外甥\",\n \"壮实\",\n \"地道\",\n \"地方\",\n \"在乎\",\n \"困难\",\n \"嘴巴\",\n \"嘱咐\",\n \"嘟囔\",\n \"嘀咕\",\n \"喜欢\",\n \"喇嘛\",\n \"喇叭\",\n \"商量\",\n \"唾沫\",\n \"哑巴\",\n \"哈欠\",\n \"哆嗦\",\n \"咳嗽\",\n \"和尚\",\n \"告诉\",\n \"告示\",\n \"含糊\",\n \"吓唬\",\n \"后头\",\n \"名字\",\n \"名堂\",\n \"合同\",\n \"吆喝\",\n \"叫唤\",\n \"口袋\",\n \"厚道\",\n \"厉害\",\n \"千斤\",\n \"包袱\",\n \"包涵\",\n \"匀称\",\n \"勤快\",\n \"动静\",\n \"动弹\",\n \"功夫\",\n \"力气\",\n \"前头\",\n \"刺猬\",\n \"刺激\",\n \"别扭\",\n \"利落\",\n \"利索\",\n \"利害\",\n \"分析\",\n \"出息\",\n \"凑合\",\n \"凉快\",\n \"冷战\",\n \"冤枉\",\n \"冒失\",\n 
\"养活\",\n \"关系\",\n \"先生\",\n \"兄弟\",\n \"便宜\",\n \"使唤\",\n \"佩服\",\n \"作坊\",\n \"体面\",\n \"位置\",\n \"似的\",\n \"伙计\",\n \"休息\",\n \"什么\",\n \"人家\",\n \"亲戚\",\n \"亲家\",\n \"交情\",\n \"云彩\",\n \"事情\",\n \"买卖\",\n \"主意\",\n \"丫头\",\n \"丧气\",\n \"两口\",\n \"东西\",\n \"东家\",\n \"世故\",\n \"不由\",\n \"不在\",\n \"下水\",\n \"下巴\",\n \"上头\",\n \"上司\",\n \"丈夫\",\n \"丈人\",\n \"一辈\",\n \"那个\",\n \"菩萨\",\n \"父亲\",\n \"母亲\",\n \"咕噜\",\n \"邋遢\",\n \"费用\",\n \"冤家\",\n \"甜头\",\n \"介绍\",\n \"荒唐\",\n \"大人\",\n \"泥鳅\",\n \"幸福\",\n \"熟悉\",\n \"计划\",\n \"扑腾\",\n \"蜡烛\",\n \"姥爷\",\n \"照顾\",\n \"喉咙\",\n \"吉他\",\n \"弄堂\",\n \"蚂蚱\",\n \"凤凰\",\n \"拖沓\",\n \"寒碜\",\n \"糟蹋\",\n \"倒腾\",\n \"报复\",\n \"逻辑\",\n \"盘缠\",\n \"喽啰\",\n \"牢骚\",\n \"咖喱\",\n \"扫把\",\n \"惦记\",\n }\n self.must_not_neural_tone_words = {\n \"男子\",\n \"女子\",\n \"分子\",\n \"原子\",\n \"量子\",\n \"莲子\",\n \"石子\",\n \"瓜子\",\n \"电子\",\n \"人人\",\n \"虎虎\",\n }\n self.punc = \":,;。?!“”‘’':,;.?!\"\n\n # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041\n # e.g.\n # word: \"家里\"\n # pos: \"s\"\n # finals: ['ia1', 'i3']\n def _neural_sandhi(self, word: str, pos: str, finals: List[str]) -> List[str]:\n # reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺\n for j, item in enumerate(word):\n if (\n j - 1 >= 0\n and item == word[j - 1]\n and pos[0] in {\"n\", \"v\", \"a\"}\n and word not in self.must_not_neural_tone_words\n ):\n finals[j] = finals[j][:-1] + \"5\"\n ge_idx = word.find(\"个\")\n if len(word) >= 1 and word[-1] in \"吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶\":\n finals[-1] = finals[-1][:-1] + \"5\"\n elif len(word) >= 1 and word[-1] in \"的地得\":\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 走了, 看着, 去过\n # elif len(word) == 1 and word in \"了着过\" and pos in {\"ul\", \"uz\", \"ug\"}:\n # finals[-1] = finals[-1][:-1] + \"5\"\n elif (\n len(word) > 1\n and word[-1] in \"们子\"\n and pos in {\"r\", \"n\"}\n and word not in self.must_not_neural_tone_words\n ):\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 桌上, 地下, 家里\n elif len(word) > 1 and word[-1] in \"上下里\" and pos in {\"s\", \"l\", \"f\"}:\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 上来, 下去\n elif len(word) > 1 and word[-1] in \"来去\" and word[-2] in \"上下进出回过起开\":\n finals[-1] = finals[-1][:-1] + \"5\"\n # 个做量词\n elif (\n ge_idx >= 1\n and (word[ge_idx - 1].isnumeric() or word[ge_idx - 1] in \"几有两半多各整每做是\")\n ) or word == \"个\":\n finals[ge_idx] = finals[ge_idx][:-1] + \"5\"\n else:\n if (\n word in self.must_neural_tone_words\n or word[-2:] in self.must_neural_tone_words\n ):\n finals[-1] = finals[-1][:-1] + \"5\"\n\n word_list = self._split_word(word)\n finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]\n for i, word in enumerate(word_list):\n # conventional neural in Chinese\n if (\n word in self.must_neural_tone_words\n or word[-2:] in self.must_neural_tone_words\n ):\n finals_list[i][-1] = finals_list[i][-1][:-1] + \"5\"\n finals = sum(finals_list, [])\n return finals\n\n def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]:\n # e.g. 看不懂\n if len(word) == 3 and word[1] == \"不\":\n finals[1] = finals[1][:-1] + \"5\"\n else:\n for i, char in enumerate(word):\n # \"不\" before tone4 should be bu2, e.g. 不怕\n if char == \"不\" and i + 1 < len(word) and finals[i + 1][-1] == \"4\":\n finals[i] = finals[i][:-1] + \"2\"\n return finals\n\n def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]:\n # \"一\" in number sequences, e.g. 
一零零, 二一零\n if word.find(\"一\") != -1 and all(\n [item.isnumeric() for item in word if item != \"一\"]\n ):\n return finals\n # \"一\" between reduplication words should be yi5, e.g. 看一看\n elif len(word) == 3 and word[1] == \"一\" and word[0] == word[-1]:\n finals[1] = finals[1][:-1] + \"5\"\n # when \"一\" is ordinal word, it should be yi1\n elif word.startswith(\"第一\"):\n finals[1] = finals[1][:-1] + \"1\"\n else:\n for i, char in enumerate(word):\n if char == \"一\" and i + 1 < len(word):\n # \"一\" before tone4 should be yi2, e.g. 一段\n if finals[i + 1][-1] == \"4\":\n finals[i] = finals[i][:-1] + \"2\"\n # \"一\" before non-tone4 should be yi4, e.g. 一天\n else:\n # \"一\" 后面如果是标点,还读一声\n if word[i + 1] not in self.punc:\n finals[i] = finals[i][:-1] + \"4\"\n return finals\n\n def _split_word(self, word: str) -> List[str]:\n word_list = jieba.cut_for_search(word)\n word_list = sorted(word_list, key=lambda i: len(i), reverse=False)\n first_subword = word_list[0]\n first_begin_idx = word.find(first_subword)\n if first_begin_idx == 0:\n second_subword = word[len(first_subword) :]\n new_word_list = [first_subword, second_subword]\n else:\n second_subword = word[: -len(first_subword)]\n new_word_list = [second_subword, first_subword]\n return new_word_list\n\n def _three_sandhi(self, word: str, finals: List[str]) -> List[str]:\n if len(word) == 2 and self._all_tone_three(finals):\n finals[0] = finals[0][:-1] + \"2\"\n elif len(word) == 3:\n word_list = self._split_word(word)\n if self._all_tone_three(finals):\n # disyllabic + monosyllabic, e.g. 蒙古/包\n if len(word_list[0]) == 2:\n finals[0] = finals[0][:-1] + \"2\"\n finals[1] = finals[1][:-1] + \"2\"\n # monosyllabic + disyllabic, e.g. 纸/老虎\n elif len(word_list[0]) == 1:\n finals[1] = finals[1][:-1] + \"2\"\n else:\n finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]\n if len(finals_list) == 2:\n for i, sub in enumerate(finals_list):\n # e.g. 所有/人\n if self._all_tone_three(sub) and len(sub) == 2:\n finals_list[i][0] = finals_list[i][0][:-1] + \"2\"\n # e.g. 好/喜欢\n elif (\n i == 1\n and not self._all_tone_three(sub)\n and finals_list[i][0][-1] == \"3\"\n and finals_list[0][-1][-1] == \"3\"\n ):\n finals_list[0][-1] = finals_list[0][-1][:-1] + \"2\"\n finals = sum(finals_list, [])\n # split idiom into two words who's length is 2\n elif len(word) == 4:\n finals_list = [finals[:2], finals[2:]]\n finals = []\n for sub in finals_list:\n if self._all_tone_three(sub):\n sub[0] = sub[0][:-1] + \"2\"\n finals += sub\n\n return finals\n\n def _all_tone_three(self, finals: List[str]) -> bool:\n return all(x[-1] == \"3\" for x in finals)\n\n # merge \"不\" and the word behind it\n # if don't merge, \"不\" sometimes appears alone according to jieba, which may occur sandhi error\n def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n last_word = \"\"\n for word, pos in seg:\n if last_word == \"不\":\n word = last_word + word\n if word != \"不\":\n new_seg.append((word, pos))\n last_word = word[:]\n if last_word == \"不\":\n new_seg.append((last_word, \"d\"))\n last_word = \"\"\n return new_seg\n\n # function 1: merge \"一\" and reduplication words in it's left and right, e.g. 
\"听\",\"一\",\"听\" ->\"听一听\"\n # function 2: merge single \"一\" and the word behind it\n # if don't merge, \"一\" sometimes appears alone according to jieba, which may occur sandhi error\n # e.g.\n # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')]\n # output seg: [['听一听', 'v']]\n def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n # function 1\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and word == \"一\"\n and i + 1 < len(seg)\n and seg[i - 1][0] == seg[i + 1][0]\n and seg[i - 1][1] == \"v\"\n ):\n new_seg[i - 1][0] = new_seg[i - 1][0] + \"一\" + new_seg[i - 1][0]\n else:\n if (\n i - 2 >= 0\n and seg[i - 1][0] == \"一\"\n and seg[i - 2][0] == word\n and pos == \"v\"\n ):\n continue\n else:\n new_seg.append([word, pos])\n seg = new_seg\n new_seg = []\n # function 2\n for i, (word, pos) in enumerate(seg):\n if new_seg and new_seg[-1][0] == \"一\":\n new_seg[-1][0] = new_seg[-1][0] + word\n else:\n new_seg.append([word, pos])\n return new_seg\n\n # the first and the second words are all_tone_three\n def _merge_continuous_three_tones(\n self, seg: List[Tuple[str, str]]\n ) -> List[Tuple[str, str]]:\n new_seg = []\n sub_finals_list = [\n lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)\n for (word, pos) in seg\n ]\n assert len(sub_finals_list) == len(seg)\n merge_last = [False] * len(seg)\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and self._all_tone_three(sub_finals_list[i - 1])\n and self._all_tone_three(sub_finals_list[i])\n and not merge_last[i - 1]\n ):\n # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi\n if (\n not self._is_reduplication(seg[i - 1][0])\n and len(seg[i - 1][0]) + len(seg[i][0]) <= 3\n ):\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n merge_last[i] = True\n else:\n new_seg.append([word, pos])\n else:\n new_seg.append([word, pos])\n\n return new_seg\n\n def _is_reduplication(self, word: str) -> bool:\n return len(word) == 2 and word[0] == word[1]\n\n # the last char of first word and the first char of second word is tone_three\n def _merge_continuous_three_tones_2(\n self, seg: List[Tuple[str, str]]\n ) -> List[Tuple[str, str]]:\n new_seg = []\n sub_finals_list = [\n lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)\n for (word, pos) in seg\n ]\n assert len(sub_finals_list) == len(seg)\n merge_last = [False] * len(seg)\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and sub_finals_list[i - 1][-1][-1] == \"3\"\n and sub_finals_list[i][0][-1] == \"3\"\n and not merge_last[i - 1]\n ):\n # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi\n if (\n not self._is_reduplication(seg[i - 1][0])\n and len(seg[i - 1][0]) + len(seg[i][0]) <= 3\n ):\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n merge_last[i] = True\n else:\n new_seg.append([word, pos])\n else:\n new_seg.append([word, pos])\n return new_seg\n\n def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n for i, (word, pos) in enumerate(seg):\n if i - 1 >= 0 and word == \"儿\" and seg[i - 1][0] != \"#\":\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n else:\n new_seg.append([word, pos])\n return new_seg\n\n def _merge_reduplication(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n for i, (word, pos) in enumerate(seg):\n if new_seg and word == new_seg[-1][0]:\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n else:\n new_seg.append([word, pos])\n 
return new_seg\n\n def pre_merge_for_modify(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n seg = self._merge_bu(seg)\n try:\n seg = self._merge_yi(seg)\n except:\n print(\"_merge_yi failed\")\n seg = self._merge_reduplication(seg)\n seg = self._merge_continuous_three_tones(seg)\n seg = self._merge_continuous_three_tones_2(seg)\n seg = self._merge_er(seg)\n return seg\n\n def modified_tone(self, word: str, pos: str, finals: List[str]) -> List[str]:\n finals = self._bu_sandhi(word, finals)\n finals = self._yi_sandhi(word, finals)\n finals = self._neural_sandhi(word, pos, finals)\n finals = self._three_sandhi(word, finals)\n return finals"
}
] | import os
import re
import cn2an
import jieba.posseg as psg
from pypinyin import lazy_pinyin, Style
from .symbols import punctuation
from .tone_sandhi import ToneSandhi
from text import chinese_bert
from text.chinese_bert import get_bert_feature | 7,598 |
current_file_path = os.path.dirname(__file__)
pinyin_to_symbol_map = {
line.split("\t")[0]: line.strip().split("\t")[1]
for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines()
}
rep_map = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
|
current_file_path = os.path.dirname(__file__)
pinyin_to_symbol_map = {
line.split("\t")[0]: line.strip().split("\t")[1]
for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines()
}
rep_map = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
| tone_modifier = ToneSandhi() | 1 | 2023-12-27 03:09:11+00:00 | 12k |
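A small, self-contained sketch of how a punctuation table like rep_map above is typically applied; the actual helper in chinese.py is not shown in this excerpt, so this is only an illustration of how such a mapping is commonly used:

import re

rep_map = {":": ",", ";": ",", "。": ".", "!": "!", "?": "?"}  # abridged copy of the table above
pattern = re.compile("|".join(re.escape(k) for k in rep_map))
text = "你好!今天天气不错;出去走走?"
print(pattern.sub(lambda m: rep_map[m.group()], text))  # -> 你好!今天天气不错,出去走走?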
chinhsuanwu/ifusion-threestudio | threestudio/models/geometry/base.py | [
{
"identifier": "IsosurfaceHelper",
"path": "threestudio/models/isosurface.py",
"snippet": "class IsosurfaceHelper(nn.Module):\n points_range: Tuple[float, float] = (0, 1)\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"N 3\"]:\n raise NotImplementedError"
},
{
"identifier": "MarchingCubeCPUHelper",
"path": "threestudio/models/isosurface.py",
"snippet": "class MarchingCubeCPUHelper(IsosurfaceHelper):\n def __init__(self, resolution: int) -> None:\n super().__init__()\n self.resolution = resolution\n import mcubes\n\n self.mc_func: Callable = mcubes.marching_cubes\n self._grid_vertices: Optional[Float[Tensor, \"N3 3\"]] = None\n self._dummy: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_dummy\", torch.zeros(0, dtype=torch.float32), persistent=False\n )\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"N3 3\"]:\n if self._grid_vertices is None:\n # keep the vertices on CPU so that we can support very large resolution\n x, y, z = (\n torch.linspace(*self.points_range, self.resolution),\n torch.linspace(*self.points_range, self.resolution),\n torch.linspace(*self.points_range, self.resolution),\n )\n x, y, z = torch.meshgrid(x, y, z, indexing=\"ij\")\n verts = torch.cat(\n [x.reshape(-1, 1), y.reshape(-1, 1), z.reshape(-1, 1)], dim=-1\n ).reshape(-1, 3)\n self._grid_vertices = verts\n return self._grid_vertices\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support deformation. Ignoring.\"\n )\n level = -level.view(self.resolution, self.resolution, self.resolution)\n v_pos, t_pos_idx = self.mc_func(\n level.detach().cpu().numpy(), 0.0\n ) # transform to numpy\n v_pos, t_pos_idx = (\n torch.from_numpy(v_pos).float().to(self._dummy.device),\n torch.from_numpy(t_pos_idx.astype(np.int64)).long().to(self._dummy.device),\n ) # transform back to torch tensor on CUDA\n v_pos = v_pos / (self.resolution - 1.0)\n return Mesh(v_pos=v_pos, t_pos_idx=t_pos_idx)"
},
{
"identifier": "MarchingTetrahedraHelper",
"path": "threestudio/models/isosurface.py",
"snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = 
torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh"
},
{
"identifier": "Mesh",
"path": "threestudio/models/mesh.py",
"snippet": "class Mesh:\n def __init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return 
self._t_tex_idx\n\n @property\n def v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def 
set_vertex_color(self, v_rgb):\n assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss"
},
{
"identifier": "BaseModule",
"path": "threestudio/utils/base.py",
"snippet": "class BaseModule(nn.Module, Updateable):\n @dataclass\n class Config:\n weights: Optional[str] = None\n\n cfg: Config # add this to every subclass of BaseModule to enable static type checking\n\n def __init__(\n self, cfg: Optional[Union[dict, DictConfig]] = None, *args, **kwargs\n ) -> None:\n super().__init__()\n self.cfg = parse_structured(self.Config, cfg)\n self.device = get_device()\n self.configure(*args, **kwargs)\n if self.cfg.weights is not None:\n # format: path/to/weights:module_name\n weights_path, module_name = self.cfg.weights.split(\":\")\n state_dict, epoch, global_step = load_module_weights(\n weights_path, module_name=module_name, map_location=\"cpu\"\n )\n self.load_state_dict(state_dict)\n self.do_update_step(\n epoch, global_step, on_load_weights=True\n ) # restore states\n # dummy tensor to indicate model state\n self._dummy: Float[Tensor, \"...\"]\n self.register_buffer(\"_dummy\", torch.zeros(0).float(), persistent=False)\n\n def configure(self, *args, **kwargs) -> None:\n pass"
},
{
"identifier": "chunk_batch",
"path": "threestudio/utils/ops.py",
"snippet": "def chunk_batch(func: Callable, chunk_size: int, *args, **kwargs) -> Any:\n if chunk_size <= 0:\n return func(*args, **kwargs)\n B = None\n for arg in list(args) + list(kwargs.values()):\n if isinstance(arg, torch.Tensor):\n B = arg.shape[0]\n break\n assert (\n B is not None\n ), \"No tensor found in args or kwargs, cannot determine batch size.\"\n out = defaultdict(list)\n out_type = None\n # max(1, B) to support B == 0\n for i in range(0, max(1, B), chunk_size):\n out_chunk = func(\n *[\n arg[i : i + chunk_size] if isinstance(arg, torch.Tensor) else arg\n for arg in args\n ],\n **{\n k: arg[i : i + chunk_size] if isinstance(arg, torch.Tensor) else arg\n for k, arg in kwargs.items()\n },\n )\n if out_chunk is None:\n continue\n out_type = type(out_chunk)\n if isinstance(out_chunk, torch.Tensor):\n out_chunk = {0: out_chunk}\n elif isinstance(out_chunk, tuple) or isinstance(out_chunk, list):\n chunk_length = len(out_chunk)\n out_chunk = {i: chunk for i, chunk in enumerate(out_chunk)}\n elif isinstance(out_chunk, dict):\n pass\n else:\n print(\n f\"Return value of func must be in type [torch.Tensor, list, tuple, dict], get {type(out_chunk)}.\"\n )\n exit(1)\n for k, v in out_chunk.items():\n v = v if torch.is_grad_enabled() else v.detach()\n out[k].append(v)\n\n if out_type is None:\n return None\n\n out_merged: Dict[Any, Optional[torch.Tensor]] = {}\n for k, v in out.items():\n if all([vv is None for vv in v]):\n # allow None in return value\n out_merged[k] = None\n elif all([isinstance(vv, torch.Tensor) for vv in v]):\n out_merged[k] = torch.cat(v, dim=0)\n else:\n raise TypeError(\n f\"Unsupported types in return value of func: {[type(vv) for vv in v if not isinstance(vv, torch.Tensor)]}\"\n )\n\n if out_type is torch.Tensor:\n return out_merged[0]\n elif out_type in [tuple, list]:\n return out_type([out_merged[i] for i in range(chunk_length)])\n elif out_type is dict:\n return out_merged"
},
{
"identifier": "scale_tensor",
"path": "threestudio/utils/ops.py",
"snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat"
}
] | from dataclasses import dataclass, field
from threestudio.models.isosurface import (
IsosurfaceHelper,
MarchingCubeCPUHelper,
MarchingTetrahedraHelper,
)
from threestudio.models.mesh import Mesh
from threestudio.utils.base import BaseModule
from threestudio.utils.ops import chunk_batch, scale_tensor
from threestudio.utils.typing import *
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio | 7,670 |
def contract_to_unisphere(
x: Float[Tensor, "... 3"], bbox: Float[Tensor, "2 3"], unbounded: bool = False
) -> Float[Tensor, "... 3"]:
if unbounded:
x = scale_tensor(x, bbox, (0, 1))
x = x * 2 - 1 # aabb is at [-1, 1]
mag = x.norm(dim=-1, keepdim=True)
mask = mag.squeeze(-1) > 1
x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])
x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]
else:
x = scale_tensor(x, bbox, (0, 1))
return x
class BaseGeometry(BaseModule):
@dataclass
class Config(BaseModule.Config):
pass
cfg: Config
@staticmethod
def create_from(
other: "BaseGeometry", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs
) -> "BaseGeometry":
raise TypeError(
f"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}"
)
def export(self, *args, **kwargs) -> Dict[str, Any]:
return {}
class BaseImplicitGeometry(BaseGeometry):
@dataclass
class Config(BaseGeometry.Config):
radius: float = 1.0
isosurface: bool = True
isosurface_method: str = "mt"
isosurface_resolution: int = 128
isosurface_threshold: Union[float, str] = 0.0
isosurface_chunk: int = 0
isosurface_coarse_to_fine: bool = True
isosurface_deformable_grid: bool = False
isosurface_remove_outliers: bool = True
isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01
cfg: Config
def configure(self) -> None:
self.bbox: Float[Tensor, "2 3"]
self.register_buffer(
"bbox",
torch.as_tensor(
[
[-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],
[self.cfg.radius, self.cfg.radius, self.cfg.radius],
],
dtype=torch.float32,
),
)
self.isosurface_helper: Optional[IsosurfaceHelper] = None
self.unbounded: bool = False
def _initilize_isosurface_helper(self):
if self.cfg.isosurface and self.isosurface_helper is None:
if self.cfg.isosurface_method == "mc-cpu":
self.isosurface_helper = MarchingCubeCPUHelper(
self.cfg.isosurface_resolution
).to(self.device)
elif self.cfg.isosurface_method == "mt":
|
def contract_to_unisphere(
x: Float[Tensor, "... 3"], bbox: Float[Tensor, "2 3"], unbounded: bool = False
) -> Float[Tensor, "... 3"]:
if unbounded:
x = scale_tensor(x, bbox, (0, 1))
x = x * 2 - 1 # aabb is at [-1, 1]
mag = x.norm(dim=-1, keepdim=True)
mask = mag.squeeze(-1) > 1
x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])
x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]
else:
x = scale_tensor(x, bbox, (0, 1))
return x
class BaseGeometry(BaseModule):
@dataclass
class Config(BaseModule.Config):
pass
cfg: Config
@staticmethod
def create_from(
other: "BaseGeometry", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs
) -> "BaseGeometry":
raise TypeError(
f"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}"
)
def export(self, *args, **kwargs) -> Dict[str, Any]:
return {}
class BaseImplicitGeometry(BaseGeometry):
@dataclass
class Config(BaseGeometry.Config):
radius: float = 1.0
isosurface: bool = True
isosurface_method: str = "mt"
isosurface_resolution: int = 128
isosurface_threshold: Union[float, str] = 0.0
isosurface_chunk: int = 0
isosurface_coarse_to_fine: bool = True
isosurface_deformable_grid: bool = False
isosurface_remove_outliers: bool = True
isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01
cfg: Config
def configure(self) -> None:
self.bbox: Float[Tensor, "2 3"]
self.register_buffer(
"bbox",
torch.as_tensor(
[
[-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],
[self.cfg.radius, self.cfg.radius, self.cfg.radius],
],
dtype=torch.float32,
),
)
self.isosurface_helper: Optional[IsosurfaceHelper] = None
self.unbounded: bool = False
def _initilize_isosurface_helper(self):
if self.cfg.isosurface and self.isosurface_helper is None:
if self.cfg.isosurface_method == "mc-cpu":
self.isosurface_helper = MarchingCubeCPUHelper(
self.cfg.isosurface_resolution
).to(self.device)
elif self.cfg.isosurface_method == "mt": | self.isosurface_helper = MarchingTetrahedraHelper( | 2 | 2023-12-27 20:30:33+00:00 | 12k |
gardenifi/server | tests/raspi/mqtt_test.py | [
{
"identifier": "Mqtt",
"path": "app/raspi/mqtt.py",
"snippet": "class Mqtt:\n \"\"\"MQTT Methods Class.\"\"\"\n\n __instance = None\n __lock = threading.Lock()\n client = None\n\n def __new__(cls):\n \"\"\"\n Create a new instance of the Mqtt class using the singleton design pattern.\n\n Returns:\n An instance of the Mqtt class.\n\n Example Usage:\n instance = Mqtt()\n \"\"\"\n if cls.__instance is None:\n with cls.__lock:\n cls.__instance = super().__new__(cls) # pylint: disable=duplicate-code\n cls._mqtt_thread = None\n cls._periodic_updates_thread = None\n\n logger.debug(f\"Returning Mqtt Object Class: {cls.__instance}\")\n return cls.__instance\n\n @classmethod\n def destroy_instance(cls):\n \"\"\"\n Destroy the instance of the Mqtt class.\n\n This method sets the instance of the Mqtt class to None, effectively destroying the instance.\n\n Example Usage:\n ```python\n instance = Mqtt() # Create an instance of the Mqtt class\n Mqtt.destroy_instance() # Destroy the instance\n print(instance) # Output: None\n ```\n\n Inputs:\n None\n\n Outputs:\n None\n \"\"\"\n logger.debug(f\"Destroying Mqtt Object Class: {cls.__instance}\")\n cls.__instance = None\n cls._mqtt_thread = None\n cls._periodic_updates_thread = None\n\n def get_mqtt_thread(self):\n \"\"\"Getter.\"\"\"\n logger.debug(f\"Getting current thread: {self._mqtt_thread}\")\n return self._mqtt_thread\n\n def set_mqtt_thread(self, mqtt_thread):\n \"\"\"Setter.\"\"\"\n logger.debug(f\"Setting new thread: {mqtt_thread}\")\n self._mqtt_thread = mqtt_thread\n\n def get_periodic_updates_thread(self):\n \"\"\"Getter.\"\"\"\n return self._periodic_updates_thread\n\n def set_periodic_updates_thread(self, periodic_updates_thread):\n \"\"\"Setter.\"\"\"\n self._periodic_updates_thread = periodic_updates_thread\n\n def is_running(self):\n \"\"\"Check whether mqtt thread state.\"\"\"\n # logger.info(str(mqtt_thread))\n # logger.info(str(mqtt_thread is not None))\n # logger.info(str(mqtt_thread.is_alive()))\n return self._mqtt_thread is not None and self._mqtt_thread.is_alive()\n\n @staticmethod\n def on_disconnect(client, data, return_code=0):\n \"\"\"OnDisconnect callback.\"\"\"\n logger.debug(f\"MQTT OnDisconnect: {client}:{data}:{return_code}\")\n\n # The callback for when the client\n # receives a CONNACK response from the server.\n @staticmethod\n def on_connect(client, userdata, flags, return_code):\n \"\"\"OnConnect callback.\"\"\"\n logger.debug(f\"MQTT OnConnect: {client}:{userdata}:{flags}:{return_code}\")\n client.connected_flag = True\n\n # subscribe to the RASPIRRI TOPICS\n logger.debug(\n f\"MQTT OnConnect: Subscribing to topics:\\\n {MQTT_TOPIC_STATUS},\\\n {MQTT_TOPIC_CONFIG},\\\n {MQTT_TOPIC_CMD},\\\n {MQTT_TOPIC_VALVES}\"\n )\n client.subscribe(MQTT_TOPIC_STATUS)\n client.subscribe(MQTT_TOPIC_CONFIG)\n client.subscribe(MQTT_TOPIC_CMD)\n client.subscribe(MQTT_TOPIC_VALVES)\n\n if return_code == 0:\n logger.info(\"Connected successfully\")\n Helpers().load_toggle_statuses_from_file()\n if Mqtt().get_periodic_updates_thread() is None:\n Mqtt().set_periodic_updates_thread(\n Thread(daemon=True, name=\"PeriodicUpdatesThread\", target=Mqtt.send_periodic_updates, args=(client,))\n )\n Mqtt().get_periodic_updates_thread().start()\n else:\n logger.info(f\"Connect returned result code: {return_code}\")\n\n @staticmethod\n def handle_valves(client, data):\n \"\"\"Handle valves.\"\"\"\n try:\n logger.info(f\"valves data received={data}\")\n Helpers().set_valves(data)\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, 
MQTT_STATUS_ERR + str(exception)[0:128] + MQTT_END)\n\n # Program Configuration handler\n # 1. It should parse the configuration as a JSON string\n # 2. If it is correct it should store it as a local file\n # 3. A scheduler should launch to turn on the irrigator for every cycle\n @staticmethod\n def handle_config(client, data):\n \"\"\"Handle cfg.\"\"\"\n try:\n json_data = json.loads(data)\n logger.info(f\"prestored programs={json_data}\")\n for program in json_data:\n logger.info(f\"program={program}\")\n if program == {}:\n Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END)\n return\n Services().store_program_cycles(program, True)\n Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END)\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + str(exception)[0:128] + MQTT_END)\n\n @staticmethod\n def handle_command(client, data):\n \"\"\"Handle cmd.\"\"\"\n try:\n json_data = json.loads(data)\n logger.info(json_data)\n cmd = json_data[\"cmd\"]\n command = Command(cmd)\n try:\n valve = json_data[\"out\"]\n except Exception as exception:\n logger.warning(\n f\"Could not find valve out parameter. \\\n Will use valve 1: {exception}\"\n )\n valve = 1\n file_path = PROGRAM + str(valve) + PROGRAM_EXT\n\n if command in (Command.TURN_ON_VALVE, Command.TURN_OFF_VALVE):\n Helpers().toggle(cmd, \"out\" + str(valve))\n statuses = Helpers().get_toggle_statuses()\n logger.info(f\"Publishing right away Statuses to MQTT topic: {MQTT_TOPIC_STATUS}: {statuses}\")\n Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, str(statuses))\n elif command == Command.SEND_PROGRAM:\n logger.info(f\"Looking for {file_path}\")\n if os.path.exists(file_path):\n logger.info(f\"{file_path} exists!\")\n with open(file_path, encoding=\"utf-8\") as json_file:\n json_data = json.load(json_file)\n Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, str(json_data))\n else:\n Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + file_path + \" does not exist!\" + MQTT_END)\n elif command == Command.DELETE_PROGRAM:\n if not Services().delete_program(valve):\n Mqtt.publish_to_topic(\n client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + file_path + \" does not exist! 
Cannot be deleted.\" + MQTT_END\n )\n elif command == Command.SEND_TIMEZONE:\n Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + str(Helpers().get_timezone() + MQTT_END))\n elif command == Command.REBOOT_RPI:\n Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END)\n Helpers().system_reboot()\n elif command == Command.UPDATE_RPI:\n Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END)\n Helpers().system_update()\n else:\n Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + \"Wrong command used!\" + MQTT_END)\n\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + str(exception)[0:128] + MQTT_END)\n\n @staticmethod\n def publish_to_topic(client, topic, data, retained=True):\n \"\"\"Publish to MQTT Topic.\"\"\"\n client.publish(topic, data, qos=2, retain=retained)\n\n # The callback for when a PUBLISH message is received from the server.\n @staticmethod\n def on_message(client, userdata, msg):\n \"\"\"OnMessage handler.\"\"\"\n topic = msg.topic\n data = msg.payload.decode(\"utf-8\")\n logger.info(f\"Received message from topic:{topic}, userdata:{userdata}, data:{data}\")\n if topic == MQTT_TOPIC_CONFIG:\n Mqtt.handle_config(client, data)\n elif msg.topic == MQTT_TOPIC_CMD:\n Mqtt.handle_command(client, data)\n elif msg.topic == MQTT_TOPIC_VALVES:\n Mqtt.handle_valves(client, data)\n\n @staticmethod\n def send_periodic_updates(client):\n \"\"\"Send periodic updates.\"\"\"\n while True:\n try:\n logger.info(\"Sending Periodic Updates to status topic every 10s...\")\n statuses = Helpers().get_toggle_statuses()\n logger.info(f\"Publishing Statuses to MQTT topic: {MQTT_TOPIC_STATUS}: {statuses}\")\n Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, str(statuses))\n metadata = {}\n metadata[\"ip_address\"] = Helpers().extract_local_ip()\n metadata[\"uptime\"] = Helpers().get_uptime()\n metadata[\"git_commit\"] = Helpers().get_git_commit_id()\n Mqtt.publish_to_topic(client, MQTT_TOPIC_METADATA, str(metadata))\n if \"valves\" in statuses:\n Mqtt.publish_to_topic(client, MQTT_TOPIC_VALVES, str(statuses[\"valves\"]))\n else:\n statuses[\"valves\"] = []\n logger.info(f\"Valves sent to MQTT Topic: {statuses['valves']}\")\n if not Mqtt().is_running():\n Mqtt().start_mqtt_thread()\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n statuses = {}\n finally:\n time.sleep(10)\n\n @staticmethod\n def start_mqtt_thread():\n \"\"\"Start MQTT thread.\"\"\"\n try:\n logger.info(\"Trying to start MQTT Thread...\")\n while Mqtt().get_mqtt_thread() is None:\n logger.info(\"Mqtt thread is None. 
Creating a new one!\")\n new_thread = Thread(target=Mqtt.mqtt_init, daemon=True, name=\"MQTT_Main_Thread\")\n Mqtt().set_mqtt_thread(new_thread)\n time.sleep(3)\n if not Mqtt().get_mqtt_thread().is_alive():\n Mqtt().get_mqtt_thread().start()\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n\n @staticmethod\n def on_shutdown(client):\n \"\"\"Calling it on shutdown (SIGTERM and SIGINT signals)\"\"\"\n client.loop_stop() # Stop the loop to allow pending messages to be sent\n client.disconnect()\n Mqtt().set_mqtt_thread(None)\n sys.exit(0)\n\n @staticmethod\n def mqtt_init():\n \"\"\"MQTT initialization.\"\"\"\n try:\n logger.info(\"Initializing MQTT\")\n\n # create the client\n client = mqtt.Client(client_id=MQTT_CLIENT_ID, clean_session=True)\n client.on_connect = Mqtt.on_connect\n client.on_disconnect = Mqtt.on_disconnect\n client.on_message = Mqtt.on_message\n\n # enable TLS\n client.tls_set(tls_version=mqtt.ssl.PROTOCOL_TLS)\n\n # set username and password\n client.username_pw_set(MQTT_USER, MQTT_PASS)\n\n # set Last Will message on disconnection\n client.will_set(MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + '\"LOST_CONNECTION\"' + MQTT_END, qos=1, retain=True)\n\n last_will_interval = 5 # Set the timeout for the last will message (in seconds)\n client.will_delay_interval = last_will_interval\n\n # connect to HiveMQ Cloud on port 8883 with 5 seconds keep-alive interval\n client.connect(MQTT_HOST, int(MQTT_PORT), last_will_interval)\n\n # Find local stored programs and publish them again to config topic\n program_data = []\n for valve in range(1, 5):\n json_data = Services().load_program_cycles_if_exists(valve)\n if json_data is not None:\n program_data.append(json_data)\n\n program_data = str(program_data).replace(\"'\", '\"').replace(\"True\", '\"True\"').replace(\"False\", '\"False\"')\n logger.info(f\"program_data={program_data}\")\n Mqtt.publish_to_topic(client, MQTT_TOPIC_CONFIG, str(program_data))\n Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END)\n\n logger.info(\"Before client.loop_forever()\")\n Mqtt.client = client\n # Blocking call that processes network traffic,\n # dispatches callbacks and handles reconnecting.\n client.loop_forever()\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n Mqtt().set_mqtt_thread(None)"
},
{
"identifier": "Helpers",
"path": "app/raspi/helpers.py",
"snippet": "class Helpers:\n \"\"\"\n The `Helpers` class provides various helper methods for performing tasks\n such as setting valves, getting system information, storing and loading\n objects to/from files, managing WiFi networks, and updating the `wpa_supplicant.conf` file.\n \"\"\"\n\n __instance = None\n __lock = threading.Lock()\n\n def __new__(cls):\n \"\"\"\n Create a new instance of the Helpers class using the singleton design pattern.\n\n Returns:\n An instance of the Helpers class.\n\n Example Usage:\n instance = Helpers()\n \"\"\"\n if cls.__instance is None:\n with cls.__lock:\n cls.__instance = super().__new__(cls) # pylint: disable=duplicate-code\n cls._toggle_statuses = {}\n cls._ap_array = []\n cls._is_connected_to_inet = False\n return cls.__instance\n\n @classmethod\n def destroy_instance(cls):\n \"\"\"\n Destroy the instance of the Helpers class.\n\n This method sets the instance of the Helpers class to None, effectively destroying the instance.\n\n Example Usage:\n ```python\n instance = Helpers() # Create an instance of the Helpers class\n Helpers.destroy_instance() # Destroy the instance\n print(instance) # Output: None\n ```\n\n Inputs:\n None\n\n Outputs:\n None\n \"\"\"\n cls.__instance = None\n cls._toggle_statuses = {}\n cls._ap_array = []\n cls._is_connected_to_inet = False\n\n @property\n def toggle_statuses(self):\n \"\"\"\n Getter method for the toggle_statuses property.\n\n Returns:\n dict: A dictionary containing toggle statuses.\n\n Example:\n Access toggle statuses using `instance.toggle_statuses`.\n \"\"\"\n return self._toggle_statuses\n\n @toggle_statuses.setter\n def toggle_statuses(self, value):\n \"\"\"\n Setter method for the toggle_statuses property.\n\n Args:\n value (dict): A dictionary containing toggle statuses to set.\n\n Example:\n Set toggle statuses using `instance.toggle_statuses = new_statuses`.\n \"\"\"\n self._toggle_statuses = value\n\n @property\n def ap_array(self):\n \"\"\"\n Getter method for the _ap_array property.\n\n Returns:\n An array of wifi networks\n\n Example:\n Access toggle statuses using `instance.ap_array`.\n \"\"\"\n return self._ap_array\n\n @ap_array.setter\n def ap_array(self, value):\n \"\"\"\n Setter method for the _ap_array property.\n\n Args:\n value (dict): An array containing the wifi networks to set.\n\n Example:\n Set toggle statuses using `instance.ap_array = new_ap_array`.\n \"\"\"\n self._ap_array = value\n\n def set_valves(self, valves):\n \"\"\"\n Set valve statuses in the toggle_statuses dictionary.\n\n Args:\n valves (str or dict): A string or dictionary representing valve statuses.\n\n Example:\n instance.set_valves('{\"valve1\": true, \"valve2\": false}')\n \"\"\"\n try:\n if isinstance(valves, str):\n valves = ast.literal_eval(valves)\n else:\n valves = ast.literal_eval(str(valves))\n self._toggle_statuses[\"valves\"] = valves\n except Exception as exception:\n logger.error(f\"Error in set_valves: {exception}\")\n raise\n\n def extract_local_ip(self):\n \"\"\"\n Extract the local IP address of the device.\n\n Returns:\n str: The local IP address.\n\n Example:\n local_ip = instance.extract_local_ip()\n \"\"\"\n tcp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n tcp_sock.connect((\"8.8.8.8\", 1))\n ip_address = tcp_sock.getsockname()[0]\n except Exception:\n ip_address = \"127.0.0.1\"\n finally:\n tcp_sock.close()\n return ip_address\n\n def get_uptime(self):\n \"\"\"\n Get the system uptime.\n\n Returns:\n str: The system uptime.\n\n Example:\n uptime = instance.get_uptime()\n 
\"\"\"\n try:\n result = subprocess.run([\"uptime\", \"-p\"], stdout=subprocess.PIPE, text=True, check=True)\n return result.stdout.replace(\"\\n\", \"\")\n except Exception as e:\n logger.error(f\"Error retrieving uptime: {e}\")\n return str(e)\n\n def get_git_commit_id(self):\n \"\"\"\n Get the Git commit ID of the current project.\n\n Returns:\n str: The Git commit ID.\n\n Example:\n commit_id = instance.get_git_commit_id()\n \"\"\"\n # Open the file in read mode ('r')\n try:\n with open(RPI_SERVER_GIT_COMMIT, encoding=\"utf-8\") as file:\n # Read the entire content of the file\n content = file.read().replace(\"\\n\", \"\")\n logger.debug(f\"File content: {content}\")\n return content\n except FileNotFoundError as e:\n logger.error(f\"The file '{RPI_SERVER_GIT_COMMIT}' does not exist.\")\n return str(e)\n except Exception as e:\n traceback.print_exc()\n logger.error(f\"Error retrieving git log: {e}\")\n return str(e)\n\n def store_object_to_file(self, filename, local_object):\n \"\"\"\n Store a local object to a file using pickle.\n\n Args:\n filename (str): The name of the file to store the object.\n local_object (object): The object to be stored.\n\n Example:\n instance.store_object_to_file('data.pkl', data)\n \"\"\"\n try:\n with open(filename, \"wb\") as obj_file:\n pickle.dump(local_object, obj_file)\n logger.info(f\"Stored local object file: {filename}: {local_object}\")\n obj_file.close()\n return local_object\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def store_toggle_statuses_to_file(self):\n \"\"\"\n Store toggle statuses to a file.\n\n Returns:\n dict: The toggle statuses being stored.\n\n Example:\n stored_statuses = instance.store_toggle_statuses_to_file()\n \"\"\"\n return self.store_object_to_file(STATUSES_FILE, self._toggle_statuses)\n\n def store_wifi_networks_to_file(self):\n \"\"\"\n Store WiFi networks to a file.\n\n Returns:\n list: The WiFi networks being stored.\n\n Example:\n stored_networks = instance.store_wifi_networks_to_file()\n \"\"\"\n return self.store_object_to_file(NETWORKS_FILE, self._ap_array)\n\n def load_object_from_file(self, filename):\n \"\"\"\n Load a local object from a file using pickle.\n\n Args:\n filename (str): The name of the file to load the object from.\n\n Returns:\n object: The loaded object.\n\n Example:\n loaded_object = instance.load_object_from_file('data.pkl')\n \"\"\"\n try:\n local_obj = {}\n with open(filename, \"rb\") as obj_file:\n local_obj = pickle.load(obj_file)\n logger.info(f\"Loaded local object file: {filename}: {local_obj}\")\n obj_file.close()\n return local_obj\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n self.store_object_to_file(filename, local_obj)\n return local_obj\n\n def load_toggle_statuses_from_file(self):\n \"\"\"\n Load toggle statuses from a file and update the instance's _toggle_statuses attribute.\n \"\"\"\n self._toggle_statuses = self.load_object_from_file(STATUSES_FILE)\n\n def load_wifi_networks_from_file(self):\n \"\"\"\n Load WiFi networks from a file and update the instance's _ap_array attribute.\n \"\"\"\n self._ap_array = self.load_object_from_file(NETWORKS_FILE)\n\n def get_timezone(self):\n \"\"\"\n Get the system timezone.\n\n Returns:\n str: The system timezone.\n\n Example:\n timezone = instance.get_timezone()\n \"\"\"\n return str(time.tzname[time.daylight])\n\n def check_empty_toggle(self, valve):\n \"\"\"\n Check if a toggle status is empty for a specific valve and set a default value if it is.\n\n Args:\n valve 
(str): The name of the valve.\n\n Example:\n instance.check_empty_toggle(\"out1\")\n \"\"\"\n if self._toggle_statuses.get(valve) is None:\n self._toggle_statuses[valve] = 0\n self._toggle_statuses[valve] = self.set_gpio_outputs(self._toggle_statuses[valve], valve)\n\n def get_toggle_statuses(self):\n \"\"\"\n Get and update toggle statuses, system information, and store them to a file.\n\n Returns:\n dict: The updated toggle statuses.\n\n Example:\n updated_statuses = instance.get_toggle_statuses()\n \"\"\"\n if \"valves\" not in self._toggle_statuses:\n self.set_valves([])\n\n self.check_empty_toggle(\"out1\")\n self.check_empty_toggle(\"out2\")\n self.check_empty_toggle(\"out3\")\n self.check_empty_toggle(\"out4\")\n\n self._toggle_statuses[\"server_time\"] = str(datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\"))\n self._toggle_statuses[\"tz\"] = self.get_timezone()\n self._toggle_statuses[\"hw_id\"] = RPI_HW_ID\n\n logger.info(f\"Valves statuses:{self._toggle_statuses}\")\n self.store_toggle_statuses_to_file()\n\n return self._toggle_statuses\n\n def set_gpio_outputs(self, status, valve):\n \"\"\"\n Set GPIO outputs for a specified valve.\n\n Args:\n status (int): The status to be set (0 or 1).\n valve (str): The name of the valve.\n\n Returns:\n int: The modified status.\n\n Example:\n modified_status = instance.set_gpio_outputs(1, \"out1\")\n \"\"\"\n status = bool(status in (1, 2))\n logger.info(f\"Set Output of Valve: {valve}::{status}\")\n if ARCH == \"arm\":\n if valve == \"out2\":\n logger.info(f\"===========> Setting PIN 11 GPIO.output...{status}\")\n # RuntimeError: Please set pin numbering mode using GPIO.setmode(GPIO.BOARD) or GPIO.setmode(GPIO.BCM)\n GPIO.output(11, status)\n logger.info(f\"===========> PIN 11 Status GPIO.input: {GPIO.input(11)}\")\n return 1 if status is True else 0\n\n def toggle(self, status, valve):\n \"\"\"\n Toggle a valve, set GPIO outputs, update toggle statuses, and store them to a file.\n\n Args:\n status (int): The new status to be set (0 or 1).\n valve (str): The name of the valve.\n\n Returns:\n str: A confirmation message.\n\n Example:\n confirmation = instance.toggle(1, \"out1\")\n \"\"\"\n status = self.set_gpio_outputs(status, valve)\n self._toggle_statuses[valve] = status\n logger.info(f\"Modified valves statuses: {self._toggle_statuses}\")\n self.store_toggle_statuses_to_file()\n return \"OK\"\n\n @property\n def is_connected_to_inet(self):\n \"\"\"\n Get the current internet connection status.\n\n Returns:\n bool: True if connected, False otherwise.\n\n Example:\n connection_status = instance.is_connected_to_inet()\n \"\"\"\n return self._is_connected_to_inet\n\n @is_connected_to_inet.setter\n def is_connected_to_inet(self, value):\n \"\"\"\n Set the current internet connection status.\n\n Returns:\n None\n\n Example:\n instance.is_connected_to_inet = connection_status\n \"\"\"\n self._is_connected_to_inet = value\n\n def system_reboot(self):\n \"\"\"\n Reboot the system after a 2-second delay.\n \"\"\"\n logger.info(\"Rebooting in 2 seconds...\")\n time.sleep(2)\n try:\n subprocess.run([\"reboot\"], stdout=subprocess.PIPE, text=True, check=True)\n except Exception as e:\n logger.error(f\"Error rebooting: {e}\")\n\n def system_update(self):\n \"\"\"\n Update the system through git.\n \"\"\"\n logger.info(\"Git update code and restart...\")\n try:\n subprocess.run([\"/usr/bin/git\", \"pull\"], stdout=subprocess.PIPE, text=True, check=True)\n os.kill(os.getpid(), signal.SIGTERM)\n except Exception as e:\n logger.error(f\"Error updating git: 
{e}\")\n\n def checking_for_duplicate_ssids(self, ssid, ap_array):\n \"\"\"\n Check for duplicate SSIDs in the list of WiFi networks.\n\n Args:\n ssid (str): The SSID to check.\n ap_array (list): The list of WiFi networks.\n\n Returns:\n bool: True if a duplicate is found, False otherwise.\n\n Example:\n is_duplicate = instance.checking_for_duplicate_ssids(\"MyWiFi\", wifi_networks)\n \"\"\"\n for wifi in ap_array:\n if wifi[\"ssid\"] == ssid:\n return True\n return False\n\n def scan_rpi_wifi_networks(self, refresh=False):\n \"\"\"\n Scan for available WiFi networks and update the instance's _ap_array attribute.\n\n Args:\n refresh (bool): If True, force a refresh of the WiFi networks list.\n\n Returns:\n list: The updated list of WiFi networks.\n\n Example:\n wifi_networks = instance.scan_rpi_wifi_networks()\n \"\"\"\n self._ap_array = []\n index = 0\n if not os.path.exists(NETWORKS_FILE):\n refresh = True\n if refresh:\n if ARCH == \"arm\":\n with subprocess.Popen([\"iwlist\", \"scan\"], stdout=subprocess.PIPE) as iwlist_raw:\n ap_list, err = iwlist_raw.communicate()\n if err is not None:\n logger.error(f\"Popen error: {err}\")\n return self._ap_array\n logger.debug(f\"iwlist scan command output: {ap_list}\")\n for line in ap_list.decode(\"utf-8\").rsplit(\"\\n\"):\n logger.debug(f\"Line: {line}\")\n if \"ESSID\" in line:\n ap_ssid = line[27:-1]\n if ap_ssid != \"\" and not self.checking_for_duplicate_ssids(ap_ssid, self._ap_array):\n index += 1\n logger.info(f\"id = {index}, ssid = {ap_ssid}\")\n wifi_network = {\"id\": index, \"ssid\": str(ap_ssid)}\n self._ap_array.append(json.loads(json.dumps(wifi_network)))\n self.store_wifi_networks_to_file()\n else:\n self._ap_array = []\n else:\n self.load_wifi_networks_from_file()\n\n return self._ap_array\n\n def store_wpa_ssid_key(self, ssid, wifi_key):\n \"\"\"\n Store the WPA SSID and key, and update the WPA supplicant configuration.\n\n Args:\n ssid (str): The SSID of the WiFi network.\n wifi_key (str): The key/password of the WiFi network.\n\n Returns:\n bool: True if the update is successful, False otherwise.\n\n Example:\n success = instance.store_wpa_ssid_key(\"MyWiFi\", \"MyPassword\")\n \"\"\"\n try:\n logger.info(f\"ssid: {ssid}, wifi_key: {wifi_key}\")\n return self.update_wpa_supplicant(ssid, wifi_key)\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def is_raspberry_pi_zero(self):\n \"\"\"\n Check whether we're hosted in an RPi Zero or not.\n \"\"\"\n try:\n with open(\"/proc/cpuinfo\", encoding=\"utf8\") as cpuinfo:\n for line in cpuinfo:\n if line.startswith(\"Model\"):\n model_info = line.strip().split(\":\")\n model_name = model_info[1].strip()\n return \"Raspberry Pi Zero\" in model_name\n return False\n except FileNotFoundError as fnfex:\n logger.error(f\"Error: {fnfex}\")\n return False\n\n def write_wpa_supplicant(self, ssid, wifi_key):\n \"\"\"\n Write the WPA supplicant configuration to a temporary file.\n\n Args:\n ssid (str): The SSID of the WiFi network.\n wifi_key (str): The key/password of the WiFi network.\n \"\"\"\n with open(WPA_SUPL_CONF_TMP, \"w\", encoding=\"utf8\") as temp_conf_file:\n temp_conf_file.write(\"ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev\\n\")\n temp_conf_file.write(\"update_config=1\\n\")\n temp_conf_file.write(\"\\n\")\n temp_conf_file.write(\"network={\\n\")\n temp_conf_file.write('\tssid=\"' + str(ssid) + '\"\\n')\n if wifi_key == \"\":\n temp_conf_file.write(\"\tkey_mgmt=NONE\\n\")\n else:\n temp_conf_file.write('\tpsk=\"' + str(wifi_key) + 
'\"\\n')\n temp_conf_file.write(\"}\\n\")\n temp_conf_file.close()\n\n def get_wireless_interface(self):\n \"\"\"\n Get the wireless interface name of the device.\n\n Returns:\n str: The wireless interface name.\n\n Example:\n interface_name = instance.get_wireless_interface()\n \"\"\"\n try:\n ifconfig_output = subprocess.check_output([\"ifconfig\"]).decode(\"utf-8\")\n wireless_interfaces = re.findall(r\"wlan[0-9]+\", ifconfig_output)\n if wireless_interfaces:\n return wireless_interfaces[0]\n except subprocess.CalledProcessError as ex:\n logger.error(f\"Error: {ex}\")\n raise\n return None\n\n def update_wpa_supplicant(self, ssid, wifi_key):\n \"\"\"\n Update the WPA supplicant configuration and check for internet connectivity.\n\n Args:\n ssid (str): The SSID of the WiFi network.\n wifi_key (str): The key/password of the WiFi network.\n\n Returns:\n bool: True if connected to the internet after the update, False otherwise.\n\n Example:\n connected = instance.update_wpa_supplicant(\"MyWiFi\", \"MyPassword\")\n \"\"\"\n try:\n self._is_connected_to_inet = False\n if RUNNING_UNIT_TESTS and ssid == DUMMY_SSID and wifi_key == DUMMY_PASSKEY:\n return True\n # In case of Raspberry Pi Zero NetworkManager stucks. So let's go with the wap_supplicant\n # modification approach.\n if self.is_raspberry_pi_zero():\n self.write_wpa_supplicant(ssid, wifi_key)\n os.system(\n \"cp /etc/wpa_supplicant/wpa_supplicant.conf \\\n /etc/wpa_supplicant/wpa_supplicant.conf.bak\"\n )\n os.system(\"cp \" + WPA_SUPL_CONF_TMP + \" /etc/wpa_supplicant/wpa_supplicant.conf\")\n wpa_cli_cmd = \"sudo wpa_cli -i wlan0 reconfigure\"\n output = subprocess.check_output(wpa_cli_cmd, shell=True)\n logger.info(f\"Output of command {wpa_cli_cmd}:{output.decode('utf8')}\")\n else:\n wpa_cli_cmd = f\"sudo nmcli device wifi connect {ssid} password {wifi_key}\"\n output = subprocess.check_output(wpa_cli_cmd, shell=True)\n logger.info(f\"Output of command `{wpa_cli_cmd}:{output.decode('utf8')}`\")\n\n wireless_interface = self.get_wireless_interface()\n logger.info(f\"wireless_interface `{wireless_interface}`\")\n wpa_cli_cmd = f\"wpa_cli -i {wireless_interface} status | grep state | cut -d'=' -f2\"\n logger.info(f\"Command to run: `{wpa_cli_cmd}`\")\n retries = 0\n while retries < 30:\n retries = retries + 1\n output = subprocess.check_output(wpa_cli_cmd, shell=True)\n logger.info(f\"Output of command `{wpa_cli_cmd}`:{output.decode('utf8')}\")\n if str(output.decode(\"utf8\")) == \"COMPLETED\\n\":\n self._is_connected_to_inet = True\n else:\n time.sleep(2)\n\n logger.info(f\"Connected to internet: {self._is_connected_to_inet}\")\n return self._is_connected_to_inet\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def sleep_and_reboot_for_wpa(self):\n \"\"\"\n Sleep for a short period and then reboot the system.\n \"\"\"\n self.system_reboot()"
},
{
"identifier": "MQTT_TOPIC_STATUS",
"path": "app/raspi/const.py",
"snippet": "MQTT_TOPIC_STATUS = MQTT_TOPIC_BASE + load_env_variable(\"MQTT_TOPIC_STATUS\", \"/status\")"
},
{
"identifier": "MQTT_STATUS_ERR",
"path": "app/raspi/const.py",
"snippet": "MQTT_STATUS_ERR = '{\"sts\": 1, \"err\": '"
},
{
"identifier": "MQTT_END",
"path": "app/raspi/const.py",
"snippet": "MQTT_END = \"}\""
},
{
"identifier": "MQTT_TOPIC_CMD",
"path": "app/raspi/const.py",
"snippet": "MQTT_TOPIC_CMD = MQTT_TOPIC_BASE + load_env_variable(\"MQTT_TOPIC_CMD\", \"/command\")"
},
{
"identifier": "MQTT_TOPIC_VALVES",
"path": "app/raspi/const.py",
"snippet": "MQTT_TOPIC_VALVES = MQTT_TOPIC_BASE + load_env_variable(\"MQTT_TOPIC_VALVES\", \"/valves\")"
},
{
"identifier": "MQTT_CLIENT_ID",
"path": "app/raspi/const.py",
"snippet": "MQTT_CLIENT_ID = \"RaspirriV1-MQTT-Client\" + str(uuid.uuid4())"
},
{
"identifier": "MQTT_USER",
"path": "app/raspi/const.py",
"snippet": "MQTT_USER = load_env_variable(\"MQTT_USER\", \"user\")"
},
{
"identifier": "MQTT_PASS",
"path": "app/raspi/const.py",
"snippet": "MQTT_PASS = load_env_variable(\"MQTT_PASS\", \"pass\")"
},
{
"identifier": "MQTT_HOST",
"path": "app/raspi/const.py",
"snippet": "MQTT_HOST = load_env_variable(\"MQTT_HOST\", \"localhost\")"
},
{
"identifier": "MQTT_PORT",
"path": "app/raspi/const.py",
"snippet": "MQTT_PORT = load_env_variable(\"MQTT_PORT\", \"1883\")"
},
{
"identifier": "STATUSES_FILE",
"path": "app/raspi/const.py",
"snippet": "STATUSES_FILE = \"statuses.pkl\""
}
] | import threading
import os
from app.raspi.mqtt import Mqtt
from app.raspi.helpers import Helpers
from app.raspi.const import (
MQTT_TOPIC_STATUS,
MQTT_STATUS_ERR,
MQTT_END,
MQTT_TOPIC_CMD,
MQTT_TOPIC_VALVES,
MQTT_CLIENT_ID,
MQTT_USER,
MQTT_PASS,
MQTT_HOST,
MQTT_PORT,
STATUSES_FILE,
) | 9,685 |
def test_mqtt_singleton(self):
"""
Test that Mqtt object is a singleton.
"""
mqtt_instance1 = Mqtt()
mqtt_instance2 = Mqtt()
assert mqtt_instance1 is mqtt_instance2
def test_mqtt_destroy_instance(self):
"""
Test that Mqtt object can be destroyed.
"""
mqtt_instance = Mqtt()
mqtt_instance.destroy_instance()
assert mqtt_instance.get_mqtt_thread() is None
assert mqtt_instance.get_periodic_updates_thread() is None
def test_mqtt_set_and_get_thread(self):
"""
Test that Mqtt thread can be set and retrieved.
"""
def dummy_target_function():
pass
mqtt_instance = Mqtt()
thread = threading.Thread(target=dummy_target_function)
mqtt_instance.set_mqtt_thread(thread)
assert mqtt_instance.get_mqtt_thread() is thread
def test_mqtt_on_disconnect(self, mocker):
"""
Test that MQTT OnDisconnect method is called.
"""
mqtt_instance = Mqtt()
client = mocker.Mock()
data = mocker.Mock()
return_code = 0
mqtt_instance.on_disconnect(client, data, return_code)
client.connected_flag = False
assert client.connected_flag is False
def test_mqtt_on_connect_non_zero_result_code(self, mocker):
"""
Test that MQTT OnConnect method returns a non-zero result code.
"""
mqtt_instance = Mqtt()
client = mocker.Mock()
userdata = mocker.Mock()
flags = mocker.Mock()
return_code = 1
mqtt_instance.on_connect(client, userdata, flags, return_code)
assert client.connected_flag is True
def test_mqtt_handle_valves_exception(self, mocker):
"""
Test that MQTT HandleValves method raises an exception.
"""
mqtt_instance = Mqtt()
client = mocker.Mock()
data = mocker.Mock()
mocker.patch.object(Helpers, "set_valves", side_effect=Exception("Test Exception"))
mqtt_instance.handle_valves(client, data)
client.publish.assert_called_with(MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + "Test Exception" + MQTT_END, qos=2, retain=True)
def test_on_connect_subscribes_to_topics(self, mocker):
"""
Test that MQTT OnConnect method subscribes to topics.
"""
mqtt_instance = Mqtt()
client_mock = mocker.Mock()
userdata_mock = mocker.Mock()
flags_mock = mocker.Mock()
return_code = 0
mqtt_instance.on_connect(client_mock, userdata_mock, flags_mock, return_code)
client_mock.subscribe.assert_called_with(MQTT_TOPIC_VALVES)
def test_on_connect_starts_periodic_updates_thread(self, mocker):
"""
Test that MQTT OnConnect method starts periodic updates thread.
"""
mqtt_instance = Mqtt()
client_mock = mocker.Mock()
userdata_mock = mocker.Mock()
flags_mock = mocker.Mock()
return_code = 0
mqtt_instance.on_connect(client_mock, userdata_mock, flags_mock, return_code)
assert mqtt_instance.get_periodic_updates_thread().is_alive() is True
def test_on_message_handles_commands(self, mocker):
"""
Test that MQTT OnMessage method handles valves, config, command, and sys commands.
"""
mqtt_instance = Mqtt()
client_mock = mocker.Mock()
userdata_mock = mocker.Mock()
msg_mock = mocker.Mock()
msg_mock.topic = MQTT_TOPIC_CMD
msg_mock.payload.decode.return_value = '{"cmd": 1, "out": 1}'
mqtt_instance.on_message(client_mock, userdata_mock, msg_mock)
assert os.path.exists(STATUSES_FILE), f"The file '{STATUSES_FILE}' does not exist."
def test_mqtt_init(self, mocker):
"""
Test that MQTT Init method initializes MQTT client and connects to the broker.
"""
# Mock the necessary dependencies
mocker.patch("app.raspi.mqtt.logger")
mock_mqtt = mocker.patch("app.raspi.mqtt.mqtt.Client")
mock_client = mock_mqtt.return_value
mock_services = mocker.patch("app.raspi.mqtt.Services")
mock_services.return_value.load_program_cycles_if_exists.side_effect = [None, {"program": "data"}]
# Create an instance of Mqtt and call the mqtt_init method
mqtt_instance = Mqtt()
mqtt_instance.mqtt_init()
# Assert that the necessary methods were called
| """MIT License
Copyright (c) 2023, Marios Karagiannopoulos
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
**Attribution Requirement:**
When using or distributing the software, an attribution to Marios Karagiannopoulos must be included.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
class TestMqtt:
"""
Unit tests for the Mqtt class.
"""
def test_mqtt_singleton(self):
"""
Test that Mqtt object is a singleton.
"""
mqtt_instance1 = Mqtt()
mqtt_instance2 = Mqtt()
assert mqtt_instance1 is mqtt_instance2
def test_mqtt_destroy_instance(self):
"""
Test that Mqtt object can be destroyed.
"""
mqtt_instance = Mqtt()
mqtt_instance.destroy_instance()
assert mqtt_instance.get_mqtt_thread() is None
assert mqtt_instance.get_periodic_updates_thread() is None
def test_mqtt_set_and_get_thread(self):
"""
Test that Mqtt thread can be set and retrieved.
"""
def dummy_target_function():
pass
mqtt_instance = Mqtt()
thread = threading.Thread(target=dummy_target_function)
mqtt_instance.set_mqtt_thread(thread)
assert mqtt_instance.get_mqtt_thread() is thread
def test_mqtt_on_disconnect(self, mocker):
"""
Test that MQTT OnDisconnect method is called.
"""
mqtt_instance = Mqtt()
client = mocker.Mock()
data = mocker.Mock()
return_code = 0
mqtt_instance.on_disconnect(client, data, return_code)
client.connected_flag = False
assert client.connected_flag is False
def test_mqtt_on_connect_non_zero_result_code(self, mocker):
"""
Test that MQTT OnConnect method returns a non-zero result code.
"""
mqtt_instance = Mqtt()
client = mocker.Mock()
userdata = mocker.Mock()
flags = mocker.Mock()
return_code = 1
mqtt_instance.on_connect(client, userdata, flags, return_code)
assert client.connected_flag is True
def test_mqtt_handle_valves_exception(self, mocker):
"""
Test that MQTT HandleValves method raises an exception.
"""
mqtt_instance = Mqtt()
client = mocker.Mock()
data = mocker.Mock()
mocker.patch.object(Helpers, "set_valves", side_effect=Exception("Test Exception"))
mqtt_instance.handle_valves(client, data)
client.publish.assert_called_with(MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + "Test Exception" + MQTT_END, qos=2, retain=True)
def test_on_connect_subscribes_to_topics(self, mocker):
"""
Test that MQTT OnConnect method subscribes to topics.
"""
mqtt_instance = Mqtt()
client_mock = mocker.Mock()
userdata_mock = mocker.Mock()
flags_mock = mocker.Mock()
return_code = 0
mqtt_instance.on_connect(client_mock, userdata_mock, flags_mock, return_code)
client_mock.subscribe.assert_called_with(MQTT_TOPIC_VALVES)
def test_on_connect_starts_periodic_updates_thread(self, mocker):
"""
Test that MQTT OnConnect method starts periodic updates thread.
"""
mqtt_instance = Mqtt()
client_mock = mocker.Mock()
userdata_mock = mocker.Mock()
flags_mock = mocker.Mock()
return_code = 0
mqtt_instance.on_connect(client_mock, userdata_mock, flags_mock, return_code)
assert mqtt_instance.get_periodic_updates_thread().is_alive() is True
def test_on_message_handles_commands(self, mocker):
"""
Test that MQTT OnMessage method handles valves, config, command, and sys commands.
"""
mqtt_instance = Mqtt()
client_mock = mocker.Mock()
userdata_mock = mocker.Mock()
msg_mock = mocker.Mock()
msg_mock.topic = MQTT_TOPIC_CMD
msg_mock.payload.decode.return_value = '{"cmd": 1, "out": 1}'
mqtt_instance.on_message(client_mock, userdata_mock, msg_mock)
assert os.path.exists(STATUSES_FILE), f"The file '{STATUSES_FILE}' does not exist."
def test_mqtt_init(self, mocker):
"""
Test that MQTT Init method initializes MQTT client and connects to the broker.
"""
# Mock the necessary dependencies
mocker.patch("app.raspi.mqtt.logger")
mock_mqtt = mocker.patch("app.raspi.mqtt.mqtt.Client")
mock_client = mock_mqtt.return_value
mock_services = mocker.patch("app.raspi.mqtt.Services")
mock_services.return_value.load_program_cycles_if_exists.side_effect = [None, {"program": "data"}]
# Create an instance of Mqtt and call the mqtt_init method
mqtt_instance = Mqtt()
mqtt_instance.mqtt_init()
# Assert that the necessary methods were called | mock_mqtt.assert_called_with(client_id=MQTT_CLIENT_ID, clean_session=True) | 7 | 2023-12-22 08:06:09+00:00 | 12k |
bclavie/RAGatouille | ragatouille/RAGTrainer.py | [
{
"identifier": "LateInteractionModel",
"path": "ragatouille/models/base.py",
"snippet": "class LateInteractionModel(ABC):\n @abstractmethod\n def __init__(\n self,\n pretrained_model_name_or_path: Union[str, Path],\n n_gpu,\n ):\n ...\n\n @abstractmethod\n def train():\n ...\n\n @abstractmethod\n def index(self, name: str, collection: list[str]):\n ...\n\n @abstractmethod\n def add_to_index(self):\n ...\n\n @abstractmethod\n def search(self, name: str, query: Union[str, list[str]]):\n ...\n\n @abstractmethod\n def _search(self, name: str, query: str):\n ...\n\n @abstractmethod\n def _batch_search(self, name: str, queries: list[str]):\n ..."
},
{
"identifier": "ColBERT",
"path": "ragatouille/models/colbert.py",
"snippet": "class ColBERT(LateInteractionModel):\n def __init__(\n self,\n pretrained_model_name_or_path: Union[str, Path],\n n_gpu: int = -1,\n index_name: Optional[str] = None,\n verbose: int = 1,\n load_from_index: bool = False,\n **kwargs,\n ):\n self.verbose = verbose\n self.collection = None\n if n_gpu == -1:\n n_gpu = 1 if torch.cuda.device_count() == 0 else torch.cuda.device_count()\n\n if load_from_index:\n ckpt_config = ColBERTConfig.load_from_index(\n str(pretrained_model_name_or_path)\n )\n self.config = ckpt_config\n self.run_config = RunConfig(\n nranks=n_gpu, experiment=self.config.experiment, root=self.config.root\n )\n self.checkpoint = self.config.checkpoint\n self.index_name = self.config.index_name\n self.collection = self._get_collection_from_file(\n str(pretrained_model_name_or_path / \"collection.json\")\n )\n else:\n ckpt_config = ColBERTConfig.load_from_checkpoint(\n str(pretrained_model_name_or_path)\n )\n self.run_config = RunConfig(\n nranks=n_gpu, experiment=\"colbert\", root=\".ragatouille/\"\n )\n local_config = ColBERTConfig(**kwargs)\n self.config = ColBERTConfig.from_existing(\n ckpt_config,\n local_config,\n )\n self.checkpoint = pretrained_model_name_or_path\n self.index_name = index_name\n\n self.run_context = Run().context(self.run_config)\n self.run_context.__enter__() # Manually enter the context\n self.searcher = None\n\n def _update_index(self, new_documents: list[str], searcher: Searcher):\n updater = IndexUpdater(\n config=self.config, searcher=searcher, checkpoint=self.checkpoint\n )\n updater.add(new_documents)\n updater.persist_to_disk()\n\n def _get_collection_from_file(self, collection_path: str):\n return srsly.read_json(collection_path)\n\n def _write_collection_to_file(self, collection, collection_path: str):\n srsly.write_json(collection_path, collection)\n\n def add_to_index(\n self,\n new_documents: list[str],\n index_name: Optional[str] = None,\n ):\n self.index_name = index_name if index_name is not None else self.index_name\n if self.index_name is None:\n print(\n \"Cannot add to index without an index_name! 
Please provide one.\",\n \"Returning empty results.\",\n )\n return None\n\n print(\n \"WARNING: add_to_index support is currently experimental!\",\n \"add_to_index support will be more thorough in future versions\",\n )\n\n searcher = Searcher(\n checkpoint=self.checkpoint,\n config=None,\n collection=self.collection,\n index=self.index_name,\n verbose=self.verbose,\n )\n new_documents = list(set(new_documents))\n current_len = len(searcher.collection)\n new_doc_len = len(new_documents)\n\n if (\n current_len + new_doc_len < 5000\n or new_doc_len > current_len * 0.05\n or current_len + new_doc_len\n > 100 # Export bug handler -- TODO: Remove this requirement\n ):\n new_documents += [x for x in searcher.collection]\n self.index(\n new_documents,\n index_name=self.index_name,\n max_document_length=self.config.doc_maxlen,\n overwrite=\"force_silent_overwrite\",\n )\n else:\n self._update_index(new_documents, searcher)\n\n print(\n f\"Successfully updated index with {new_doc_len} new documents!\\n\",\n f\"New index size: {new_doc_len + current_len}\",\n )\n\n return str(\n Path(self.run_config.root)\n / Path(self.run_config.experiment)\n / \"indexes\"\n / self.index_name\n )\n\n def index(\n self,\n collection: list[str],\n index_name: Optional[\"str\"] = None,\n max_document_length: int = 256,\n overwrite: Union[bool, str] = \"reuse\",\n ):\n self.config.doc_maxlen = max_document_length\n if index_name is not None:\n if self.index_name is not None:\n print(\n f\"New index_name received!\",\n f\"Updating current index_name ({self.index_name}) to {index_name}\",\n )\n self.index_name = index_name\n else:\n if self.index_name is None:\n print(\n f\"No index_name received!\",\n f\"Using default index_name ({self.checkpoint}_new_index)\",\n )\n self.index_name = self.checkpoint + \"new_index\"\n\n collection = list(set(collection))\n self.collection = collection\n\n nbits = 2\n if len(collection) < 5000:\n nbits = 8\n elif len(collection) < 10000:\n nbits = 4\n self.config = ColBERTConfig.from_existing(\n self.config, ColBERTConfig(nbits=nbits)\n )\n self.indexer = Indexer(\n checkpoint=self.checkpoint,\n config=self.config,\n verbose=self.verbose,\n )\n self.indexer.index(\n name=self.index_name, collection=collection, overwrite=overwrite\n )\n\n index_path = str(\n Path(self.run_config.root)\n / Path(self.run_config.experiment)\n / \"indexes\"\n / self.index_name\n )\n self._write_collection_to_file(collection, index_path + \"/collection.json\")\n print(\"Done indexing!\")\n\n def _load_searcher(\n self,\n index_name: Optional[str],\n force_fast: bool = False,\n ):\n if index_name is not None:\n if self.index_name is not None:\n print(\n f\"New index_name received!\",\n f\"Updating current index_name ({self.index_name}) to {index_name}\",\n )\n self.index_name = index_name\n else:\n if self.index_name is None:\n print(\n \"Cannot search without an index_name! 
Please provide one.\",\n \"Returning empty results.\",\n )\n return None\n print(\n f\"Loading searcher for index {self.index_name} for the first time...\",\n \"This may take a few seconds\",\n )\n self.searcher = Searcher(\n checkpoint=self.checkpoint,\n config=None,\n collection=self.collection,\n index=self.index_name,\n )\n\n if not force_fast:\n if len(self.searcher.collection) < 10000:\n self.searcher.configure(ncells=4)\n self.searcher.configure(centroid_score_threshold=0.4)\n self.searcher.configure(ndocs=512)\n elif len(self.searcher.collection) < 100000:\n self.searcher.configure(ncells=2)\n self.searcher.configure(centroid_score_threshold=0.45)\n self.searcher.configure(ndocs=1024)\n # Otherwise, use defaults for k\n else:\n # Use fast settingss\n self.searcher.configure(ncells=1)\n self.searcher.configure(centroid_score_threshold=0.5)\n self.searcher.configure(ndocs=256)\n\n print(\"Searcher loaded!\")\n\n def search(\n self,\n query: Union[str, list[str]],\n index_name: Optional[\"str\"] = None,\n k: int = 10,\n force_fast: bool = False,\n zero_index_ranks: bool = False,\n ):\n if self.searcher is None or (\n index_name is not None and self.index_name != index_name\n ):\n self._load_searcher(index_name=index_name, force_fast=force_fast)\n\n if isinstance(query, str):\n results = [self._search(query, k)]\n else:\n results = self._batch_search(query, k)\n\n to_return = []\n\n for result in results:\n result_for_query = []\n for id_, rank, score in zip(*result):\n result_for_query.append(\n {\n \"content\": self.searcher.collection[id_],\n \"score\": score,\n \"rank\": rank - 1 if zero_index_ranks else rank,\n }\n )\n to_return.append(result_for_query)\n\n if len(to_return) == 1:\n return to_return[0]\n return to_return\n\n def _search(self, query: str, k: int):\n return self.searcher.search(query, k=k)\n\n def _batch_search(self, query: list[str], k: int):\n queries = {i: x for i, x in enumerate(query)}\n results = self.searcher.search_all(queries, k=k)\n results = [\n [list(zip(*value))[i] for i in range(3)]\n for value in results.todict().values()\n ]\n return results\n\n def train(self, data_dir, training_config: ColBERTConfig):\n training_config = ColBERTConfig.from_existing(self.config, training_config)\n training_config.nway = 2\n with Run().context(self.run_config):\n trainer = Trainer(\n triples=str(data_dir / \"triples.train.colbert.jsonl\"),\n queries=str(data_dir / \"queries.train.colbert.tsv\"),\n collection=str(data_dir / \"corpus.train.colbert.tsv\"),\n config=training_config,\n )\n\n trainer.train(checkpoint=self.checkpoint)\n\n def __del__(self):\n # Clean up context\n self.run_context.__exit__(None, None, None)"
},
{
"identifier": "HardNegativeMiner",
"path": "ragatouille/negative_miners/base.py",
"snippet": "class HardNegativeMiner(ABC):\n @abstractmethod\n def export_index(self, path: Union[str, Path]) -> bool:\n ...\n\n @abstractmethod\n def mine_hard_negatives(\n self,\n queries: list[str],\n collection: list[str],\n neg_k: int,\n ):\n ...\n\n @abstractmethod\n def _mine(\n self,\n queries: list[str],\n k: int,\n ):\n ..."
},
{
"identifier": "SimpleMiner",
"path": "ragatouille/negative_miners/simpleminer.py",
"snippet": "class SimpleMiner(HardNegativeMiner):\n \"\"\"The simplest approach to hard negatives mining.\n Select the most appropriate, small-sized embedding model for the target language.\n And retrieve random negatives in the top 10-100 results.\n Strong baseline for quick, low-engineering hard negative mining.\"\"\"\n\n def __init__(\n self,\n language_code: str,\n model_size: Literal[\"small\", \"base\", \"large\"] = \"small\",\n ) -> None:\n self.n_gpu = torch.cuda.device_count()\n self.target_language = language_code\n self.model_size = model_size\n if language_code not in [\"en\", \"zh\"]:\n language_code = \"other\"\n self.model_name = f\"{language_code}_{model_size}\"\n hub_model = DenseModels[self.model_name].value\n print(f\"Loading Hard Negative SimpleMiner dense embedding model {hub_model}...\")\n self.model = SentenceTransformer(hub_model)\n self.has_index = False\n self.min_rank = 10\n\n def build_index(\n self,\n collection,\n batch_size: int = 128,\n save_index: bool = False,\n save_path: Union[str, Path] = None,\n force_fp32: bool = True,\n ):\n print(f\"Building hard negative index for {len(collection)} documents...\")\n if len(collection) > 1000:\n pool = self.model.start_multi_process_pool()\n embeds = self.model.encode_multi_process(\n collection, pool, batch_size=batch_size\n )\n self.model.stop_multi_process_pool(pool)\n else:\n embeds = self.model.encode(collection, batch_size=batch_size)\n\n print(\"All documents embedded, now adding to index...\")\n\n self.max_rank = min(110, int(len(collection) // 10))\n self.max_rank = min(self.max_rank, len(collection))\n\n storage_type = StorageDataType.Float32\n if len(collection) > 500000 and not force_fp32:\n storage_type = StorageDataType.E4M3\n\n self.voyager_index = Index(\n Space.Cosine,\n num_dimensions=self.model.get_sentence_embedding_dimension(),\n storage_data_type=storage_type,\n )\n\n self.corpus_map = {i: doc for i, doc in enumerate(collection)}\n id_to_vector = {}\n for i, emb in enumerate(embeds):\n id_to_vector[i] = emb\n self.corpus_map[i] = collection[i]\n del embeds\n\n self.voyager_index.add_items(\n vectors=[x for x in id_to_vector.values()],\n ids=[x for x in id_to_vector.keys()],\n num_threads=-1,\n )\n\n del id_to_vector\n\n if save_index:\n print(f\"Saving index to {save_path}...\")\n self.export_index(save_path)\n else:\n print(\"save_index set to False, skipping saving hard negative index\")\n print(\"Hard negative index generated\")\n self.has_index = True\n\n def query_index(self, query, top_k=110):\n results = self.voyager_index.query(\n query, k=min(top_k, self.voyager_index.__len__())\n )\n return results\n\n def mine_hard_negatives(\n self,\n queries: Union[list[str], str],\n collection: Optional[list[str]] = None,\n save_index: bool = False,\n save_path: Union[str, Path] = None,\n force_fp32: bool = True,\n ):\n if self.has_index is False and collection is not None:\n self.build_index(\n collection,\n save_index=save_index,\n save_path=save_path,\n force_fp32=force_fp32,\n )\n if isinstance(queries, str):\n print(\"mining\")\n return self._mine(queries)\n return self._batch_mine(queries)\n\n def _mine(\n self,\n query: str,\n ):\n q_emb = self.model.encode(query)\n query_results = self.query_index(q_emb, top_k=self.max_rank)\n if len(query_results) > self.min_rank:\n query_results = query_results[self.min_rank : self.max_rank]\n query_results = [self.corpus_map[x] for x in query_results[0]]\n return query_results\n\n def _batch_mine(\n self,\n queries: list[str],\n ):\n \"\"\"Separate 
function to parallelise later on\"\"\"\n print(f\"Retrieving hard negatives for {len(queries)} queries...\")\n results = []\n print(\"Embedding queries...\")\n query_embeddings = self.model.encode(queries, show_progress_bar=True)\n print(\"Retrieving hard negatives...\")\n for q_emb in tqdm(query_embeddings):\n query_results = self.query_index(q_emb, top_k=self.max_rank)\n query_results = query_results[self.min_rank : self.max_rank]\n query_results = [self.corpus_map[x.id] for x in query_results]\n results.append(query_results)\n print(f\"\"\"Done generating hard negatives.\"\"\")\n return results\n\n def export_index(self, path: Union[str, Path]) -> bool:\n self.voyager_index.save(path)\n return True"
},
{
"identifier": "seeded_shuffle",
"path": "ragatouille/utils.py",
"snippet": "def seeded_shuffle(collection: list, seed: int = 42):\n random.seed(seed)\n random.shuffle(collection)\n return collection"
},
{
"identifier": "TrainingDataProcessor",
"path": "ragatouille/data/training_data_processor.py",
"snippet": "class TrainingDataProcessor:\n def __init__(\n self,\n collection: list[str],\n queries: list[str],\n negative_miner=None,\n ):\n self.collection = collection\n self.queries = queries\n self.negative_miner = negative_miner\n self._make_data_map()\n self.training_triplets = []\n\n def process_raw_data(\n self,\n raw_data,\n data_type: Literal[\"pairs\", \"triplets\", \"labeled_pairs\"],\n data_dir: Union[str, Path],\n export: bool = True,\n mine_hard_negatives: bool = True,\n num_new_negatives: int = 10,\n positive_label: int = 1,\n negative_label: int = 0,\n hard_negative_minimum_rank: int = 10,\n ):\n self.negative_miner.min_rank = hard_negative_minimum_rank\n if self.negative_miner is None and mine_hard_negatives:\n raise ValueError(\n \"mine_hard_negatives is True but no negative miner was provided!\"\n )\n if data_type == \"pairs\":\n self._process_raw_pairs(\n raw_data=raw_data,\n mine_hard_negatives=mine_hard_negatives,\n n_new_negatives=num_new_negatives,\n )\n elif data_type == \"labeled_pairs\":\n self._process_raw_labeled_pairs(\n raw_data=raw_data,\n mine_hard_negatives=mine_hard_negatives,\n n_new_negatives=num_new_negatives,\n positive_label=positive_label,\n negative_label=negative_label,\n )\n elif data_type == \"triplets\":\n self._process_raw_triplets(\n raw_data=raw_data,\n mine_hard_negatives=mine_hard_negatives,\n n_new_negatives=num_new_negatives,\n )\n\n if export:\n self.export_training_data(data_dir)\n\n def _make_individual_triplets(self, query, positives, negatives):\n \"\"\"Create the training data in ColBERT(v1) format from raw lists of triplets\"\"\"\n triplets = []\n q = self.query_map[query]\n random.seed(42)\n if len(positives) > 1:\n all_pos_texts = [p for p in positives]\n max_triplets_per_query = 20\n negs_per_positive = max(1, max_triplets_per_query // len(all_pos_texts))\n initial_triplets_count = 0\n for pos in all_pos_texts:\n p = self.passage_map[pos]\n chosen_negs = random.sample(\n negatives, min(len(negatives), negs_per_positive)\n )\n for neg in chosen_negs:\n n = self.passage_map[neg]\n initial_triplets_count += 1\n triplets.append([q, p, n])\n\n extra_triplets_needed = max_triplets_per_query - initial_triplets_count\n while extra_triplets_needed > 0:\n p = self.passage_map[random.choice(all_pos_texts)]\n n = self.passage_map[random.choice(negatives)]\n triplets.append([q, p, n])\n extra_triplets_needed -= 1\n else:\n p = self.passage_map[positives[0]]\n for n in negatives:\n triplets.append([q, p, self.passage_map[n]])\n\n return triplets\n\n def _get_new_negatives(self, query, passages, mine_hard_negatives, n_new_negatives):\n \"\"\"Generate new negatives for each query, using either:\n - The assigned hard negative miner if mine_hard_negatives is True\n - Randomly sampling from the full collection otherwise\n \"\"\"\n if mine_hard_negatives:\n hard_negatives = self.negative_miner.mine_hard_negatives(\n query, n_new_negatives\n )\n candidates = [\n x\n for x in hard_negatives\n if x not in passages[\"positives\"] and x not in passages[\"negatives\"]\n ]\n new_negatives = random.sample(\n candidates,\n min(n_new_negatives, len(candidates)),\n )\n else:\n new_negatives = [\n x\n for x in random.sample(self.collection, n_new_negatives)\n if x not in passages[\"positives\"] and x not in passages[\"negatives\"]\n ]\n\n return new_negatives\n\n def _process_raw_pairs(self, raw_data, mine_hard_negatives, n_new_negatives):\n \"\"\"Convert unlabeled pairs into training triplets.\n It's assumed unlabeled pairs are always in the format (query, 
relevant_passage)\"\"\"\n training_triplets = []\n raw_grouped_triplets = defaultdict(lambda: defaultdict(list))\n\n for query, positive in raw_data:\n if isinstance(positive, str):\n positive = [positive]\n raw_grouped_triplets[query][\"positives\"] += positive\n\n for query, passages in raw_grouped_triplets.items():\n if n_new_negatives > 0:\n passages[\"negatives\"] += self._get_new_negatives(\n query=query,\n passages=passages,\n mine_hard_negatives=mine_hard_negatives,\n n_new_negatives=n_new_negatives,\n )\n training_triplets += self._make_individual_triplets(\n query=query,\n positives=passages[\"positives\"],\n negatives=passages[\"negatives\"],\n )\n self.training_triplets = training_triplets\n\n def _process_raw_labeled_pairs(\n self,\n raw_data,\n mine_hard_negatives,\n n_new_negatives,\n positive_label,\n negative_label,\n ):\n \"\"\"\n Convert labeled pairs intro training triplets.\n Labeled pairs are in the format (query, passage, label)\n \"\"\"\n training_triplets = []\n raw_grouped_triplets = defaultdict(lambda: defaultdict(list))\n\n for query, passage, label in raw_data:\n if isinstance(passage, str):\n passage = [passage]\n if label == positive_label:\n label = \"positives\"\n elif label == negative_label:\n label = \"negatives\"\n else:\n raise ValueError(\n f\"Label {label} must correspond to either positive_label or negative_label!\"\n )\n\n raw_grouped_triplets[query][label] += passage\n\n for query, passages in raw_grouped_triplets.items():\n if n_new_negatives > 0:\n passages[\"negatives\"] += self._get_new_negatives(\n query=query,\n passages=passages,\n mine_hard_negatives=mine_hard_negatives,\n n_new_negatives=n_new_negatives,\n )\n\n training_triplets += self._make_individual_triplets(\n query=query,\n positives=passages[\"positives\"],\n negatives=passages[\"negatives\"],\n )\n self.training_triplets = training_triplets\n\n def _process_raw_triplets(self, raw_data, mine_hard_negatives, n_new_negatives):\n \"\"\"\n Convert raw triplets\n (query, positives : str | list[str], negatives: str | list[str])\n into training triplets.\n \"\"\"\n training_triplets = []\n raw_grouped_triplets = defaultdict(lambda: defaultdict(list))\n for query, positive, negative in raw_data:\n if isinstance(positive, str):\n positive = [positive]\n if isinstance(negative, str):\n negative = [negative]\n\n raw_grouped_triplets[query][\"positives\"] += positive\n raw_grouped_triplets[query][\"negatives\"] += negative\n\n for query, passages in raw_grouped_triplets.items():\n if n_new_negatives > 0:\n passages[\"negatives\"] += self._get_new_negatives(\n query=query,\n passages=passages,\n mine_hard_negatives=mine_hard_negatives,\n n_new_negatives=n_new_negatives,\n )\n training_triplets += self._make_individual_triplets(\n query=query,\n positives=passages[\"positives\"],\n negatives=passages[\"negatives\"],\n )\n self.training_triplets = training_triplets\n\n def _make_data_map(self):\n \"\"\"\n Generate a query_text: query_id and passage_text: passage_id mapping\n To easily generate ColBERT-format training data.\n \"\"\"\n self.query_map = {}\n self.passage_map = {}\n\n for i, query in enumerate(self.queries):\n self.query_map[query] = i\n for i, passage in enumerate(list(self.collection)):\n self.passage_map[passage] = i\n\n def export_training_data(self, path: Union[str, Path]):\n \"\"\"\n Export training data for both training and versioning purposes.\n {path} should ideally be dvc versioned.\n \"\"\"\n\n path = Path(path)\n\n # Create the directory if it does not exist\n 
os.makedirs(path, exist_ok=True)\n\n with open(path / \"queries.train.colbert.tsv\", \"w\") as f:\n for query, idx in self.query_map.items():\n query = query.replace(\"\\t\", \" \").replace(\"\\n\", \" \")\n f.write(f\"{idx}\\t{query}\\n\")\n with open(path / \"corpus.train.colbert.tsv\", \"w\") as f:\n for document, idx in self.passage_map.items():\n document = document.replace(\"\\t\", \" \").replace(\"\\n\", \" \")\n f.write(f\"{idx}\\t{document}\\n\")\n\n srsly.write_jsonl(path / \"triples.train.colbert.jsonl\", self.training_triplets)"
}
] | from pathlib import Path
from typing import Union, Literal, Optional
from colbert.infra import ColBERTConfig
from ragatouille.models import LateInteractionModel, ColBERT
from ragatouille.negative_miners import HardNegativeMiner, SimpleMiner
from ragatouille.utils import seeded_shuffle
from ragatouille.data import TrainingDataProcessor | 7,432 |
class RAGTrainer:
"""Main trainer to fine-tune/train ColBERT models with a few lines."""
model: Union[LateInteractionModel, None] = None
negative_miner: Union[HardNegativeMiner, None] = None
collection: list[str] = []
queries: Union[list[str], None] = None
raw_data: Union[list[tuple], list[list], None] = None
training_triplets: list[list[int]] = list()
def __init__(
self,
model_name: str,
pretrained_model_name: str,
language_code: str = "en",
n_usable_gpus: int = -1,
):
"""
Initialise a RAGTrainer instance. This will load a base model: either an existing ColBERT model to fine-tune or a BERT/RoBERTa-like model to build a new ColBERT model from.
Parameters:
model_name: str - Name of the model to train. This will be used to name the checkpoints and the index.
pretrained_model_name: str - Name of the pretrained model to use as a base. Can be a local path to a checkpoint or a huggingface model name.
language_code: str - Language code of the model to train. This will be used to name the checkpoints and the index.
n_usable_gpus: int - Number of GPUs to use. By default, value is -1, which means use all available GPUs or none if no GPU is available.
Returns:
self (RAGTrainer): The current instance of RAGTrainer, with the base model initialised.
"""
self.model_name = model_name
self.pretrained_model_name = pretrained_model_name
self.language_code = language_code
self.model = ColBERT(
pretrained_model_name_or_path=pretrained_model_name, n_gpu=n_usable_gpus
)
def add_documents(self, documents: list[str]):
self.collection += documents
seeded_shuffle(self.collection)
def export_training_data(self, path: Union[str, Path]):
"""
Manually export the training data processed by prepare_training_data to a given path.
Parameters:
path: Union[str, Path] - Path to the directory where the data will be exported."""
self.data_processor.export_training_data(path)
def prepare_training_data(
self,
raw_data: Union[list[tuple], list[list]],
all_documents: Optional[list[str]] = None,
data_out_path: Union[str, Path] = "./data/",
num_new_negatives: int = 10,
hard_negative_minimum_rank: int = 10,
mine_hard_negatives: bool = True,
hard_negative_model_size: str = "small",
pairs_with_labels: bool = False,
positive_label: Union[int, str] = 1,
negative_label: Union[int, str] = 0,
) -> str:
"""
Fully pre-process input-data in various raw formats into ColBERT-ready files and triplets.
        Will accept a variety of formats, such as unannotated pairs, annotated pairs, triplets of strings and triplets of lists of strings.
Will process into a ColBERT-ready format and export to data_out_path.
Will generate hard negatives if mine_hard_negatives is True.
        num_new_negatives decides how many negatives will be generated. If mine_hard_negatives is False and num_new_negatives is > 0, these negatives will be randomly sampled.
Parameters:
raw_data: Union[list[tuple], list[list]] - List of pairs, annotated pairs, or triplets of strings.
all_documents: Optional[list[str]] - A corpus of documents to be used for sampling negatives.
data_out_path: Union[str, Path] - Path to the directory where the data will be exported (can be a tmp directory).
num_new_negatives: int - Number of new negatives to generate for each query.
mine_hard_negatives: bool - Whether to use hard negatives mining or not.
hard_negative_model_size: str - Size of the model to use for hard negatives mining.
pairs_with_labels: bool - Whether the raw_data is a list of pairs with labels or not.
positive_label: Union[int, str] - Label to use for positive pairs.
negative_label: Union[int, str] - Label to use for negative pairs.
Returns:
data_out_path: Union[str, Path] - Path to the directory where the data has been exported.
"""
if all_documents is not None:
self.collection += all_documents
self.data_dir = Path(data_out_path)
if len(raw_data[0]) == 2:
data_type = "pairs"
if pairs_with_labels:
data_type = "labeled_pairs"
elif len(raw_data[0]) == 3:
data_type = "triplets"
else:
raise ValueError("Raw data must be a list of pairs or triplets of strings.")
self.collection += [x[1] for x in raw_data]
if data_type == "triplets":
self.collection += [x[2] for x in raw_data]
self.queries = set([x[0] for x in raw_data])
self.collection = list(set(self.collection))
seeded_shuffle(self.collection)
if mine_hard_negatives:
|
class RAGTrainer:
"""Main trainer to fine-tune/train ColBERT models with a few lines."""
model: Union[LateInteractionModel, None] = None
negative_miner: Union[HardNegativeMiner, None] = None
collection: list[str] = []
queries: Union[list[str], None] = None
raw_data: Union[list[tuple], list[list], None] = None
training_triplets: list[list[int]] = list()
def __init__(
self,
model_name: str,
pretrained_model_name: str,
language_code: str = "en",
n_usable_gpus: int = -1,
):
"""
Initialise a RAGTrainer instance. This will load a base model: either an existing ColBERT model to fine-tune or a BERT/RoBERTa-like model to build a new ColBERT model from.
Parameters:
model_name: str - Name of the model to train. This will be used to name the checkpoints and the index.
pretrained_model_name: str - Name of the pretrained model to use as a base. Can be a local path to a checkpoint or a huggingface model name.
language_code: str - Language code of the model to train. This will be used to name the checkpoints and the index.
n_usable_gpus: int - Number of GPUs to use. By default, value is -1, which means use all available GPUs or none if no GPU is available.
Returns:
self (RAGTrainer): The current instance of RAGTrainer, with the base model initialised.
"""
self.model_name = model_name
self.pretrained_model_name = pretrained_model_name
self.language_code = language_code
self.model = ColBERT(
pretrained_model_name_or_path=pretrained_model_name, n_gpu=n_usable_gpus
)
def add_documents(self, documents: list[str]):
self.collection += documents
seeded_shuffle(self.collection)
def export_training_data(self, path: Union[str, Path]):
"""
Manually export the training data processed by prepare_training_data to a given path.
Parameters:
path: Union[str, Path] - Path to the directory where the data will be exported."""
self.data_processor.export_training_data(path)
def prepare_training_data(
self,
raw_data: Union[list[tuple], list[list]],
all_documents: Optional[list[str]] = None,
data_out_path: Union[str, Path] = "./data/",
num_new_negatives: int = 10,
hard_negative_minimum_rank: int = 10,
mine_hard_negatives: bool = True,
hard_negative_model_size: str = "small",
pairs_with_labels: bool = False,
positive_label: Union[int, str] = 1,
negative_label: Union[int, str] = 0,
) -> str:
"""
Fully pre-process input-data in various raw formats into ColBERT-ready files and triplets.
        Will accept a variety of formats, such as unannotated pairs, annotated pairs, triplets of strings and triplets of lists of strings.
Will process into a ColBERT-ready format and export to data_out_path.
Will generate hard negatives if mine_hard_negatives is True.
        num_new_negatives decides how many negatives will be generated. If mine_hard_negatives is False and num_new_negatives is > 0, these negatives will be randomly sampled.
Parameters:
raw_data: Union[list[tuple], list[list]] - List of pairs, annotated pairs, or triplets of strings.
all_documents: Optional[list[str]] - A corpus of documents to be used for sampling negatives.
data_out_path: Union[str, Path] - Path to the directory where the data will be exported (can be a tmp directory).
num_new_negatives: int - Number of new negatives to generate for each query.
mine_hard_negatives: bool - Whether to use hard negatives mining or not.
hard_negative_model_size: str - Size of the model to use for hard negatives mining.
pairs_with_labels: bool - Whether the raw_data is a list of pairs with labels or not.
positive_label: Union[int, str] - Label to use for positive pairs.
negative_label: Union[int, str] - Label to use for negative pairs.
Returns:
data_out_path: Union[str, Path] - Path to the directory where the data has been exported.
"""
if all_documents is not None:
self.collection += all_documents
self.data_dir = Path(data_out_path)
if len(raw_data[0]) == 2:
data_type = "pairs"
if pairs_with_labels:
data_type = "labeled_pairs"
elif len(raw_data[0]) == 3:
data_type = "triplets"
else:
raise ValueError("Raw data must be a list of pairs or triplets of strings.")
self.collection += [x[1] for x in raw_data]
if data_type == "triplets":
self.collection += [x[2] for x in raw_data]
self.queries = set([x[0] for x in raw_data])
self.collection = list(set(self.collection))
seeded_shuffle(self.collection)
if mine_hard_negatives: | self.negative_miner = SimpleMiner( | 3 | 2023-12-29 16:26:42+00:00 | 12k |
shibing624/chatgpt-webui | main.py | [
{
"identifier": "http_proxy",
"path": "src/config.py",
"snippet": "def retrieve_openai_api(api_key=None):\ndef retrieve_proxy(proxy=None):\ndef update_doc_config(two_column_pdf):"
},
{
"identifier": "get_model",
"path": "src/models.py",
"snippet": "def get_model(\n model_name,\n lora_model_path=None,\n access_key=None,\n temperature=None,\n top_p=None,\n system_prompt=None,\n user_name=\"\",\n original_model=None,\n):\n msg = i18n(\"模型设置为了:\") + f\" {model_name}\"\n model_type = ModelType.get_type(model_name)\n lora_choices = [\"No LoRA\"]\n if model_type != ModelType.OpenAI:\n config.local_embedding = True\n model = original_model\n chatbot = gr.Chatbot.update(label=model_name)\n try:\n if model_type == ModelType.OpenAI:\n logger.info(f\"正在加载OpenAI模型: {model_name}\")\n model = OpenAIClient(\n model_name=model_name,\n api_key=access_key,\n system_prompt=system_prompt,\n user_name=user_name,\n )\n logger.info(f\"OpenAI模型加载完成: {model_name}\")\n elif model_type == ModelType.OpenAIVision:\n logger.info(f\"正在加载OpenAI Vision模型: {model_name}\")\n access_key = os.environ.get(\"OPENAI_API_KEY\", access_key)\n model = OpenAIVisionClient(\n model_name, api_key=access_key, user_name=user_name)\n elif model_type == ModelType.ChatGLM:\n logger.info(f\"正在加载ChatGLM模型: {model_name}\")\n model = ChatGLMClient(model_name, user_name=user_name)\n elif model_type == ModelType.LLaMA:\n logger.info(f\"正在加载LLaMA模型: {model_name}\")\n model = LLaMAClient(model_name, user_name=user_name)\n elif model_type == ModelType.Unknown:\n raise ValueError(f\"未知模型: {model_name}\")\n except Exception as e:\n logger.error(e)\n logger.info(msg)\n presudo_key = hide_middle_chars(access_key)\n if original_model is not None and model is not None:\n model.history = original_model.history\n model.history_file_path = original_model.history_file_path\n return model, msg, chatbot, gr.Dropdown.update(choices=lora_choices, visible=False), access_key, presudo_key"
},
{
"identifier": "postprocess",
"path": "src/overwrites.py",
"snippet": "def postprocess(\n self,\n y,\n):\n \"\"\"\n Parameters:\n y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed.\n Returns:\n List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed.\n \"\"\"\n if y is None:\n return []\n processed_messages = []\n for message_pair in y:\n assert isinstance(\n message_pair, (tuple, list)\n ), f\"Expected a list of lists or list of tuples. Received: {message_pair}\"\n assert (\n len(message_pair) == 2\n ), f\"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}\"\n\n processed_messages.append(\n [\n self._postprocess_chat_messages(message_pair[0], \"user\"),\n self._postprocess_chat_messages(message_pair[1], \"bot\"),\n ]\n )\n return processed_messages"
},
{
"identifier": "postprocess_chat_messages",
"path": "src/overwrites.py",
"snippet": "def postprocess_chat_messages(\n self, chat_message, role: str\n):\n if chat_message is None:\n return None\n elif isinstance(chat_message, (tuple, list)):\n file_uri = chat_message[0]\n if validate_url(file_uri):\n filepath = file_uri\n else:\n filepath = self.make_temp_copy_if_needed(file_uri)\n\n mime_type = client_utils.get_mimetype(filepath)\n return {\n \"name\": filepath,\n \"mime_type\": mime_type,\n \"alt_text\": chat_message[1] if len(chat_message) > 1 else None,\n \"data\": None, # These last two fields are filled in by the frontend\n \"is_file\": True,\n }\n elif isinstance(chat_message, str):\n # chat_message = inspect.cleandoc(chat_message)\n # escape html spaces\n # chat_message = chat_message.replace(\" \", \" \")\n if role == \"bot\":\n chat_message = convert_bot_before_marked(chat_message)\n elif role == \"user\":\n chat_message = convert_user_before_marked(chat_message)\n return chat_message\n else:\n raise ValueError(f\"Invalid message for Chatbot component: {chat_message}\")"
},
{
"identifier": "reload_javascript",
"path": "src/overwrites.py",
"snippet": "def reload_javascript():\n js = javascript_html()\n js += '<script async type=\"module\" src=\"https://cdn.jsdelivr.net/npm/marked/marked.min.js\"></script>'\n js += '<script async type=\"module\" src=\"https://spin.js.org/spin.umd.js\"></script><link type=\"text/css\" href=\"https://spin.js.org/spin.css\" rel=\"stylesheet\" />'\n js += '<script async src=\"https://cdn.jsdelivr.net/npm/@fancyapps/[email protected]/dist/fancybox/fancybox.umd.js\"></script><link rel=\"stylesheet\" href=\"https://cdn.jsdelivr.net/npm/@fancyapps/[email protected]/dist/fancybox/fancybox.css\" />'\n\n meta = \"\"\"\n <meta name=\"apple-mobile-web-app-title\" content=\"ChatGPT-WebUI\">\n <meta name=\"apple-mobile-web-app-capable\" content=\"yes\">\n <meta name=\"application-name\" content=\"ChatGPT-WebUI\">\n <meta name='viewport' content='width=device-width, initial-scale=1.0, user-scalable=no, viewport-fit=cover'>\n <meta name=\"theme-color\" content=\"#ffffff\">\n \"\"\"\n css = css_html()\n\n def template_response(*args, **kwargs):\n res = GradioTemplateResponseOriginal(*args, **kwargs)\n res.body = res.body.replace(b'</head>', f'{meta}{js}</head>'.encode(\"utf8\"))\n res.body = res.body.replace(b'</body>', f'{css}</body>'.encode(\"utf8\"))\n res.init_headers()\n return res\n\n gr.routes.templates.TemplateResponse = template_response"
},
{
"identifier": "get_html",
"path": "src/overwrites.py",
"snippet": "def get_html(filename):\n path = os.path.join(chuanhu_path, \"assets\", \"html\", filename)\n if os.path.exists(path):\n with open(path, encoding=\"utf8\") as file:\n return file.read()\n return \"\""
},
{
"identifier": "MODELS",
"path": "src/presets.py",
"snippet": "class I18nAuto:\n def __init__(self):\n def __call__(self, key):\nCHATGLM_MODEL = None\nCHATGLM_TOKENIZER = None\nLLAMA_MODEL = None\nLLAMA_INFERENCER = None\nINITIAL_SYSTEM_PROMPT = \"You are a helpful assistant.\"\nAPI_HOST = \"api.openai.com\"\nOPENAI_API_BASE = \"https://api.openai.com/v1\"\nCHAT_COMPLETION_URL = \"https://api.openai.com/v1/chat/completions\"\nIMAGES_COMPLETION_URL = \"https://api.openai.com/v1/images/generations\"\nCOMPLETION_URL = \"https://api.openai.com/v1/completions\"\nBALANCE_API_URL = \"https://api.openai.com/dashboard/billing/credit_grants\"\nUSAGE_API_URL = \"https://api.openai.com/dashboard/billing/usage\"\nHISTORY_DIR = os.path.join(pwd_path, '../history')\nTEMPLATES_DIR = os.path.join(pwd_path, '../templates')\nSTANDARD_ERROR_MSG = i18n(\"☹️发生了错误:\") # 错误信息的标准前缀\nGENERAL_ERROR_MSG = i18n(\"获取对话时发生错误,请查看后台日志\")\nERROR_RETRIEVE_MSG = i18n(\"请检查网络连接,或者API-Key是否有效。\")\nCONNECTION_TIMEOUT_MSG = i18n(\"连接超时,无法获取对话。\") # 连接超时\nREAD_TIMEOUT_MSG = i18n(\"读取超时,无法获取对话。\") # 读取超时\nPROXY_ERROR_MSG = i18n(\"代理错误,无法获取对话。\") # 代理错误\nSSL_ERROR_PROMPT = i18n(\"SSL错误,无法获取对话。\") # SSL 错误\nNO_APIKEY_MSG = i18n(\"API key为空,请检查是否输入正确。\") # API key 长度不足 51 位\nNO_INPUT_MSG = i18n(\"请输入对话内容。\") # 未输入对话内容\nBILLING_NOT_APPLICABLE_MSG = i18n(\"账单信息不适用\") # 本地运行的模型返回的账单信息\nTIMEOUT_STREAMING = 60 # 流式对话时的超时时间\nTIMEOUT_ALL = 200 # 非流式对话时的超时时间\nENABLE_STREAMING_OPTION = True # 是否启用选择选择是否实时显示回答的勾选框\nHIDE_MY_KEY = True # 如果你想在UI中隐藏你的 API 密钥,将此值设置为 True\nCONCURRENT_COUNT = 100 # 允许同时使用的用户数量\nSIM_K = 5\nINDEX_QUERY_TEMPRATURE = 1.0\nCHUANHU_TITLE = i18n(\"ChatGPT 🚀\")\nCHUANHU_DESCRIPTION = i18n(\"GitHub: [shibing624/chatgpt-webui](https://github.com/shibing624/chatgpt-webui)\")\nONLINE_MODELS = [\n \"gpt-3.5-turbo\",\n \"gpt-3.5-turbo-16k\",\n \"gpt-3.5-turbo-0301\",\n \"gpt-3.5-turbo-0613\",\n \"gpt-3.5-turbo-1106\",\n \"gpt-4\",\n \"gpt-4-32k\",\n \"gpt-4-1106-preview\",\n \"gpt-4-vision-preview\",\n]\nMODEL_TOKEN_LIMIT = {\n \"gpt-3.5-turbo\": 4096,\n \"gpt-3.5-turbo-16k\": 16384,\n \"gpt-3.5-turbo-0301\": 4096,\n \"gpt-3.5-turbo-0613\": 4096,\n \"gpt-3.5-turbo-1106\": 16384,\n \"gpt-4\": 8192,\n \"gpt-4-32k\": 32768,\n \"gpt-4-1106-preview\": 128000,\n \"gpt-4-vision-preview\": 128000,\n}\nLOCAL_MODELS = {\n \"chatglm3-6b\": \"THUDM/chatglm3-6b\",\n \"llama-2-7b-chat\": \"TheBloke/Llama-2-7B-Chat-GPTQ\",\n \"yi-6b-chat-8bits\": \"01-ai/Yi-6B-Chat-8bits\",\n \"yi-6b-chat\": \"01-ai/Yi-6B-Chat\",\n}\nMODELS = ONLINE_MODELS + list(LOCAL_MODELS.keys())\nDEFAULT_MODEL = 0\nTOKEN_OFFSET = 1000 # 模型的token上限减去这个值,得到软上限。到达软上限之后,自动尝试减少token占用。\nDEFAULT_TOKEN_LIMIT = 3000 # 默认的token上限\nREDUCE_TOKEN_FACTOR = 0.5 # 与模型token上限想乘,得到目标token数。减少token占用时,将token占用减少到目标token数以下。\nREPLY_LANGUAGES = [\n \"简体中文\",\n \"繁體中文\",\n \"English\",\n \"日本語\",\n \"Español\",\n \"Français\",\n \"Deutsch\",\n \"跟随问题语言(不稳定)\"\n]\nHISTORY_NAME_METHODS = [\n i18n(\"根据日期时间\"),\n i18n(\"第一条提问\"),\n i18n(\"模型自动总结(消耗tokens)\"),\n]\nWEBSEARCH_PTOMPT_TEMPLATE = \"\"\"\\\nWeb search results:\n\n{web_results}\nCurrent date: {current_date}\n\nInstructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. 
If the provided search results refer to multiple subjects with the same name, write separate answers for each subject.\nQuery: {query}\nReply in {reply_language}\n\"\"\"\nPROMPT_TEMPLATE = \"\"\"\\\nContext information is below.\n---------------------\n{context_str}\n---------------------\nCurrent date: {current_date}.\nUsing the provided context information, write a comprehensive reply to the given query.\nMake sure to cite results using [number] notation after the reference.\nIf the provided context information refer to multiple subjects with the same name, write separate answers for each subject.\nUse prior knowledge only if the given context didn't provide enough information.\nAnswer the question: {query_str}\nReply in {reply_language}\n\"\"\"\nREFINE_TEMPLATE = \"\"\"\\\nThe original question is as follows: {query_str}\nWe have provided an existing answer: {existing_answer}\nWe have the opportunity to refine the existing answer\n(only if needed) with some more context below.\n------------\n{context_msg}\n------------\nGiven the new context, refine the original answer to better\nReply in {reply_language}\nIf the context isn't useful, return the original answer.\n\"\"\"\nSUMMARIZE_PROMPT = \"\"\"Write a concise summary of the following:\n\n{text}\n\nCONCISE SUMMARY IN 中文:\"\"\"\nSUMMARY_CHAT_SYSTEM_PROMPT = \"\"\"\\\nPlease summarize the following conversation for a chat topic.\nNo more than 16 characters.\nNo special characters.\nPunctuation mark is banned.\nNot including '.' ':' '?' '!' '“' '*' '<' '>'.\nReply in user's language.\n\"\"\"\nALREADY_CONVERTED_MARK = \"<!-- ALREADY CONVERTED BY PARSER. -->\"\nSTART_OF_OUTPUT_MARK = \"<!-- SOO IN MESSAGE -->\"\nEND_OF_OUTPUT_MARK = \"<!-- EOO IN MESSAGE -->\""
},
{
"identifier": "delete_chat_history",
"path": "src/utils.py",
"snippet": " class DataframeData(TypedDict):\nclass ConfigType(Enum):\nclass ConfigItem:\nclass SetupWizard:\ndef predict(current_model, *args):\ndef billing_info(current_model):\ndef set_key(current_model, *args):\ndef load_chat_history(current_model, *args):\ndef delete_chat_history(current_model, *args):\ndef interrupt(current_model, *args):\ndef reset(current_model, *args):\ndef retry(current_model, *args):\ndef delete_first_conversation(current_model, *args):\ndef delete_last_conversation(current_model, *args):\ndef set_system_prompt(current_model, *args):\ndef rename_chat_history(current_model, *args):\ndef auto_name_chat_history(current_model, *args):\ndef export_markdown(current_model, *args):\ndef upload_chat_history(current_model, *args):\ndef set_token_upper_limit(current_model, *args):\ndef set_temperature(current_model, *args):\ndef set_top_p(current_model, *args):\ndef set_n_choices(current_model, *args):\ndef set_stop_sequence(current_model, *args):\ndef set_max_tokens(current_model, *args):\ndef set_presence_penalty(current_model, *args):\ndef set_frequency_penalty(current_model, *args):\ndef set_logit_bias(current_model, *args):\ndef set_user_identifier(current_model, *args):\ndef set_single_turn(current_model, *args):\ndef handle_file_upload(current_model, *args):\ndef handle_summarize_index(current_model, *args):\ndef like(current_model, *args):\ndef dislike(current_model, *args):\ndef count_token(input_str):\ndef markdown_to_html_with_syntax_highlight(md_str): # deprecated\n def replacer(match):\ndef normalize_markdown(md_text: str) -> str: # deprecated\ndef convert_mdtext(md_text): # deprecated\ndef clip_rawtext(chat_message, need_escape=True):\ndef convert_bot_before_marked(chat_message):\ndef convert_user_before_marked(chat_message):\ndef escape_markdown(text):\ndef convert_asis(userinput): # deprecated\ndef detect_converted_mark(userinput): # deprecated\ndef detect_language(code): # deprecated\ndef construct_text(role, text):\ndef construct_user(text):\ndef construct_system(text):\ndef construct_assistant(text):\ndef save_file(filename, model, chatbot):\ndef sorted_by_pinyin(list):\ndef sorted_by_last_modified_time(list, dir):\ndef get_file_names_by_type(dir, filetypes=[\".json\"]):\ndef get_file_names_by_pinyin(dir, filetypes=[\".json\"]):\ndef get_file_names_dropdown_by_pinyin(dir, filetypes=[\".json\"]):\ndef get_file_names_by_last_modified_time(dir, filetypes=[\".json\"]):\ndef get_history_names(user_name=\"\"):\ndef get_first_history_name(user_name=\"\"):\ndef get_history_list(user_name=\"\"):\ndef init_history_list(user_name=\"\"):\ndef filter_history(user_name, keyword):\ndef load_template(filename, mode=0):\ndef get_template_names():\ndef get_template_dropdown():\ndef get_template_content(templates, selection, original_system_prompt):\ndef reset_textbox():\ndef reset_default():\ndef change_api_host(host):\ndef change_proxy(proxy):\ndef hide_middle_chars(s):\ndef submit_key(key):\ndef replace_today(prompt):\ndef get_geoip():\n def fetch_ip():\ndef find_n(lst, max_num):\ndef start_outputing():\ndef end_outputing():\ndef cancel_outputing():\ndef transfer_input(inputs):\ndef update_chuanhu():\ndef add_source_numbers(lst, source_name=\"Source\", use_source=True):\ndef add_details(lst):\ndef sheet_to_string(sheet, sheet_name=None):\ndef excel_to_string(file_path):\ndef get_last_day_of_month(any_day):\ndef get_model_source(model_name, alternative_source):\ndef refresh_ui_elements_on_load(current_model, selected_model_name, user_name):\ndef 
toggle_like_btn_visibility(selected_model_name):\ndef get_corresponding_file_type_by_model_name(selected_model_name):\ndef new_auto_history_filename(username):\ndef get_history_filepath(username):\ndef beautify_err_msg(err_msg):\ndef auth_from_conf(username, password):\ndef get_files_hash(file_src=None, file_paths=None):\ndef myprint(**args):\ndef replace_special_symbols(string, replace_string=\" \"):\n def __init__(self, key, name, default=None, type=ConfigType.String) -> None:\ndef generate_prompt_string(config_item):\ndef generate_result_string(config_item, config_value):\n def __init__(self, file_path=config_file) -> None:\n def set(self, config_items: List[ConfigItem], prompt: str):\n def set_users(self):\n def __setitem__(self, setting_key: str, value):\n def __getitem__(self, setting_key: str):\n def save(self):\ndef setup_wizard():\ndef save_pkl(data, file_path):\ndef load_pkl(file_path):\ndef chinese_preprocessing_func(text: str) -> List[str]:\nSERVER_GEO_IP_MSG = None\nFETCHING_IP = False\n SERVER_GEO_IP_MSG = i18n(\"你可以使用聊天功能。\")\n SERVER_GEO_IP_MSG = \"**您的IP区域:中国。**\"\n SERVER_GEO_IP_MSG = i18n(\"您的IP区域:\") + f\"{country}。\"\n FETCHING_IP = False\n FETCHING_IP = True"
}
] | import gradio as gr
from loguru import logger
from src.config import (
http_proxy,
hide_history_when_not_logged_in,
chat_name_method_index,
my_api_key, multi_api_key, server_name,
server_port, share, config_file, api_host,
authflag,
dockerflag,
show_api_billing,
latex_delimiters_set,
user_avatar, bot_avatar,
update_doc_config,
)
from src.models import get_model
from src.overwrites import (
postprocess, postprocess_chat_messages,
reload_javascript, get_html,
)
from src.presets import (
MODELS,
HISTORY_NAME_METHODS,
small_and_beautiful_theme,
CONCURRENT_COUNT,
CHUANHU_TITLE,
HIDE_MY_KEY,
DEFAULT_MODEL,
REPLY_LANGUAGES,
INITIAL_SYSTEM_PROMPT,
ENABLE_STREAMING_OPTION,
CHUANHU_DESCRIPTION,
favicon_path,
API_HOST,
HISTORY_DIR,
assets_path,
)
from src.utils import (
delete_chat_history,
filter_history,
get_history_list,
auto_name_chat_history,
get_template_dropdown,
rename_chat_history,
init_history_list,
get_first_history_name,
setup_wizard,
auth_from_conf,
get_geoip,
get_template_names,
load_template,
get_history_names,
reset,
predict,
interrupt,
retry,
i18n,
dislike,
toggle_like_btn_visibility,
set_key,
set_single_turn,
hide_middle_chars,
set_system_prompt,
start_outputing,
set_token_upper_limit,
set_temperature,
set_user_identifier,
set_top_p,
delete_first_conversation,
delete_last_conversation,
set_n_choices,
set_logit_bias,
load_chat_history,
end_outputing,
set_max_tokens,
reset_default,
reset_textbox,
set_stop_sequence,
set_presence_penalty, set_frequency_penalty,
upload_chat_history,
export_markdown,
billing_info,
get_template_content,
like,
transfer_input,
handle_file_upload,
handle_summarize_index,
) | 7,899 | regenerate_i18n=i18n("重新生成"),
deleteRound_i18n=i18n("删除这轮问答"),
renameChat_i18n=i18n("重命名该对话"),
validFileName_i18n=i18n("请输入有效的文件名,不要包含以下特殊字符:"),
clearFileHistoryMsg_i18n=i18n("⚠️请先删除知识库中的历史文件,再尝试上传!"),
dropUploadMsg_i18n=i18n("释放文件以上传"),
))
with gr.Box(elem_id="fake-gradio-components", visible=False):
changeSingleSessionBtn = gr.Button(
visible=False, elem_classes="invisible-btn", elem_id="change-single-session-btn")
historySelectBtn = gr.Button(
visible=False, elem_classes="invisible-btn", elem_id="history-select-btn") # Not used
def create_greeting(request: gr.Request):
if hasattr(request, "username") and request.username:
logger.info(f"Get User Name: {request.username}")
user_info, user_name = gr.Markdown.update(
value=f"User: {request.username}"), request.username
else:
user_info, user_name = gr.Markdown.update(
value=f"", visible=False), ""
current_model = get_model(
model_name=MODELS[DEFAULT_MODEL], access_key=my_api_key, user_name=user_name)[0]
if not hide_history_when_not_logged_in or user_name:
loaded_stuff = current_model.auto_load()
else:
loaded_stuff = [gr.update(), gr.update(), gr.Chatbot.update(label=MODELS[DEFAULT_MODEL]),
current_model.single_turn, current_model.temperature, current_model.top_p,
current_model.n_choices, current_model.stop_sequence, current_model.token_upper_limit,
current_model.max_generation_token, current_model.presence_penalty,
current_model.frequency_penalty, current_model.logit_bias, current_model.user_identifier]
return user_info, user_name, current_model, toggle_like_btn_visibility(
DEFAULT_MODEL), *loaded_stuff, init_history_list(user_name)
demo.load(create_greeting, inputs=None, outputs=[
user_info, user_name, current_model, like_dislike_area, saveFileName, systemPromptTxt, chatbot,
single_turn_checkbox, temperature_slider, top_p_slider, n_choices_slider, stop_sequence_txt,
max_context_length_slider, max_generation_slider, presence_penalty_slider, frequency_penalty_slider,
logit_bias_txt, user_identifier_txt, historySelectList], api_name="load")
chatgpt_predict_args = dict(
fn=predict,
inputs=[
current_model,
user_question,
chatbot,
use_streaming_checkbox,
use_websearch_checkbox,
index_files,
language_select_dropdown,
],
outputs=[chatbot, status_display],
show_progress=True,
)
start_outputing_args = dict(
fn=start_outputing,
inputs=[],
outputs=[submitBtn, cancelBtn],
show_progress=True,
)
end_outputing_args = dict(
fn=end_outputing, inputs=[], outputs=[submitBtn, cancelBtn]
)
reset_textbox_args = dict(
fn=reset_textbox, inputs=[], outputs=[user_input]
)
transfer_input_args = dict(
fn=transfer_input, inputs=[user_input], outputs=[
user_question, user_input, submitBtn, cancelBtn], show_progress=True
)
get_usage_args = dict(
fn=billing_info, inputs=[current_model], outputs=[
usageTxt], show_progress=False
)
load_history_from_file_args = dict(
fn=load_chat_history,
inputs=[current_model, historySelectList],
outputs=[saveFileName, systemPromptTxt, chatbot, single_turn_checkbox, temperature_slider, top_p_slider,
n_choices_slider, stop_sequence_txt, max_context_length_slider, max_generation_slider,
presence_penalty_slider, frequency_penalty_slider, logit_bias_txt, user_identifier_txt],
)
refresh_history_args = dict(
fn=get_history_list, inputs=[user_name], outputs=[historySelectList]
)
auto_name_chat_history_args = dict(
fn=auto_name_chat_history,
inputs=[current_model, name_chat_method, user_question, chatbot, single_turn_checkbox],
outputs=[historySelectList],
show_progress=False,
)
# Chatbot
cancelBtn.click(interrupt, [current_model], [])
user_input.submit(
**transfer_input_args).then(
**chatgpt_predict_args).then(
**end_outputing_args).then(
**auto_name_chat_history_args)
user_input.submit(**get_usage_args)
submitBtn.click(**transfer_input_args).then(
**chatgpt_predict_args, api_name="predict").then(
**end_outputing_args).then(
**auto_name_chat_history_args)
submitBtn.click(**get_usage_args)
index_files.upload(handle_file_upload, [current_model, index_files, chatbot, language_select_dropdown], [
index_files, chatbot, status_display])
summarize_btn.click(handle_summarize_index, [
current_model, index_files, chatbot, language_select_dropdown], [chatbot, status_display])
emptyBtn.click(
| # -*- coding: utf-8 -*-
"""
@author:XuMing([email protected])
@description:
"""
gr.Chatbot._postprocess_chat_messages = postprocess_chat_messages
gr.Chatbot.postprocess = postprocess
with gr.Blocks(theme=small_and_beautiful_theme) as demo:
user_name = gr.Textbox("", visible=False)
promptTemplates = gr.State(load_template(get_template_names()[0], mode=2))
user_question = gr.State("")
assert type(my_api_key) == str
user_api_key = gr.State(my_api_key)
current_model = gr.State()
topic = gr.State(i18n("未命名对话历史记录"))
with gr.Row(elem_id="chuanhu-header"):
gr.HTML(get_html("header_title.html").format(
app_title=CHUANHU_TITLE), elem_id="app-title")
status_display = gr.Markdown(get_geoip, elem_id="status-display")
with gr.Row(elem_id="float-display"):
user_info = gr.Markdown(
value="getting user info...", elem_id="user-info")
with gr.Row(equal_height=True, elem_id="chuanhu-body"):
with gr.Column(elem_id="menu-area"):
with gr.Column(elem_id="chuanhu-history"):
with gr.Box():
with gr.Row(elem_id="chuanhu-history-header"):
with gr.Row(elem_id="chuanhu-history-search-row"):
with gr.Column(min_width=150, scale=2):
historySearchTextbox = gr.Textbox(show_label=False, container=False, placeholder=i18n(
"搜索(支持正则)..."), lines=1, elem_id="history-search-tb")
with gr.Column(min_width=52, scale=1, elem_id="gr-history-header-btns"):
uploadFileBtn = gr.UploadButton(
interactive=True, label="", file_types=[".json"], elem_id="gr-history-upload-btn")
historyRefreshBtn = gr.Button("", elem_id="gr-history-refresh-btn")
with gr.Row(elem_id="chuanhu-history-body"):
with gr.Column(scale=6, elem_id="history-select-wrap"):
historySelectList = gr.Radio(
label=i18n("从列表中加载对话"),
choices=get_history_names(),
value=get_first_history_name(),
# multiselect=False,
container=False,
elem_id="history-select-dropdown"
)
with gr.Row(visible=False):
with gr.Column(min_width=42, scale=1):
historyDeleteBtn = gr.Button(
"🗑️", elem_id="gr-history-delete-btn")
with gr.Column(min_width=42, scale=1):
historyDownloadBtn = gr.Button(
"⏬", elem_id="gr-history-download-btn")
with gr.Column(min_width=42, scale=1):
historyMarkdownDownloadBtn = gr.Button(
"⤵️", elem_id="gr-history-mardown-download-btn")
with gr.Row(visible=False):
with gr.Column(scale=6):
saveFileName = gr.Textbox(
show_label=True,
placeholder=i18n("设置文件名: 默认为.json,可选为.md"),
label=i18n("设置保存文件名"),
value=i18n("对话历史记录"),
elem_classes="no-container"
# container=False,
)
with gr.Column(scale=1):
renameHistoryBtn = gr.Button(
i18n("💾 保存对话"), elem_id="gr-history-save-btn")
exportMarkdownBtn = gr.Button(
i18n("📝 导出为 Markdown"), elem_id="gr-markdown-export-btn")
with gr.Column(elem_id="chuanhu-menu-footer"):
with gr.Row(elem_id="chuanhu-func-nav"):
gr.HTML(get_html("func_nav.html"))
# gr.HTML(get_html("footer.html").format(versions=versions_html()), elem_id="footer")
# gr.Markdown(CHUANHU_DESCRIPTION, elem_id="chuanhu-author")
with gr.Column(elem_id="chuanhu-area", scale=5):
with gr.Column(elem_id="chatbot-area"):
with gr.Row(elem_id="chatbot-header"):
model_select_dropdown = gr.Dropdown(
label=i18n("选择模型"), choices=MODELS, multiselect=False, value=MODELS[DEFAULT_MODEL],
interactive=True,
show_label=False, container=False, elem_id="model-select-dropdown"
)
lora_select_dropdown = gr.Dropdown(
label=i18n("选择LoRA模型"), choices=[], multiselect=False, interactive=True,
container=False, visible=False,
)
gr.HTML(get_html("chatbot_header_btn.html").format(
json_label=i18n("历史记录(JSON)"),
md_label=i18n("导出为 Markdown")
), elem_id="chatbot-header-btn-bar")
with gr.Row():
chatbot = gr.Chatbot(
label="ChatGPT",
elem_id="chuanhu-chatbot",
latex_delimiters=latex_delimiters_set,
sanitize_html=False,
# height=700,
show_label=False,
avatar_images=[user_avatar, bot_avatar],
show_share_button=False,
)
with gr.Row(elem_id="chatbot-footer"):
with gr.Box(elem_id="chatbot-input-box"):
with gr.Row(elem_id="chatbot-input-row"):
gr.HTML(get_html("chatbot_more.html").format(
single_turn_label=i18n("单轮对话"),
websearch_label=i18n("在线搜索"),
upload_file_label=i18n("上传文件"),
uploaded_files_label=i18n("知识库文件"),
uploaded_files_tip=i18n("在工具箱中管理知识库文件")
))
with gr.Row(elem_id="chatbot-input-tb-row"):
with gr.Column(min_width=225, scale=12):
user_input = gr.Textbox(
elem_id="user-input-tb",
show_label=False,
placeholder=i18n("在这里输入"),
elem_classes="no-container",
max_lines=5,
# container=False
)
with gr.Column(min_width=42, scale=1, elem_id="chatbot-ctrl-btns"):
submitBtn = gr.Button(
value="", variant="primary", elem_id="submit-btn")
cancelBtn = gr.Button(
value="", variant="secondary", visible=False, elem_id="cancel-btn")
# Note: Buttons below are set invisible in UI. But they are used in JS.
with gr.Row(elem_id="chatbot-buttons", visible=False):
with gr.Column(min_width=120, scale=1):
emptyBtn = gr.Button(
i18n("🧹 新的对话"), elem_id="empty-btn"
)
with gr.Column(min_width=120, scale=1):
retryBtn = gr.Button(
i18n("🔄 重新生成"), elem_id="gr-retry-btn")
with gr.Column(min_width=120, scale=1):
delFirstBtn = gr.Button(i18n("🗑️ 删除最旧对话"))
with gr.Column(min_width=120, scale=1):
delLastBtn = gr.Button(
i18n("🗑️ 删除最新对话"), elem_id="gr-dellast-btn")
with gr.Row(visible=False) as like_dislike_area:
with gr.Column(min_width=20, scale=1):
likeBtn = gr.Button(
"👍", elem_id="gr-like-btn")
with gr.Column(min_width=20, scale=1):
dislikeBtn = gr.Button(
"👎", elem_id="gr-dislike-btn")
with gr.Column(elem_id="toolbox-area", scale=1):
# For CSS setting, there is an extra box. Don't remove it.
with gr.Box(elem_id="chuanhu-toolbox"):
with gr.Row():
gr.Markdown("## " + i18n("工具箱"))
gr.HTML(get_html("close_btn.html").format(
obj="toolbox"), elem_classes="close-btn")
with gr.Tabs(elem_id="chuanhu-toolbox-tabs"):
with gr.Tab(label=i18n("对话")):
with gr.Accordion(label=i18n("模型"), open=not HIDE_MY_KEY, visible=not HIDE_MY_KEY):
keyTxt = gr.Textbox(
show_label=True,
placeholder=f"Your API-key...",
value=hide_middle_chars(user_api_key.value),
type="password",
visible=not HIDE_MY_KEY,
label="API-Key",
)
if multi_api_key:
usageTxt = gr.Markdown(i18n(
"多账号模式已开启,无需输入key,可直接开始对话"), elem_id="usage-display",
elem_classes="insert-block", visible=show_api_billing)
else:
usageTxt = gr.Markdown(i18n(
"**发送消息** 或 **提交key** 以显示额度"), elem_id="usage-display",
elem_classes="insert-block", visible=show_api_billing)
gr.Markdown("---", elem_classes="hr-line", visible=not HIDE_MY_KEY)
with gr.Accordion(label="Prompt", open=True):
systemPromptTxt = gr.Textbox(
show_label=True,
placeholder=i18n("在这里输入System Prompt..."),
label="System prompt",
value=INITIAL_SYSTEM_PROMPT,
lines=8
)
retain_system_prompt_checkbox = gr.Checkbox(
label=i18n("新建对话保留Prompt"), value=False, visible=True,
elem_classes="switch-checkbox")
with gr.Accordion(label=i18n("加载Prompt模板"), open=False):
with gr.Column():
with gr.Row():
with gr.Column(scale=6):
templateFileSelectDropdown = gr.Dropdown(
label=i18n("选择Prompt模板集合文件"),
choices=get_template_names(),
multiselect=False,
value=get_template_names()[0],
container=False,
)
with gr.Column(scale=1):
templateRefreshBtn = gr.Button(
i18n("🔄 刷新"))
with gr.Row():
with gr.Column():
templateSelectDropdown = gr.Dropdown(
label=i18n("从Prompt模板中加载"),
choices=load_template(
get_template_names()[
0], mode=1
),
multiselect=False,
container=False,
)
gr.Markdown("---", elem_classes="hr-line")
with gr.Accordion(label=i18n("知识库"), open=True, elem_id="gr-kb-accordion", visible=True):
use_websearch_checkbox = gr.Checkbox(label=i18n(
"使用在线搜索"), value=False, elem_classes="switch-checkbox", elem_id="gr-websearch-cb",
visible=False)
index_files = gr.Files(label=i18n(
"上传"), type="file",
file_types=[".pdf", ".docx", ".pptx", ".epub", ".xlsx", ".txt", "text", "image"],
elem_id="upload-index-file")
two_column = gr.Checkbox(label=i18n(
"双栏pdf"), value=False)
summarize_btn = gr.Button(i18n("总结"), visible=False)
with gr.Tab(label=i18n("参数")):
gr.Markdown(i18n("# ⚠️ 务必谨慎更改 ⚠️"),
elem_id="advanced-warning")
with gr.Accordion(i18n("参数"), open=True):
temperature_slider = gr.Slider(
minimum=-0,
maximum=2.0,
value=1.0,
step=0.1,
interactive=True,
label="temperature",
)
top_p_slider = gr.Slider(
minimum=-0,
maximum=1.0,
value=1.0,
step=0.05,
interactive=True,
label="top-p",
)
n_choices_slider = gr.Slider(
minimum=1,
maximum=10,
value=1,
step=1,
interactive=True,
label="n choices",
)
stop_sequence_txt = gr.Textbox(
show_label=True,
placeholder=i18n("停止符,用英文逗号隔开..."),
label="stop",
value="",
lines=1,
)
max_context_length_slider = gr.Slider(
minimum=1,
maximum=32768,
value=2000,
step=1,
interactive=True,
label="max context",
)
max_generation_slider = gr.Slider(
minimum=1,
maximum=32768,
value=1000,
step=1,
interactive=True,
label="max generations",
)
presence_penalty_slider = gr.Slider(
minimum=-2.0,
maximum=2.0,
value=0.0,
step=0.01,
interactive=True,
label="presence penalty",
)
frequency_penalty_slider = gr.Slider(
minimum=-2.0,
maximum=2.0,
value=0.0,
step=0.01,
interactive=True,
label="frequency penalty",
)
logit_bias_txt = gr.Textbox(
show_label=True,
placeholder=f"word:likelihood",
label="logit bias",
value="",
lines=1,
)
user_identifier_txt = gr.Textbox(
show_label=True,
placeholder=i18n("用于定位滥用行为"),
label=i18n("用户标识符"),
value=user_name.value,
lines=1,
)
with gr.Tab(label=i18n("关于")):
gr.Markdown("#### " + i18n("ChatGPT WebUI"))
gr.Markdown(CHUANHU_DESCRIPTION)
with gr.Row(elem_id="popup-wrapper"):
with gr.Box(elem_id="chuanhu-popup"):
with gr.Box(elem_id="chuanhu-setting"):
with gr.Row():
gr.Markdown("## " + i18n("设置"))
gr.HTML(get_html("close_btn.html").format(
obj="box"), elem_classes="close-btn")
with gr.Tabs(elem_id="chuanhu-setting-tabs"):
with gr.Tab(label=i18n("高级")):
gr.HTML(get_html("appearance_switcher.html").format(
label=i18n("切换亮暗色主题")), elem_classes="insert-block", visible=False)
use_streaming_checkbox = gr.Checkbox(
label=i18n("实时传输回答"), value=True, visible=ENABLE_STREAMING_OPTION,
elem_classes="switch-checkbox"
)
language_select_dropdown = gr.Dropdown(
label=i18n("选择回复语言(针对搜索&索引功能)"),
choices=REPLY_LANGUAGES,
multiselect=False,
value=REPLY_LANGUAGES[0],
visible=False,
)
name_chat_method = gr.Dropdown(
label=i18n("对话命名方式"),
choices=HISTORY_NAME_METHODS,
multiselect=False,
interactive=True,
value=HISTORY_NAME_METHODS[chat_name_method_index],
)
single_turn_checkbox = gr.Checkbox(label=i18n(
"单轮对话"), value=False, elem_classes="switch-checkbox", elem_id="gr-single-session-cb",
visible=False)
# checkUpdateBtn = gr.Button(i18n("🔄 检查更新..."), visible=check_update)
with gr.Tab(i18n("网络")):
gr.Markdown(
i18n("⚠️ 为保证API-Key安全,请在配置文件`config.json`中修改网络设置"),
elem_id="netsetting-warning")
default_btn = gr.Button(i18n("🔙 恢复默认网络设置"))
                        # Network proxy
proxyTxt = gr.Textbox(
show_label=True,
placeholder=i18n("未设置代理..."),
label=i18n("代理地址"),
value=http_proxy,
lines=1,
interactive=False,
# container=False,
elem_classes="view-only-textbox no-container",
)
# changeProxyBtn = gr.Button(i18n("🔄 设置代理地址"))
                        # Prefer showing the custom api_host if one is set
apihostTxt = gr.Textbox(
show_label=True,
placeholder="api.openai.com",
label="OpenAI API-Host",
value=api_host or API_HOST,
lines=1,
interactive=False,
# container=False,
elem_classes="view-only-textbox no-container",
)
with gr.Tab(label=i18n("关于"), elem_id="about-tab"):
gr.Markdown("# " + i18n("ChatGPT WebUI"))
gr.Markdown(CHUANHU_DESCRIPTION, elem_id="description")
with gr.Box(elem_id="web-config", visible=False):
gr.HTML(get_html('web_config.html').format(
enableCheckUpdate_config=False,
hideHistoryWhenNotLoggedIn_config=hide_history_when_not_logged_in,
forView_i18n=i18n("仅供查看"),
deleteConfirm_i18n_pref=i18n("你真的要删除 "),
deleteConfirm_i18n_suff=i18n(" 吗?"),
usingLatest_i18n=i18n("您使用的就是最新版!"),
updatingMsg_i18n=i18n("正在尝试更新..."),
updateSuccess_i18n=i18n("更新成功,请重启本程序"),
updateFailure_i18n=i18n(
"更新失败,请尝试[手动更新](https://github.com/shibing624/chatgpt-webui/"),
regenerate_i18n=i18n("重新生成"),
deleteRound_i18n=i18n("删除这轮问答"),
renameChat_i18n=i18n("重命名该对话"),
validFileName_i18n=i18n("请输入有效的文件名,不要包含以下特殊字符:"),
clearFileHistoryMsg_i18n=i18n("⚠️请先删除知识库中的历史文件,再尝试上传!"),
dropUploadMsg_i18n=i18n("释放文件以上传"),
))
with gr.Box(elem_id="fake-gradio-components", visible=False):
changeSingleSessionBtn = gr.Button(
visible=False, elem_classes="invisible-btn", elem_id="change-single-session-btn")
historySelectBtn = gr.Button(
visible=False, elem_classes="invisible-btn", elem_id="history-select-btn") # Not used
def create_greeting(request: gr.Request):
if hasattr(request, "username") and request.username:
logger.info(f"Get User Name: {request.username}")
user_info, user_name = gr.Markdown.update(
value=f"User: {request.username}"), request.username
else:
user_info, user_name = gr.Markdown.update(
value=f"", visible=False), ""
current_model = get_model(
model_name=MODELS[DEFAULT_MODEL], access_key=my_api_key, user_name=user_name)[0]
if not hide_history_when_not_logged_in or user_name:
loaded_stuff = current_model.auto_load()
else:
loaded_stuff = [gr.update(), gr.update(), gr.Chatbot.update(label=MODELS[DEFAULT_MODEL]),
current_model.single_turn, current_model.temperature, current_model.top_p,
current_model.n_choices, current_model.stop_sequence, current_model.token_upper_limit,
current_model.max_generation_token, current_model.presence_penalty,
current_model.frequency_penalty, current_model.logit_bias, current_model.user_identifier]
return user_info, user_name, current_model, toggle_like_btn_visibility(
DEFAULT_MODEL), *loaded_stuff, init_history_list(user_name)
demo.load(create_greeting, inputs=None, outputs=[
user_info, user_name, current_model, like_dislike_area, saveFileName, systemPromptTxt, chatbot,
single_turn_checkbox, temperature_slider, top_p_slider, n_choices_slider, stop_sequence_txt,
max_context_length_slider, max_generation_slider, presence_penalty_slider, frequency_penalty_slider,
logit_bias_txt, user_identifier_txt, historySelectList], api_name="load")
chatgpt_predict_args = dict(
fn=predict,
inputs=[
current_model,
user_question,
chatbot,
use_streaming_checkbox,
use_websearch_checkbox,
index_files,
language_select_dropdown,
],
outputs=[chatbot, status_display],
show_progress=True,
)
start_outputing_args = dict(
fn=start_outputing,
inputs=[],
outputs=[submitBtn, cancelBtn],
show_progress=True,
)
end_outputing_args = dict(
fn=end_outputing, inputs=[], outputs=[submitBtn, cancelBtn]
)
reset_textbox_args = dict(
fn=reset_textbox, inputs=[], outputs=[user_input]
)
transfer_input_args = dict(
fn=transfer_input, inputs=[user_input], outputs=[
user_question, user_input, submitBtn, cancelBtn], show_progress=True
)
get_usage_args = dict(
fn=billing_info, inputs=[current_model], outputs=[
usageTxt], show_progress=False
)
load_history_from_file_args = dict(
fn=load_chat_history,
inputs=[current_model, historySelectList],
outputs=[saveFileName, systemPromptTxt, chatbot, single_turn_checkbox, temperature_slider, top_p_slider,
n_choices_slider, stop_sequence_txt, max_context_length_slider, max_generation_slider,
presence_penalty_slider, frequency_penalty_slider, logit_bias_txt, user_identifier_txt],
)
refresh_history_args = dict(
fn=get_history_list, inputs=[user_name], outputs=[historySelectList]
)
auto_name_chat_history_args = dict(
fn=auto_name_chat_history,
inputs=[current_model, name_chat_method, user_question, chatbot, single_turn_checkbox],
outputs=[historySelectList],
show_progress=False,
)
# Chatbot
cancelBtn.click(interrupt, [current_model], [])
user_input.submit(
**transfer_input_args).then(
**chatgpt_predict_args).then(
**end_outputing_args).then(
**auto_name_chat_history_args)
user_input.submit(**get_usage_args)
submitBtn.click(**transfer_input_args).then(
**chatgpt_predict_args, api_name="predict").then(
**end_outputing_args).then(
**auto_name_chat_history_args)
submitBtn.click(**get_usage_args)
index_files.upload(handle_file_upload, [current_model, index_files, chatbot, language_select_dropdown], [
index_files, chatbot, status_display])
summarize_btn.click(handle_summarize_index, [
current_model, index_files, chatbot, language_select_dropdown], [chatbot, status_display])
emptyBtn.click( | reset, | 7 | 2023-12-27 12:14:26+00:00 | 12k |
camenduru/AnyDoor-online-hf | ldm/models/diffusion/ddpm.py | [
{
"identifier": "log_txt_as_img",
"path": "ldm/util.py",
"snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts"
},
{
"identifier": "exists",
"path": "ldm/util.py",
"snippet": "def exists(x):\n return x is not None"
},
{
"identifier": "default",
"path": "ldm/util.py",
"snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d"
},
{
"identifier": "ismap",
"path": "ldm/util.py",
"snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)"
},
{
"identifier": "isimage",
"path": "ldm/util.py",
"snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)"
},
{
"identifier": "mean_flat",
"path": "ldm/util.py",
"snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))"
},
{
"identifier": "count_params",
"path": "ldm/util.py",
"snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params"
},
{
"identifier": "instantiate_from_config",
"path": "ldm/util.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))"
},
{
"identifier": "LitEma",
"path": "ldm/modules/ema.py",
"snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)"
},
{
"identifier": "normal_kl",
"path": "ldm/modules/distributions/distributions.py",
"snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )"
},
{
"identifier": "DiagonalGaussianDistribution",
"path": "ldm/modules/distributions/distributions.py",
"snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean"
},
{
"identifier": "IdentityFirstStage",
"path": "ldm/models/autoencoder.py",
"snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x"
},
{
"identifier": "AutoencoderKL",
"path": "ldm/models/autoencoder.py",
"snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. < ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, 
on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x"
},
{
"identifier": "make_beta_schedule",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()"
},
{
"identifier": "extract_into_tensor",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))"
},
{
"identifier": "noise_like",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()"
},
{
"identifier": "DDIMSampler",
"path": "ldm/models/diffusion/ddim.py",
"snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec"
}
] | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
import torch.nn.functional as F
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler | 9,885 | """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
make_it_fit=False,
ucg_training=None,
reset_ema=False,
reset_num_ema_updates=False,
):
super().__init__()
assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
count_params(self.model, verbose=True)
self.use_ema = use_ema
if self.use_ema:
self.model_ema = LitEma(self.model)
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
self.use_scheduler = scheduler_config is not None
if self.use_scheduler:
self.scheduler_config = scheduler_config
self.v_posterior = v_posterior
self.original_elbo_weight = original_elbo_weight
self.l_simple_weight = l_simple_weight
if monitor is not None:
self.monitor = monitor
self.make_it_fit = make_it_fit
| """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
make_it_fit=False,
ucg_training=None,
reset_ema=False,
reset_num_ema_updates=False,
):
super().__init__()
assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
count_params(self.model, verbose=True)
self.use_ema = use_ema
if self.use_ema:
self.model_ema = LitEma(self.model)
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
self.use_scheduler = scheduler_config is not None
if self.use_scheduler:
self.scheduler_config = scheduler_config
self.v_posterior = v_posterior
self.original_elbo_weight = original_elbo_weight
self.l_simple_weight = l_simple_weight
if monitor is not None:
self.monitor = monitor
self.make_it_fit = make_it_fit | if reset_ema: assert exists(ckpt_path) | 1 | 2023-12-25 04:48:34+00:00 | 12k |
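The `ldm/models/diffusion/ddpm.py` record above builds its noise schedule through `make_beta_schedule` (quoted in the record's context list). As a hedged, standalone sketch of just the "linear" branch of that snippet and the cumulative product the sampler later relies on — an aside for the reader, not part of the dataset row:

```python
# Linear beta schedule as in the make_beta_schedule snippet from the
# record's context, plus the derived cumulative alphas used by DDPM.
import torch

def linear_betas(n_timestep=1000, linear_start=1e-4, linear_end=2e-2):
    # betas are spaced linearly in sqrt-space, then squared (matches the snippet)
    return (
        torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
    )

betas = linear_betas()
alphas = 1.0 - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)  # \bar{alpha}_t, registered as a buffer by DDPM
print(betas[0].item(), betas[-1].item(), alphas_cumprod[-1].item())
```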
pangxincheng/TaskManager | task_manager/main.py | [
{
"identifier": "CoreManager",
"path": "task_manager/manager/core.py",
"snippet": "class CoreManager(mp.Process):\n\n def __init__(\n self,\n core_manager_addr: str,\n gpu_manager_addr: str=\"ipc://gpu_manager\",\n task_manager_addr: str=\"ipc://task_manager\",\n log_dir: str=\"logs\",\n log_level: str=\"INFO\",\n ) -> None:\n mp.Process.__init__(self)\n assert core_manager_addr.startswith(\"tcp://\") or core_manager_addr.startswith(\"ipc://\"), \\\n \"core manager address must start with tcp:// or ipc://\"\n assert gpu_manager_addr.startswith(\"tcp://\") or gpu_manager_addr.startswith(\"ipc://\"), \\\n \"gpu manager address must start with tcp:// or ipc://\"\n assert task_manager_addr.startswith(\"tcp://\") or task_manager_addr.startswith(\"ipc://\"), \\\n \"task manager address must start with tcp:// or ipc://\"\n self.core_manager_addr = core_manager_addr\n self.gpu_manager_addr = gpu_manager_addr\n self.task_manager_addr = task_manager_addr\n self.log_dir = log_dir\n self.log_level = log_level\n\n def _init_manager(self) -> None:\n\n self.logger = common_utils.get_logger(\n logger_name=\"core_manager\",\n log_level=self.log_level,\n handler=os.path.join(self.log_dir, \"core_manager.log\")\n )\n\n self.logger.info(f\"CoreManager is listening on {self.core_manager_addr}\")\n self._core_manager = zmq_utils.ZMQServer(\n addr=self.core_manager_addr,\n )\n time.sleep(1)\n\n self.logger.info(f\"GPUManager is listening on {self.gpu_manager_addr}\")\n self._gpu_manager = zmq_utils.ZMQServer(\n addr=self.gpu_manager_addr,\n )\n\n self.logger.info(f\"TaskManager is listening on {self.task_manager_addr}\")\n self._task_manager = zmq_utils.ZMQServer(\n addr=self.task_manager_addr,\n )\n\n self.watched_gpus = {}\n self.watched_tasks = {}\n\n pycuda_drv.init()\n self.running = True\n\n def run(self) -> None:\n self._init_manager()\n while self.running:\n identity, msg = self._core_manager.recv_binary()\n command = common_utils.byte_msg_to_dict(msg)\n self.logger.info(f\"receive command to call {command['function']}\")\n return_msg = self.exception_wrapper(\n fn=getattr(self, command[\"function\"], self._default_fn),\n *command.get(\"args\", {}),\n **command.get(\"kwargs\", {})\n )\n self._core_manager.send_binary(\n any=common_utils.dict_to_byte_msg(return_msg),\n identity=identity\n )\n\n def exception_wrapper(self, fn, *args, **kwargs) -> Dict[str, Any]:\n try:\n return fn(*args, **kwargs)\n except Exception as e:\n self.logger.error(f\"Exception when call {fn.__name__}\")\n self.logger.exception(e)\n return {\n \"status\": 400,\n \"result\": f\"Exception when call {fn.__name__}, the excption is \" + str(e)\n }\n \n def _default_fn(self, *args, **kwargs) -> None:\n raise NotImplementedError(\"This function is not implemented\")\n\n def exit(self) -> Dict[str, Any]:\n self.logger.info(\"=> [info] exit core server...\")\n self.running = False\n return_msg = {\n \"watched_gpus\": {},\n \"watched_tasks\": {}\n }\n for identity in self.watched_gpus.keys():\n self._gpu_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"exit\"\n }),\n identity=identity\n )\n identity_, msg = self._gpu_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n return_msg[\"watched_gpus\"][identity] = msg\n for identity in self.watched_tasks.keys():\n self._task_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"exit\"\n }),\n identity=identity\n )\n identity_, msg = self._task_manager.recv_binary()\n identity_ = 
identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n return_msg[\"watched_tasks\"][identity] = msg\n return {\n \"status\": 200,\n \"result\": {\n \"msg\": \"👋bye~\",\n \"watched_gpus\": return_msg\n }\n }\n\n def get_gpus_info_by_identities(self, identities: List[str], info_level: str=\"simple\") -> Dict[str, Any]:\n if len(identities) == 0:\n identities = list(self.watched_gpus.keys())\n assert len(identities) == len(set(identities)), \"identities should not contain duplicate elements\"\n\n return_msg = {}\n for identity in identities:\n if identity not in self.watched_gpus.keys():\n return_msg[identity] = {\n \"status\": 400,\n \"result\": f\"Could not find a watch dog with identity {identity}\"\n }\n else:\n self._gpu_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"get_gpu_info\",\n \"kwargs\": {\n \"info_level\": info_level\n }\n }),\n identity=identity\n )\n identity_, msg = self._gpu_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n return_msg[identity] = msg\n return {\n \"status\": 200,\n \"result\": return_msg\n }\n \n def get_gpus_info_by_device_ids(self, device_ids: List[int], info_level: str=\"simple\") -> Dict[str, Any]:\n if len(device_ids) == 0:\n device_ids = list(range(pycuda_drv.Device.count()))\n assert len(device_ids) == len(set(device_ids)), \"device_ids should not contain duplicate elements\"\n assert all([device_id >= 0 and device_id < pycuda_drv.Device.count() for device_id in device_ids]), \\\n \"The device_id should be in the valid range\"\n watched_gpu_device_ids = {\n self.watched_gpus[identity][\"device_id\"]: identity\n for identity in self.watched_gpus.keys()\n if self.watched_gpus[identity][\"device_id\"] in device_ids\n }\n unwatched_gpus = sorted(list(set(device_ids) - watched_gpu_device_ids.keys()))\n\n return_msg = {}\n for device_id in watched_gpu_device_ids.keys():\n identity = watched_gpu_device_ids[device_id]\n self._gpu_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"get_gpu_info\",\n \"kwargs\": {\n \"info_level\": info_level\n }\n }),\n identity=identity\n )\n identity_, msg = self._gpu_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n return_msg[identity] = msg\n \n return_msg[\"unwatched\"] = []\n for device_id in unwatched_gpus:\n gpu_device = pycuda_drv.Device(device_id)\n device_msg = {\n \"device_id\": device_id,\n \"device_name\": gpu_device.name(),\n \"total_memory\": common_utils.fmt_bytes(gpu_device.total_memory()),\n \"compute_capability\": float(\"%d.%d\" % gpu_device.compute_capability()),\n }\n if info_level != \"simple\":\n device_attributes_tuples = gpu_device.get_attributes().items()\n device_attributes = {}\n\n for k, v in device_attributes_tuples:\n device_attributes[str(k)] = v\n device_msg[\"device_attributes\"] = device_attributes\n return_msg[\"unwatched\"].append(device_msg)\n \n return {\n \"status\": 200,\n \"result\": return_msg\n }\n \n def start_watch_dog_by_device_ids(self, device_ids: List[int]) -> Dict[str, Any]:\n assert len(device_ids) > 0, \"device_ids should not be empty\"\n assert len(device_ids) == len(set(device_ids)), \"device_ids should not contain duplicate elements\"\n assert all([device_id >= 0 and device_id < pycuda_drv.Device.count() for device_id in 
device_ids]), \\\n \"The device_id should be in the valid range\"\n watched_gpu_device_ids = {\n self.watched_gpus[identity][\"device_id\"]: identity\n for identity in self.watched_gpus.keys()\n if self.watched_gpus[identity][\"device_id\"] in device_ids\n }\n return_msg = {}\n for device_id in device_ids:\n if device_id in watched_gpu_device_ids.keys():\n return_msg[watched_gpu_device_ids[device_id]] = {\n \"status\": 400,\n \"result\": f\"GPU{device_id} is already being watched by {watched_gpu_device_ids[device_id]}\"\n }\n else:\n timestamp = str(time.time())\n identity = common_utils.md5(f\"watch_dog_{device_id}_{timestamp}\")\n watchdog = GPUManager(\n identity=identity,\n device_id=device_id,\n gpu_manager_addr=self.gpu_manager_addr,\n )\n watchdog.start()\n identity_, msg = self._gpu_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n self.watched_gpus[identity] = {\n \"device_id\": device_id,\n \"watchdog\": watchdog,\n \"timestamp\": timestamp\n }\n return_msg[identity] = msg\n \n return {\n \"status\": 200,\n \"result\": return_msg\n }\n\n def stop_watch_dog_by_identities(self, identities: List[str]) -> Dict[str, Any]:\n if len(identities) == 0:\n identities = list(self.watched_gpus.keys())\n assert len(identities) == len(set(identities)), \"identities should not contain duplicate elements\"\n \n return_msg = {}\n for identity in identities:\n if identity not in self.watched_gpus.keys():\n return_msg[identity] = {\n \"status\": 400,\n \"result\": f\"Could not find a watch dog with identity {identity}\"\n }\n else:\n self._gpu_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"exit\",\n }),\n identity=identity\n )\n identity_, msg = self._gpu_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n self.watched_gpus[identity][\"watchdog\"].join()\n del self.watched_gpus[identity]\n return_msg[identity] = msg\n return {\n \"status\": 200,\n \"result\": return_msg\n }\n \n def mem_alloc_by_identities(self, identities: List[str], chunk_sizes: List[int], max_sizes: List[int], units: List[str]) -> Dict[str, Any]:\n assert len(identities) == len(chunk_sizes) == len(max_sizes) == len(units), \"The lengths of identities, chunk_sizes, max_sizes and units should be equal\"\n assert all([unit in [\"B\", \"KiB\", \"MiB\", \"GiB\"] for unit in units]), \"The unit should be one of B, KiB, MiB and GiB\"\n assert all([chunk_size > 0 for chunk_size in chunk_sizes]), \"The chunk_size should be positive\"\n assert all([max_size > 0 for max_size in max_sizes]), \"The max_size should be positive\"\n assert all([chunk_size <= max_size for chunk_size, max_size in zip(chunk_sizes, max_sizes)]), \"The chunk_size should be less than or equal to max_size\"\n assert len(identities) == len(set(identities)), \"identities should not contain duplicate elements\"\n assert all([identity in self.watched_gpus.keys() for identity in identities]), \"The identity should be in the valid range\"\n return_msg = {}\n for identity, chunk_size, max_size, unit in zip(identities, chunk_sizes, max_sizes, units):\n self._gpu_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"mem_alloc\",\n \"kwargs\": {\n \"chunk_size\": chunk_size,\n \"max_size\": max_size,\n \"unit\": unit,\n }\n }),\n identity=identity\n )\n identity_, msg = self._gpu_manager.recv_binary()\n 
identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n return_msg[identity] = msg\n return {\n \"status\": 200,\n \"result\": return_msg\n }\n \n def mem_release_by_identities(self, identities: List[str], mem_sizes: List[int], units: List[str]) -> Dict[str, Any]:\n if len(identities) == 0:\n identities = list(self.watched_gpus.keys())\n assert len(identities) == len(set(identities)), \"identities should not contain duplicate elements\"\n assert len(identities) == len(mem_sizes) == len(units), \"The lengths of identities, mem_sizes and units should be equal\"\n assert all([identity in self.watched_gpus.keys() for identity in identities]), \"The identity should be in the valid range\"\n assert all([mem_size > 0 for mem_size in mem_sizes]), \"The mem_size should be positive\"\n assert all([unit in [\"B\", \"KiB\", \"MiB\", \"GiB\"] for unit in units]), \"The unit should be one of B, KiB, MiB and GiB\"\n return_msg = {}\n for identity, mem_size, unit in zip(identities, mem_sizes, units):\n self._gpu_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"mem_release\",\n \"kwargs\": {\n \"mem_size\": mem_size,\n \"unit\": unit,\n }\n }),\n identity=identity\n )\n identity_, msg = self._gpu_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n return_msg[identity] = msg\n return {\n \"status\": 200,\n \"result\": return_msg\n }\n \n def start_preemptive_by_identities(self, identities, chunk_sizes: List[int], max_sizes: List[int], units: List[str], auto_close: bool=False) -> Dict[str, Any]:\n assert len(identities) == len(chunk_sizes) == len(max_sizes) == len(units), \"The lengths of identities, chunk_sizes, max_sizes and units should be equal\"\n assert all([unit in [\"B\", \"KiB\", \"MiB\", \"GiB\"] for unit in units]), \"The unit should be one of B, KiB, MiB and GiB\"\n assert all([chunk_size > 0 for chunk_size in chunk_sizes]), \"The chunk_size should be positive\"\n assert all([max_size > 0 for max_size in max_sizes]), \"The max_size should be positive\"\n assert all([chunk_size <= max_size for chunk_size, max_size in zip(chunk_sizes, max_sizes)]), \"The chunk_size should be less than or equal to max_size\"\n assert len(identities) == len(set(identities)), \"identities should not contain duplicate elements\"\n assert all([identity in self.watched_gpus.keys() for identity in identities]), \"The identity should be in the valid range\"\n return_msg = {}\n for identity, chunk_size, max_size, unit in zip(identities, chunk_sizes, max_sizes, units):\n self._gpu_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"start_preemptive\",\n \"kwargs\": {\n \"chunk_size\": chunk_size,\n \"max_size\": max_size,\n \"unit\": unit,\n \"auto_close\": auto_close,\n }\n }),\n identity=identity\n )\n identity_, msg = self._gpu_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n return_msg[identity] = msg\n return {\n \"status\": 200,\n \"result\": return_msg\n }\n \n def stop_preemptive_by_identities(self, identities: List[str]) -> Dict[str, Any]:\n assert len(identities) == len(set(identities)), \"identities should not contain duplicate elements\"\n assert all([identity in self.watched_gpus.keys() for identity in identities]), \"The identity should be in the valid 
range\"\n return_msg = {}\n for identity in identities:\n self._gpu_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"stop_preemptive\",\n }),\n identity=identity\n )\n identity_, msg = self._gpu_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n return_msg[identity] = msg\n return {\n \"status\": 200,\n \"result\": return_msg\n }\n\n def add_task(self, user_args: List[str], stdout_file: str, stderr_file: str) -> Dict[str, Any]:\n assert len(user_args) > 0, \"user_args should not be empty\"\n timestamp = str(time.time())\n identity = common_utils.md5(\"_\".join(user_args) + timestamp)\n watchdog = TaskManager(\n identity=identity,\n core_manager_addr=self.core_manager_addr,\n task_manager_addr=self.task_manager_addr,\n user_args=user_args,\n stdout_file=stdout_file,\n stderr_file=stderr_file\n )\n watchdog.start()\n identity_, msg = self._task_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n self.watched_tasks[identity] = {\n \"watchdog\": watchdog,\n \"user_args\": user_args,\n \"timestamp\": timestamp,\n }\n return {\n \"status\": 200,\n \"result\": {\n identity: msg\n }\n }\n\n def remove_task_by_task_daemon(self, identity: str, msg: str, return_code: int) -> Dict[str, Any]:\n assert identity in self.watched_tasks.keys(), \"The identity should be in the valid range\"\n self._task_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"exit\",\n }),\n identity=identity\n )\n identity_, msg = self._task_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n self.watched_tasks[identity][\"watchdog\"].join()\n del self.watched_tasks[identity]\n return {\n \"status\": 200,\n \"result\": {\n \"identity\": identity,\n \"msg\": msg,\n \"return_code\": return_code,\n }\n }\n\n def remove_tasks(self, identities: List[str]) -> Dict[str, Any]:\n assert len(identities) == len(set(identities)), \"identities should not contain duplicate elements\"\n assert all([identity in self.watched_tasks.keys() for identity in identities]), \"The identity should be in the valid range\"\n return_msg = {}\n for identity in identities:\n self._task_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"exit\",\n }),\n identity=identity\n )\n identity_, msg = self._task_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n return_msg[identity] = msg\n self.watched_tasks[identity][\"watchdog\"].join()\n del self.watched_tasks[identity]\n return {\n \"status\": 200,\n \"result\": return_msg\n }\n\n def get_task_info_by_identities(self, identities: List[str]) -> Dict[str, Any]:\n if len(identities) == 0:\n identities = list(self.watched_tasks.keys())\n assert len(identities) == len(set(identities)), \"identities should not contain duplicate elements\"\n assert all([identity in self.watched_tasks.keys() for identity in identities]), \"The identity should be in the valid range\"\n return_msg = {}\n for identity in identities:\n self._task_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"get_status\",\n }),\n identity=identity\n )\n identity_, msg = self._task_manager.recv_binary()\n identity_ 
= identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n msg[\"result\"][\"user_args\"] = self.watched_tasks[identity][\"user_args\"]\n msg[\"result\"][\"timestamp\"] = self.watched_tasks[identity][\"timestamp\"]\n return_msg[identity] = msg\n return {\n \"status\": 200,\n \"result\": return_msg\n }"
},
{
"identifier": "CLIController",
"path": "task_manager/controller/cli_controller.py",
"snippet": "class CLIController(cmd2.Cmd):\n\n def __init__(\n self,\n core_manager_addr: str,\n log_dir: str=\"logs\",\n log_level: str=\"INFO\",\n ):\n super().__init__()\n self.prompt = \"(🚀task_manager)> \"\n self.core_manager_addr = core_manager_addr\n self.log_dir = log_dir\n self.log_level = log_level\n self.logger = None\n self.client = None\n self.identity = \"cli_controller\"\n\n self._init_controller()\n\n def _init_controller(self):\n self.logger = common_utils.get_logger(\n logger_name=\"cli_controller\",\n log_level=self.log_level,\n handler=os.path.join(self.log_dir, \"cli_controller.log\")\n )\n\n self.logger.info(\"init core client\")\n self.client = zmq_utils.ZMQClient(\n addr=self.core_manager_addr,\n identity=self.identity\n )\n time.sleep(1)\n\n @cmd2.with_argparser(cmd2.Cmd2ArgumentParser())\n def do_exit(self, args):\n \"\"\"Exit the application.\"\"\"\n self.logger.info(\"=> [info] exit cli server...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"exit\",\n \"kwargs\": {},\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n self.client.close()\n return True\n\n ggibi_parser = cmd2.Cmd2ArgumentParser()\n ggibi_parser.add_argument(\"--identities\", type=str, nargs=\"+\", default=[], help=\"identities\")\n ggibi_parser.add_argument(\"--info_level\", type=str, default=\"simple\", help=\"simple or detail\", choices=[\"simple\", \"detail\"])\n @cmd2.with_argparser(ggibi_parser)\n def do_get_gpus_info_by_identities(self, args):\n \"\"\"Get gpu information by identities.\"\"\"\n self.logger.info(\"=> [info] get gpu information by identities...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"get_gpus_info_by_identities\",\n \"kwargs\": {\n \"identities\": args.identities,\n \"info_level\": args.info_level\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n\n ggibdi_parser = cmd2.Cmd2ArgumentParser()\n ggibdi_parser.add_argument(\"--device_ids\", type=int, nargs=\"+\", default=[], help=\"device ids\")\n ggibdi_parser.add_argument(\"--info_level\", type=str, default=\"simple\", help=\"simple or detail\", choices=[\"simple\", \"detail\"])\n @cmd2.with_argparser(ggibdi_parser)\n def do_get_gpus_info_by_device_ids(self, args):\n \"\"\"Get gpu information.\"\"\"\n self.logger.info(\"=> [info] get gpu information...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"get_gpus_info_by_device_ids\",\n \"kwargs\": {\n \"device_ids\": args.device_ids,\n \"info_level\": args.info_level\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n\n start_wdbdi_parser = cmd2.Cmd2ArgumentParser()\n start_wdbdi_parser.add_argument(\"--device_ids\", type=int, nargs=\"+\", help=\"device ids\")\n @cmd2.with_argparser(start_wdbdi_parser)\n def do_start_watch_dog_by_device_ids(self, args):\n \"\"\"Start watch dog by device ids.\"\"\"\n self.logger.info(\"=> [info] start watch dog by device ids...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"start_watch_dog_by_device_ids\",\n \"kwargs\": {\n \"device_ids\": args.device_ids\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n\n stop_wdbi_parser = cmd2.Cmd2ArgumentParser()\n stop_wdbi_parser.add_argument(\"--identities\", type=str, nargs=\"+\", default=[], help=\"identities\")\n @cmd2.with_argparser(stop_wdbi_parser)\n def 
do_stop_watch_dog_by_identities(self, args):\n \"\"\"Stop watch dog by identities.\"\"\"\n self.logger.info(\"=> [info] stop watch dog by identities...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"stop_watch_dog_by_identities\",\n \"kwargs\": {\n \"identities\": args.identities\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n\n mabi_parser = cmd2.Cmd2ArgumentParser()\n mabi_parser.add_argument(\"--identities\", type=str, nargs=\"+\", help=\"device ids\")\n mabi_parser.add_argument(\"--chunk_sizes\", type=int, nargs=\"+\", help=\"chun size\")\n mabi_parser.add_argument(\"--max_sizes\", type=int, nargs=\"+\", help=\"max size\")\n mabi_parser.add_argument(\"--units\", type=str, nargs=\"+\", help=\"unit\", choices=[\"B\", \"KiB\", \"MiB\", \"GiB\"])\n @cmd2.with_argparser(mabi_parser)\n def do_mem_alloc_by_identities(self, args):\n \"\"\"Memory allocation by identities.\"\"\"\n self.logger.info(\"=> [info] memory allocation by identities...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"mem_alloc_by_identities\",\n \"kwargs\": {\n \"identities\": args.identities,\n \"chunk_sizes\": args.chunk_sizes,\n \"max_sizes\": args.max_sizes,\n \"units\": args.units,\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n\n mrbi_parser = cmd2.Cmd2ArgumentParser()\n mrbi_parser.add_argument(\"--identities\", type=str, nargs=\"+\", help=\"device ids\")\n mrbi_parser.add_argument(\"--mem_sizes\", type=int, nargs=\"+\", help=\"chun size\")\n mrbi_parser.add_argument(\"--units\", type=str, nargs=\"+\", help=\"unit\", choices=[\"B\", \"KiB\", \"MiB\", \"GiB\"])\n @cmd2.with_argparser(mrbi_parser)\n def do_mem_release_by_identities(self, args):\n \"\"\"Memory release by identities.\"\"\"\n self.logger.info(\"=> [info] memory release by identities...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"mem_release_by_identities\",\n \"kwargs\": {\n \"identities\": args.identities,\n \"mem_sizes\": args.mem_sizes,\n \"units\": args.units,\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n\n start_pbi_parser = cmd2.Cmd2ArgumentParser()\n start_pbi_parser.add_argument(\"--identities\", type=str, nargs=\"+\", help=\"device ids\")\n start_pbi_parser.add_argument(\"--chunk_sizes\", type=int, nargs=\"+\", help=\"chun size\")\n start_pbi_parser.add_argument(\"--max_sizes\", type=int, nargs=\"+\", help=\"max size\")\n start_pbi_parser.add_argument(\"--units\", type=str, nargs=\"+\", help=\"unit\", choices=[\"B\", \"KiB\", \"MiB\", \"GiB\"])\n start_pbi_parser.add_argument(\"--auto_close\", action=\"store_true\", help=\"auto close\")\n @cmd2.with_argparser(start_pbi_parser)\n def do_start_preemptive_by_identities(self, args):\n \"\"\"Start preemptive by identities.\"\"\"\n self.logger.info(\"=> [info] start preemptive...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"start_preemptive_by_identities\",\n \"kwargs\": {\n \"identities\": args.identities,\n \"chunk_sizes\": args.chunk_sizes,\n \"max_sizes\": args.max_sizes,\n \"units\": args.units,\n \"auto_close\": args.auto_close,\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n\n stop_pbi_parser = cmd2.Cmd2ArgumentParser()\n stop_pbi_parser.add_argument(\"--identities\", type=str, nargs=\"+\", help=\"device ids\")\n @cmd2.with_argparser(stop_pbi_parser)\n def 
do_stop_preemptive_by_identities(self, args):\n \"\"\"Stop preemptive by identities.\"\"\"\n self.logger.info(\"=> [info] stop preemptive...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"stop_preemptive_by_identities\",\n \"kwargs\": {\n \"identities\": args.identities,\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n\n at_parser = cmd2.Cmd2ArgumentParser()\n at_parser.add_argument(\"--stdout_file\", type=str, completer=cmd2.Cmd.path_complete, required=True, help=\"stdout file\")\n at_parser.add_argument(\"--stderr_file\", type=str, completer=cmd2.Cmd.path_complete, default=None, help=\"stderr file\")\n at_parser.add_argument(\"user_args\", nargs=argparse.REMAINDER, completer=cmd2.Cmd.path_complete, help=\"user args\")\n @cmd2.with_argparser(at_parser)\n def do_add_task(self, args):\n \"\"\"Add task.\"\"\"\n self.logger.info(\"=> [info] add task...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"add_task\",\n \"kwargs\": {\n \"user_args\": args.user_args,\n \"stdout_file\": args.stdout_file,\n \"stderr_file\": args.stderr_file if args.stderr_file is not None else args.stdout_file,\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n\n rt_parser = cmd2.Cmd2ArgumentParser()\n rt_parser.add_argument(\"--identities\", type=str, nargs=\"+\", default=[], help=\"task id\")\n @cmd2.with_argparser(rt_parser)\n def do_remove_tasks(self, args):\n \"\"\"Remove tasks.\"\"\"\n self.logger.info(\"=> [info] remove tasks...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"remove_tasks\",\n \"kwargs\": {\n \"identities\": args.identities,\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n\n gtibi_parser = cmd2.Cmd2ArgumentParser()\n gtibi_parser.add_argument(\"--identities\", type=str, nargs=\"+\", default=[], help=\"task id\")\n @cmd2.with_argparser(gtibi_parser)\n def do_get_task_info_by_identities(self, args):\n \"\"\"Get task info by identities.\"\"\"\n self.logger.info(\"=> [info] Get task info by identities...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"get_task_info_by_identities\",\n \"kwargs\": {\n \"identities\": args.identities,\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)"
}
] | import os
import sys
import rich
import time
import argparse
import multiprocessing as mp
import task_manager.utils.common_utils as common_utils
from task_manager.manager.core import CoreManager
from task_manager.controller.cli_controller import CLIController | 8,115 |
if mp.get_start_method(allow_none=True) is None:
mp.set_start_method("spawn")
else:
assert mp.get_start_method() == "spawn", "Only support spawn start method"
def parse_args():
identity_id = common_utils.md5(str(time.time()))
parser = argparse.ArgumentParser()
parser.add_argument("--log_dir", default="logs", help="Log dir")
parser.add_argument("--log_level", default="INFO", help="Log level")
parser.add_argument("--web_controller", action="store_true", help="Whether start web gui to watch GPU usage&Tasks")
parser.add_argument(
"--core_manager_addr",
type=str,
default=f"ipc:///tmp/core_manager-{identity_id}.sock",
help="Address to run Core manager on"
)
parser.add_argument(
"--gpu_manager_addr",
type=str,
default=f"ipc:///tmp/gpu_manager-{identity_id}.sock",
help="Address to run GPU manager on"
)
parser.add_argument(
"--task_manager_addr",
type=str,
default=f"ipc:///tmp/task_manager-{identity_id}.sock",
help="Address to run Task manager on"
)
args = parser.parse_args()
os.makedirs(args.log_dir, exist_ok=True)
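# Drop the flags we just parsed from sys.argv; presumably so that cmd2 (used by CLIController) does not try to interpret them as its own startup arguments.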
sys.argv = sys.argv[:1]
return args
def start_core_manager(args):
|
if mp.get_start_method(allow_none=True) is None:
mp.set_start_method("spawn")
else:
assert mp.get_start_method() == "spawn", "Only support spawn start method"
def parse_args():
identity_id = common_utils.md5(str(time.time()))
parser = argparse.ArgumentParser()
parser.add_argument("--log_dir", default="logs", help="Log dir")
parser.add_argument("--log_level", default="INFO", help="Log level")
parser.add_argument("--web_controller", action="store_true", help="Whether start web gui to watch GPU usage&Tasks")
parser.add_argument(
"--core_manager_addr",
type=str,
default=f"ipc:///tmp/core_manager-{identity_id}.sock",
help="Address to run Core manager on"
)
parser.add_argument(
"--gpu_manager_addr",
type=str,
default=f"ipc:///tmp/gpu_manager-{identity_id}.sock",
help="Address to run GPU manager on"
)
parser.add_argument(
"--task_manager_addr",
type=str,
default=f"ipc:///tmp/task_manager-{identity_id}.sock",
help="Address to run Task manager on"
)
args = parser.parse_args()
os.makedirs(args.log_dir, exist_ok=True)
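# Drop the flags we just parsed from sys.argv; presumably so that cmd2 (used by CLIController) does not try to interpret them as its own startup arguments.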
sys.argv = sys.argv[:1]
return args
def start_core_manager(args): | core_manager = CoreManager( | 0 | 2023-12-30 11:47:06+00:00 | 12k |
Shaokang-Agent/S2L | marlgrid/base.py | [
{
"identifier": "WorldObj",
"path": "marlgrid/objects.py",
"snippet": "class WorldObj(metaclass=RegisteredObjectType):\n def __init__(self, color=\"worst\", state=0):\n self.color = color\n self.state = state\n self.contains = None\n\n self.agents = [] # Some objects can have agents on top (e.g. floor, open doors, etc).\n \n self.pos_init = None\n self.pos = None\n self.is_agent = False\n\n @property\n def dir(self):\n return None\n\n def set_position(self, pos):\n if self.pos_init is None:\n self.pos_init = pos\n self.pos = pos\n\n @property\n def numeric_color(self):\n return COLORS[self.color]\n \n @property\n def type(self):\n return self.__class__.__name__\n\n def can_overlap(self):\n return False\n\n def can_pickup(self):\n return False\n\n def can_contain(self):\n return False\n\n def see_behind(self):\n return True\n\n def toggle(self, env, pos):\n return False\n\n def encode(self, str_class=False):\n # Note 5/29/20: Commented out the condition below; was causing agents to \n # render incorrectly in partial views. In particular, if there were N red agents,\n # agents {i != k} would render as blue (rather than red) in agent k's partial view.\n # # if len(self.agents)>0:\n # # return self.agents[0].encode(str_class=str_class)\n # # else:\n enc_class = self.type if bool(str_class) else self.recursive_subclasses().index(self.__class__)\n enc_color = self.color if isinstance(self.color, int) else COLOR_TO_IDX[self.color]\n return (enc_class, enc_color, self.state)\n\n def describe(self):\n return f\"Obj: {self.type}({self.color}, {self.state})\"\n\n @classmethod\n def decode(cls, type, color, state):\n if isinstance(type, str):\n cls_subclasses = {c.__name__: c for c in cls.recursive_subclasses()}\n if type not in cls_subclasses:\n raise ValueError(\n f\"Not sure how to construct a {cls} of (sub)type {type}\"\n )\n return cls_subclasses[type](color, state)\n elif isinstance(type, int):\n subclass = cls.recursive_subclasses()[type]\n return subclass(color, state)\n\n def render(self, img):\n raise NotImplementedError\n\n def str_render(self, dir=0):\n return \"??\""
},
{
"identifier": "Wall",
"path": "marlgrid/objects.py",
"snippet": "class Wall(BulkObj):\n def see_behind(self):\n return False\n\n def str_render(self, dir=0):\n return \"WW\"\n\n def render(self, img):\n fill_coords(img, point_in_rect(0, 1, 0, 1), COLORS[self.color])"
},
{
"identifier": "Goal",
"path": "marlgrid/objects.py",
"snippet": "class Goal(WorldObj):\n def __init__(self, reward, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.reward = reward\n\n def can_overlap(self):\n return True\n\n def get_reward(self, agent):\n return self.reward\n\n def str_render(self, dir=0):\n return \"GG\"\n\n def render(self, img):\n fill_coords(img, point_in_rect(0, 1, 0, 1), COLORS[self.color])"
},
{
"identifier": "Lava",
"path": "marlgrid/objects.py",
"snippet": "class Lava(WorldObj):\n def can_overlap(self):\n return True# and self.agent is None\n\n def str_render(self, dir=0):\n return \"VV\"\n\n def render(self, img):\n c = (255, 128, 0)\n\n # Background color\n fill_coords(img, point_in_rect(0, 1, 0, 1), c)\n\n # Little waves\n for i in range(3):\n ylo = 0.3 + 0.2 * i\n yhi = 0.4 + 0.2 * i\n fill_coords(img, point_in_line(0.1, ylo, 0.3, yhi, r=0.03), (0, 0, 0))\n fill_coords(img, point_in_line(0.3, yhi, 0.5, ylo, r=0.03), (0, 0, 0))\n fill_coords(img, point_in_line(0.5, ylo, 0.7, yhi, r=0.03), (0, 0, 0))\n fill_coords(img, point_in_line(0.7, yhi, 0.9, ylo, r=0.03), (0, 0, 0))"
},
{
"identifier": "GridAgent",
"path": "marlgrid/objects.py",
"snippet": "class GridAgent(WorldObj):\n def __init__(self, *args, color='red', **kwargs):\n super().__init__(*args, **{'color':color, **kwargs})\n self.metadata = {\n 'color': color,\n }\n self.is_agent = True\n\n @property\n def dir(self):\n return self.state % 4\n\n @property\n def type(self):\n return 'Agent'\n\n @dir.setter\n def dir(self, dir):\n self.state = self.state // 4 + dir % 4\n\n def str_render(self, dir=0):\n return [\">>\", \"VV\", \"<<\", \"^^\"][(self.dir + dir) % 4]\n\n def can_overlap(self):\n return True\n\n def render(self, img):\n tri_fn = point_in_triangle((0.12, 0.19), (0.87, 0.50), (0.12, 0.81),)\n tri_fn = rotate_fn(tri_fn, cx=0.5, cy=0.5, theta=0.5 * np.pi * (self.dir))\n fill_coords(img, tri_fn, COLORS[self.color])"
},
{
"identifier": "BonusTile",
"path": "marlgrid/objects.py",
"snippet": "class BonusTile(WorldObj):\n def __init__(self, reward, penalty=-0.1, bonus_id=0, n_bonus=1, initial_reward=True, reset_on_mistake=False, color='yellow', *args, **kwargs):\n super().__init__(*args, **{'color': color, **kwargs, 'state': bonus_id})\n self.reward = reward\n self.penalty = penalty\n self.n_bonus = n_bonus\n self.bonus_id = bonus_id\n self.initial_reward = initial_reward\n self.reset_on_mistake = reset_on_mistake\n\n def can_overlap(self):\n return True\n\n def str_render(self, dir=0):\n return \"BB\"\n\n def get_reward(self, agent):\n # If the agent hasn't hit any bonus tiles, set its bonus state so that\n # it'll get a reward from hitting this tile.\n if self.n_bonus == 1:\n return self.reward\n else:\n first_bonus = False\n if agent.bonus_state is None:\n agent.bonus_state = (self.bonus_id - 1) % self.n_bonus\n first_bonus = True\n if agent.bonus_state == self.bonus_id:\n # This is the last bonus tile the agent hit\n rew = -np.abs(self.penalty)\n elif (agent.bonus_state + 1)%self.n_bonus == self.bonus_id:\n # The agent hit the previous bonus tile before this one\n agent.bonus_state = self.bonus_id\n # rew = agent.bonus_value\n rew = self.reward\n else:\n # The agent hit any other bonus tile before this one\n rew = -np.abs(self.penalty)\n\n if self.reset_on_mistake:\n agent.bonus_state = self.bonus_id\n\n if first_bonus and not bool(self.initial_reward):\n return 0\n else:\n return rew\n\n def render(self, img):\n fill_coords(img, point_in_rect(0, 1, 0, 1), COLORS[self.color])"
},
{
"identifier": "BulkObj",
"path": "marlgrid/objects.py",
"snippet": "class BulkObj(WorldObj, metaclass=RegisteredObjectType):\n # Todo: special behavior for hash, eq if the object has an agent.\n def __hash__(self):\n return hash((self.__class__, self.color, self.state, tuple(self.agents)))\n\n def __eq__(self, other):\n return hash(self) == hash(other)"
},
{
"identifier": "COLORS",
"path": "marlgrid/objects.py",
"snippet": "COLORS = {\n \"red\": np.array([255, 0, 0]),\n \"orange\": np.array([255, 165, 0]),\n \"green\": np.array([0, 255, 0]),\n \"blue\": np.array([0, 0, 255]),\n \"cyan\": np.array([0, 139, 139]),\n \"purple\": np.array([112, 39, 195]),\n \"yellow\": np.array([255, 255, 0]),\n \"olive\": np.array([128, 128, 0]),\n \"grey\": np.array([100, 100, 100]),\n \"worst\": np.array([74, 65, 42]), # https://en.wikipedia.org/wiki/Pantone_448_C\n \"pink\": np.array([255, 0, 189]),\n \"white\": np.array([255,255,255]),\n \"prestige\": np.array([255,255,255]),\n \"shadow\": np.array([35,25,30]), # nice dark purpley color for cells agents can't see.\n}"
},
{
"identifier": "GridAgentInterface",
"path": "marlgrid/agents.py",
"snippet": "class GridAgentInterface(GridAgent):\n class actions(IntEnum):\n left = 0 # Rotate left\n right = 1 # Rotate right\n forward = 2 # Move forward\n pickup = 3 # Pick up an object\n drop = 4 # Drop an object\n toggle = 5 # Toggle/activate an object\n done = 6 # Done completing task\n\n def __init__(\n self,\n view_size=7,\n view_tile_size=5,\n view_offset=0,\n observation_style='image',\n observe_rewards=False,\n observe_position=False,\n observe_orientation=False,\n restrict_actions=False,\n see_through_walls=False,\n hide_item_types=[],\n prestige_beta=0.95,\n prestige_scale=2,\n allow_negative_prestige=False,\n spawn_delay=0,\n **kwargs):\n super().__init__(**kwargs)\n\n self.view_size = view_size\n self.view_tile_size = view_tile_size\n self.view_offset = view_offset\n self.observation_style = observation_style\n self.observe_rewards = observe_rewards\n self.observe_position = observe_position\n self.observe_orientation = observe_orientation\n self.hide_item_types = hide_item_types\n self.see_through_walls = see_through_walls\n self.init_kwargs = kwargs\n self.restrict_actions = restrict_actions\n self.prestige_beta = prestige_beta\n self.prestige_scale = prestige_scale\n self.allow_negative_prestige = allow_negative_prestige\n self.spawn_delay = spawn_delay\n\n if self.prestige_beta > 1:\n # warnings.warn(\"prestige_beta must be between 0 and 1. Using default 0.99\")\n self.prestige_beta = 0.95\n \n image_space = gym.spaces.Box(\n low=0,\n high=255,\n shape=(view_tile_size * view_size, view_tile_size * view_size, 3),\n dtype=\"uint8\",\n )\n if observation_style == 'image':\n self.observation_space = image_space\n elif observation_style == 'rich':\n obs_space = {\n 'pov': image_space,\n }\n if self.observe_rewards:\n obs_space['reward'] = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32)\n if self.observe_position:\n obs_space['position'] = gym.spaces.Box(low=0, high=1, shape=(2,), dtype=np.float32)\n if self.observe_orientation:\n obs_space['orientation'] = gym.spaces.Discrete(n=4)\n self.observation_space = gym.spaces.Dict(obs_space)\n else:\n raise ValueError(f\"{self.__class__.__name__} kwarg 'observation_style' must be one of 'image', 'rich'.\")\n\n if self.restrict_actions:\n self.action_space = gym.spaces.Discrete(3)\n else:\n self.action_space = gym.spaces.Discrete(len(self.actions))\n\n self.metadata = {\n **self.metadata,\n 'view_size': view_size,\n 'view_tile_size': view_tile_size,\n }\n self.reset(new_episode=True)\n\n def render_post(self, tile):\n if not self.active:\n return tile\n\n blue = np.array([0,0,255])\n red = np.array([255,0,0])\n\n if self.color == 'prestige':\n # Compute a scaled prestige value between 0 and 1 that will be used to \n # interpolate between the low-prestige (red) and high-prestige (blue)\n # colors.\n if self.allow_negative_prestige:\n prestige_scaled = 1/(1 + np.exp(-self.prestige/self.prestige_scale))\n else:\n prestige_scaled = np.tanh(self.prestige/self.prestige_scale)\n\n new_color = (\n prestige_scaled * blue +\n (1.-prestige_scaled) * red\n ).astype(np.int)\n\n grey_pixels = (np.diff(tile, axis=-1)==0).all(axis=-1)\n\n alpha = tile[...,0].astype(np.uint16)[...,None]\n tile = np.right_shift(alpha * new_color, 8).astype(np.uint8)\n return tile\n else:\n return tile\n\n def clone(self):\n ret = self.__class__(\n view_size = self.view_size,\n view_offset=self.view_offset,\n view_tile_size = self.view_tile_size,\n observation_style = self.observation_style,\n observe_rewards = self.observe_rewards,\n 
observe_position = self.observe_position,\n observe_orientation = self.observe_orientation,\n hide_item_types = self.hide_item_types,\n restrict_actions = self.restrict_actions,\n see_through_walls=self.see_through_walls,\n prestige_beta = self.prestige_beta,\n prestige_scale = self.prestige_scale,\n allow_negative_prestige = self.allow_negative_prestige,\n spawn_delay = self.spawn_delay,\n **self.init_kwargs\n )\n return ret\n\n def on_step(self, obj):\n if isinstance(obj, BonusTile):\n self.bonuses.append((obj.bonus_id, self.prestige))\n self.prestige *= self.prestige_beta\n\n def reward(self, rew):\n if self.allow_negative_prestige:\n self.rew += rew\n else:\n if rew >= 0:\n self.prestige += rew\n else: # rew < 0\n self.prestige = 0\n\n def activate(self):\n self.active = True\n\n def deactivate(self):\n self.active = False\n\n def reset(self, new_episode=False):\n self.done = False\n self.active = False\n self.pos = None\n self.carrying = None\n self.mission = \"\"\n if new_episode:\n self.prestige = 0\n self.bonus_state = None\n self.bonuses = []\n\n def render(self, img):\n if self.active:\n super().render(img)\n\n @property\n def dir_vec(self):\n \"\"\"\n Get the direction vector for the agent, pointing in the direction\n of forward movement.\n \"\"\"\n assert self.dir >= 0 and self.dir < 4\n return np.array([[1, 0], [0, 1], [-1, 0], [0, -1]])[self.dir]\n\n @property\n def right_vec(self):\n \"\"\"\n Get the vector pointing to the right of the agent.\n \"\"\"\n dx, dy = self.dir_vec\n return np.array((-dy, dx))\n\n @property\n def front_pos(self):\n \"\"\"\n Get the position of the cell that is right in front of the agent\n \"\"\"\n return np.add(self.pos, self.dir_vec)\n\n def get_view_coords(self, i, j):\n \"\"\"\n Translate and rotate absolute grid coordinates (i, j) into the\n agent's partially observable view (sub-grid). 
Note that the resulting\n coordinates may be negative or outside of the agent's view size.\n \"\"\"\n\n ax, ay = self.pos\n dx, dy = self.dir_vec\n rx, ry = self.right_vec\n\n \n ax -= 2*self.view_offset*dx\n ay -= 2*self.view_offset*dy\n\n\n # Compute the absolute coordinates of the top-left view corner\n sz = self.view_size\n hs = self.view_size // 2\n tx = ax + (dx * (sz - 1)) - (rx * hs)\n ty = ay + (dy * (sz - 1)) - (ry * hs)\n\n lx = i - tx\n ly = j - ty\n\n # Project the coordinates of the object relative to the top-left\n # corner onto the agent's own coordinate system\n vx = rx * lx + ry * ly\n vy = -(dx * lx + dy * ly)\n\n return vx, vy\n\n \n def get_view_pos(self):\n return (self.view_size // 2, self.view_size - 1 - self.view_offset)\n\n\n def get_view_exts(self):\n \"\"\"\n Get the extents of the square set of tiles visible to the agent\n Note: the bottom extent indices are not included in the set\n \"\"\"\n\n dir = self.dir\n # Facing right\n if dir == 0: # 1\n topX = self.pos[0] - self.view_offset\n topY = self.pos[1] - self.view_size // 2\n # Facing down\n elif dir == 1: # 0\n topX = self.pos[0] - self.view_size // 2\n topY = self.pos[1] - self.view_offset\n # Facing left\n elif dir == 2: # 3\n topX = self.pos[0] - self.view_size + 1 + self.view_offset\n topY = self.pos[1] - self.view_size // 2\n # Facing up\n elif dir == 3: # 2\n topX = self.pos[0] - self.view_size // 2\n topY = self.pos[1] - self.view_size + 1 + self.view_offset\n else:\n assert False, \"invalid agent direction\"\n\n botX = topX + self.view_size\n botY = topY + self.view_size\n\n return (topX, topY, botX, botY)\n\n def relative_coords(self, x, y):\n \"\"\"\n Check if a grid position belongs to the agent's field of view, and returns the corresponding coordinates\n \"\"\"\n\n vx, vy = self.get_view_coords(x, y)\n\n if vx < 0 or vy < 0 or vx >= self.view_size or vy >= self.view_size:\n return None\n\n return vx, vy\n\n def in_view(self, x, y):\n \"\"\"\n check if a grid position is visible to the agent\n \"\"\"\n\n return self.relative_coords(x, y) is not None\n\n def sees(self, x, y):\n raise NotImplementedError\n\n def process_vis(self, opacity_grid):\n assert len(opacity_grid.shape) == 2\n if not self.see_through_walls:\n return occlude_mask(~opacity_grid, self.get_view_pos())\n else:\n return np.full(opacity_grid.shape, 1, dtype=np.bool)"
},
{
"identifier": "SimpleImageViewer",
"path": "marlgrid/rendering.py",
"snippet": "class SimpleImageViewer(object):\n def __init__(self, display=None, caption=None, maxwidth=500):\n self.window = None\n self.isopen = False\n self.display = display\n self.maxwidth = maxwidth\n self.caption = caption\n\n def imshow(self, arr):\n if self.window is None:\n height, width, _channels = arr.shape\n if width > self.maxwidth:\n scale = self.maxwidth / width\n width = int(scale * width)\n height = int(scale * height)\n self.window = pyglet.window.Window(width=width, height=height,\n display=self.display, vsync=False, resizable=True, caption=self.caption)\n self.width = width\n self.height = height\n self.isopen = True\n\n @self.window.event\n def on_resize(width, height):\n self.width = width\n self.height = height\n\n @self.window.event\n def on_close():\n self.isopen = False\n\n assert len(arr.shape) == 3, \"You passed in an image with the wrong number shape\"\n\n image = pyglet.image.ImageData(arr.shape[1], arr.shape[0],\n 'RGB', arr.tobytes(), pitch=arr.shape[1]*-3)\n gl.glTexParameteri(gl.GL_TEXTURE_2D,\n gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)\n texture = image.get_texture()\n\n aspect_ratio = arr.shape[1]/arr.shape[0]\n forced_width = min(self.width, self.height * aspect_ratio)\n texture.height = int(forced_width / aspect_ratio)\n texture.width = int(forced_width)\n\n self.window.clear()\n self.window.switch_to()\n self.window.dispatch_events()\n texture.blit(0, 0) # draw\n self.window.flip()\n \n def close(self):\n if self.isopen and sys.meta_path:\n # ^^^ check sys.meta_path to avoid 'ImportError: sys.meta_path is None, Python is likely shutting down'\n self.window.close()\n self.isopen = False\n\n def __del__(self):\n self.close()"
}
] | import gym
import numpy as np
import gym_minigrid
import math
import warnings
import pdb
from enum import IntEnum
from .objects import WorldObj, Wall, Goal, Lava, GridAgent, BonusTile, BulkObj, COLORS
from .agents import GridAgentInterface
from .rendering import SimpleImageViewer
from gym_minigrid.rendering import fill_coords, point_in_rect, downsample, highlight_img | 8,106 | if not agent.active:
# below, not sure orientation is correct but as of 6/27/2020 that doesn't matter because
# agent views are usually square and this grid won't be used for anything.
grid = MultiGrid((agent.view_size, agent.view_size), orientation=agent.dir+1)
vis_mask = np.zeros((agent.view_size, agent.view_size), dtype=np.bool)
return grid, vis_mask
topX, topY, botX, botY = agent.get_view_exts()
grid = self.grid.slice(
topX, topY, agent.view_size, agent.view_size, rot_k=agent.dir + 1
)
# Process occluders and visibility
# Note that this incurs some slight performance cost
vis_mask = agent.process_vis(grid.opacity)
# Warning about the rest of the function:
# Allows masking away objects that the agent isn't supposed to see.
# But breaks consistency between the states of the grid objects in the partial views
# and the grid objects overall.
if len(getattr(agent, 'hide_item_types', []))>0:
for i in range(grid.width):
for j in range(grid.height):
item = grid.get(i,j)
if (item is not None) and (item is not agent) and (item.type in agent.hide_item_types):
if len(item.agents) > 0:
grid.set(i,j,item.agents[0])
else:
grid.set(i,j,None)
return grid, vis_mask
def gen_agent_obs(self, agent):
"""
Generate the agent's view (partially observable, low-resolution encoding)
"""
grid, vis_mask = self.gen_obs_grid(agent)
grid_image = grid.render(tile_size=agent.view_tile_size, visible_mask=vis_mask, top_agent=agent)
if agent.observation_style=='image':
return grid_image
else:
ret = {'pov': grid_image}
if agent.observe_rewards:
ret['reward'] = getattr(agent, 'step_reward', 0)
if agent.observe_position:
agent_pos = agent.pos if agent.pos is not None else (0,0)
ret['position'] = np.array(agent_pos)/np.array([self.width, self.height], dtype=np.float)
if agent.observe_orientation:
agent_dir = agent.dir if agent.dir is not None else 0
ret['orientation'] = agent_dir
return ret
def gen_obs(self):
return [self.gen_agent_obs(agent) for agent in self.agents]
def __str__(self):
return self.grid.__str__()
def check_agent_position_integrity(self, title=''):
'''
This function checks whether each agent is present in the grid in exactly one place.
This is particularly helpful for validating the world state when ghost_mode=False and
agents can stack, since the logic for moving them around gets a bit messy.
Prints a message and drops into pdb if there's an inconsistency.
'''
agent_locs = [[] for _ in range(len(self.agents))]
for i in range(self.grid.width):
for j in range(self.grid.height):
x = self.grid.get(i,j)
for k,agent in enumerate(self.agents):
if x==agent:
agent_locs[k].append(('top', (i,j)))
if hasattr(x, 'agents') and agent in x.agents:
agent_locs[k].append(('stacked', (i,j)))
if not all([len(x)==1 for x in agent_locs]):
print(f"{title} > Failed integrity test!")
for a, al in zip(self.agents, agent_locs):
print(" > ", a.color,'-', al)
def step(self, actions):
# Spawn agents if it's time.
for agent in self.agents:
if not agent.active and not agent.done and self.step_count >= agent.spawn_delay:
self.place_obj(agent, **self.agent_spawn_kwargs)
agent.activate()
assert len(actions) == len(self.agents)
step_rewards = np.zeros((len(self.agents,)), dtype=np.float)
self.step_count += 1
iter_agents = list(enumerate(zip(self.agents, actions)))
iter_order = np.arange(len(iter_agents))
self.np_random.shuffle(iter_order)
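# Agents act in a freshly shuffled order each step, so conflicts (e.g. two agents entering the same cell) are not always resolved in favor of the same agent.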
for shuffled_ix in iter_order:
agent_no, (agent, action) = iter_agents[shuffled_ix]
agent.step_reward = 0
if agent.active:
cur_pos = agent.pos[:]
cur_cell = self.grid.get(*cur_pos)
fwd_pos = agent.front_pos[:]
fwd_cell = self.grid.get(*fwd_pos)
agent_moved = False
# Rotate left
if action == agent.actions.left:
agent.dir = (agent.dir - 1) % 4
# Rotate right
elif action == agent.actions.right:
agent.dir = (agent.dir + 1) % 4
# Move forward
elif action == agent.actions.forward:
# Under the following conditions, the agent can move forward.
can_move = fwd_cell is None or fwd_cell.can_overlap()
| # Multi-agent gridworld.
# Based on MiniGrid: https://github.com/maximecb/gym-minigrid.
TILE_PIXELS = 32
class ObjectRegistry:
'''
This class contains dicts that map objects to numeric keys and vice versa.
Used so that grid worlds can represent objects using numerical arrays rather
than lists of lists of generic objects.
'''
def __init__(self, objs=[], max_num_objects=1000):
self.key_to_obj_map = {}
self.obj_to_key_map = {}
self.max_num_objects = max_num_objects
for obj in objs:
self.add_object(obj)
def get_next_key(self):
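# Linear scan for the smallest unused key; the for/else only raises if every candidate key is already taken.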
for k in range(self.max_num_objects):
if k not in self.key_to_obj_map:
break
else:
raise ValueError("Object registry full.")
return k
def __len__(self):
return len(self.key_to_obj_map)
def add_object(self, obj):
new_key = self.get_next_key()
self.key_to_obj_map[new_key] = obj
self.obj_to_key_map[obj] = new_key
return new_key
def contains_object(self, obj):
return obj in self.obj_to_key_map
def contains_key(self, key):
return key in self.key_to_obj_map
def get_key(self, obj):
if obj in self.obj_to_key_map:
return self.obj_to_key_map[obj]
else:
return self.add_object(obj)
# 5/4/2020 This gets called A LOT. Replaced calls to this function with direct dict gets
# in an attempt to speed things up. Probably didn't make a big difference.
def obj_of_key(self, key):
return self.key_to_obj_map[key]
def rotate_grid(grid, rot_k):
'''
This function basically replicates np.rot90 (with the correct args for rotating images).
But it's faster.
'''
rot_k = rot_k % 4
if rot_k==3:
return np.moveaxis(grid[:,::-1], 0, 1)
elif rot_k==1:
return np.moveaxis(grid[::-1,:], 0, 1)
elif rot_k==2:
return grid[::-1,::-1]
else:
return grid
class MultiGrid:
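# Rendered tiles are memoized in this class-level cache (shared by all MultiGrid instances), keyed by tile size and object encoding.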
tile_cache = {}
def __init__(self, shape, obj_reg=None, orientation=0):
self.orientation = orientation
if isinstance(shape, tuple):
self.width, self.height = shape
self.grid = np.zeros((self.width, self.height), dtype=np.uint8) # w,h
elif isinstance(shape, np.ndarray):
self.width, self.height = shape.shape
self.grid = shape
else:
raise ValueError("Must create grid from shape tuple or array.")
if self.width < 3 or self.height < 3:
raise ValueError("Grid needs width, height >= 3")
self.obj_reg = ObjectRegistry(objs=[None]) if obj_reg is None else obj_reg
@property
def opacity(self):
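# Boolean array over the grid: True where a cell's object blocks sight (see_behind() is False); empty cells count as transparent.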
transparent_fun = np.vectorize(lambda k: (self.obj_reg.key_to_obj_map[k].see_behind() if hasattr(self.obj_reg.key_to_obj_map[k], 'see_behind') else True))
return ~transparent_fun(self.grid)
def __getitem__(self, *args, **kwargs):
return self.__class__(
np.ndarray.__getitem__(self.grid, *args, **kwargs),
obj_reg=self.obj_reg,
orientation=self.orientation,
)
def rotate_left(self, k=1):
return self.__class__(
rotate_grid(self.grid, rot_k=k), # np.rot90(self.grid, k=k),
obj_reg=self.obj_reg,
orientation=(self.orientation - k) % 4,
)
def slice(self, topX, topY, width, height, rot_k=0):
"""
Get a subset of the grid
"""
sub_grid = self.__class__(
(width, height),
obj_reg=self.obj_reg,
orientation=(self.orientation - rot_k) % 4,
)
x_min = max(0, topX)
x_max = min(topX + width, self.width)
y_min = max(0, topY)
y_max = min(topY + height, self.height)
x_offset = x_min - topX
y_offset = y_min - topY
sub_grid.grid[
x_offset : x_max - x_min + x_offset, y_offset : y_max - y_min + y_offset
] = self.grid[x_min:x_max, y_min:y_max]
sub_grid.grid = rotate_grid(sub_grid.grid, rot_k)
sub_grid.width, sub_grid.height = sub_grid.grid.shape
return sub_grid
def set(self, i, j, obj):
assert i >= 0 and i < self.width
assert j >= 0 and j < self.height
self.grid[i, j] = self.obj_reg.get_key(obj)
def get(self, i, j):
assert i >= 0 and i < self.width
assert j >= 0 and j < self.height
return self.obj_reg.key_to_obj_map[self.grid[i, j]]
def horz_wall(self, x, y, length=None, obj_type=Wall):
if length is None:
length = self.width - x
for i in range(0, length):
self.set(x + i, y, obj_type())
def vert_wall(self, x, y, length=None, obj_type=Wall):
if length is None:
length = self.height - y
for j in range(0, length):
self.set(x, y + j, obj_type())
def wall_rect(self, x, y, w, h, obj_type=Wall):
self.horz_wall(x, y, w, obj_type=obj_type)
self.horz_wall(x, y + h - 1, w, obj_type=obj_type)
self.vert_wall(x, y, h, obj_type=obj_type)
self.vert_wall(x + w - 1, y, h, obj_type=obj_type)
def __str__(self):
render = (
lambda x: " "
if x is None or not hasattr(x, "str_render")
else x.str_render(dir=self.orientation)
)
hstars = "*" * (2 * self.width + 2)
return (
hstars
+ "\n"
+ "\n".join(
"*" + "".join(render(self.get(i, j)) for i in range(self.width)) + "*"
for j in range(self.height)
)
+ "\n"
+ hstars
)
def encode(self, vis_mask=None):
"""
Produce a compact numpy encoding of the grid
"""
if vis_mask is None:
vis_mask = np.ones((self.width, self.height), dtype=bool)
array = np.zeros((self.width, self.height, 3), dtype="uint8")
for i in range(self.width):
for j in range(self.height):
if vis_mask[i, j]:
v = self.get(i, j)
if v is None:
array[i, j, :] = 0
else:
array[i, j, :] = v.encode()
return array
@classmethod
def decode(cls, array):
raise NotImplementedError
width, height, channels = array.shape
assert channels == 3
vis_mask = np.ones(shape=(width, height), dtype=np.bool)
grid = cls((width, height))
@classmethod
def cache_render_fun(cls, key, f, *args, **kwargs):
if key not in cls.tile_cache:
cls.tile_cache[key] = f(*args, **kwargs)
return np.copy(cls.tile_cache[key])
@classmethod
def cache_render_obj(cls, obj, tile_size, subdivs):
if obj is None:
return cls.cache_render_fun((tile_size, None), cls.empty_tile, tile_size, subdivs)
else:
img = cls.cache_render_fun(
(tile_size, obj.__class__.__name__, *obj.encode()),
cls.render_object, obj, tile_size, subdivs
)
if hasattr(obj, 'render_post'):
return obj.render_post(img)
else:
return img
@classmethod
def empty_tile(cls, tile_size, subdivs):
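# Empty cells are mostly black; only the top row and rightmost column keep a faint intensity, which draws thin grid lines between tiles.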
alpha = max(0, min(20, tile_size-10))
img = np.full((tile_size, tile_size, 3), alpha, dtype=np.uint8)
img[1:,:-1] = 0
return img
@classmethod
def render_object(cls, obj, tile_size, subdivs):
img = np.zeros((tile_size*subdivs,tile_size*subdivs, 3), dtype=np.uint8)
obj.render(img)
# if 'Agent' not in obj.type and len(obj.agents) > 0:
# obj.agents[0].render(img)
return downsample(img, subdivs).astype(np.uint8)
@classmethod
def blend_tiles(cls, img1, img2):
'''
This function renders one "tile" on top of another. Kinda janky, works surprisingly well.
Assumes img2 is a downscaled monochromatic with a black (0,0,0) background.
'''
alpha = img2.sum(2, keepdims=True)
max_alpha = alpha.max()
if max_alpha == 0:
return img1
return (
((img1 * (max_alpha-alpha))+(img2*alpha)
)/max_alpha
).astype(img1.dtype)
@classmethod
def render_tile(cls, obj, tile_size=TILE_PIXELS, subdivs=3, top_agent=None):
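# Note: the subdivs argument is immediately overridden below, so tiles are always rendered with 3x supersampling before downsampling.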
subdivs = 3
if obj is None:
img = cls.cache_render_obj(obj, tile_size, subdivs)
else:
if ('Agent' in obj.type) and (top_agent in obj.agents):
# If the tile is a stack of agents that includes the top agent, then just render the top agent.
img = cls.cache_render_obj(top_agent, tile_size, subdivs)
else:
# Otherwise, render (+ downsize) the item in the tile.
img = cls.cache_render_obj(obj, tile_size, subdivs)
# If the base obj isn't an agent but has agents on top, render an agent and blend it in.
if len(obj.agents)>0 and 'Agent' not in obj.type:
if top_agent in obj.agents:
img_agent = cls.cache_render_obj(top_agent, tile_size, subdivs)
else:
img_agent = cls.cache_render_obj(obj.agents[0], tile_size, subdivs)
img = cls.blend_tiles(img, img_agent)
# Render the tile border if any of the corners are black.
if (img[([0,0,-1,-1],[0,-1,0,-1])]==0).all(axis=-1).any():
img = img + cls.cache_render_fun((tile_size, None), cls.empty_tile, tile_size, subdivs)
return img
def render(self, tile_size, highlight_mask=None, visible_mask=None, top_agent=None):
width_px = self.width * tile_size
height_px = self.height * tile_size
img = np.zeros(shape=(height_px, width_px), dtype=np.uint8)[...,None]+COLORS['shadow']
for j in range(0, self.height):
for i in range(0, self.width):
if visible_mask is not None and not visible_mask[i,j]:
continue
obj = self.get(i, j)
tile_img = MultiGrid.render_tile(
obj,
tile_size=tile_size,
top_agent=top_agent
)
ymin = j * tile_size
ymax = (j + 1) * tile_size
xmin = i * tile_size
xmax = (i + 1) * tile_size
img[ymin:ymax, xmin:xmax, :] = rotate_grid(tile_img, self.orientation)
if highlight_mask is not None:
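# Upscale the per-tile highlight mask to pixel resolution with a Kronecker product, then brighten highlighted pixels: (8*img + 2*hm) >> 3 == img + hm/4, clipped to 255.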
hm = np.kron(highlight_mask.T, np.full((tile_size, tile_size), 255, dtype=np.uint16)
)[...,None] # arcane magic.
img = np.right_shift(img.astype(np.uint16)*8+hm*2, 3).clip(0,255).astype(np.uint8)
return img
class MultiGridEnv(gym.Env):
def __init__(
self,
agents = [],
grid_size=None,
width=None,
height=None,
max_steps=100,
reward_decay=True,
seed=1337,
respawn=False,
ghost_mode=True,
agent_spawn_kwargs = {}
):
if grid_size is not None:
assert width == None and height == None
width, height = grid_size, grid_size
self.respawn = respawn
self.window = None
self.width = width
self.height = height
self.max_steps = max_steps
self.reward_decay = reward_decay
self.seed(seed=seed)
self.agent_spawn_kwargs = agent_spawn_kwargs
self.ghost_mode = ghost_mode
self.agents = []
for agent in agents:
self.add_agent(agent)
self.reset()
def seed(self, seed=1337):
# Seed the random number generator
self.np_random, _ = gym.utils.seeding.np_random(seed)
return [seed]
@property
def action_space(self):
return gym.spaces.Tuple(
[agent.action_space for agent in self.agents]
)
@property
def observation_space(self):
return gym.spaces.Tuple(
[agent.observation_space for agent in self.agents]
)
@property
def num_agents(self):
return len(self.agents)
def add_agent(self, agent_interface):
if isinstance(agent_interface, dict):
self.agents.append(GridAgentInterface(**agent_interface))
elif isinstance(agent_interface, GridAgentInterface):
self.agents.append(agent_interface)
else:
raise ValueError(
"To add an agent to a marlgrid environment, call add_agent with either a GridAgentInterface object "
" or a dictionary that can be used to initialize one.")
def reset(self, **kwargs):
for agent in self.agents:
agent.agents = []
agent.reset(new_episode=True)
self._gen_grid(self.width, self.height)
for agent in self.agents:
if agent.spawn_delay == 0:
self.place_obj(agent, **self.agent_spawn_kwargs)
agent.activate()
self.step_count = 0
obs = self.gen_obs()
return obs
def gen_obs_grid(self, agent):
# If the agent is inactive, return an empty grid and a visibility mask that hides everything.
if not agent.active:
# below, not sure orientation is correct but as of 6/27/2020 that doesn't matter because
# agent views are usually square and this grid won't be used for anything.
grid = MultiGrid((agent.view_size, agent.view_size), orientation=agent.dir+1)
vis_mask = np.zeros((agent.view_size, agent.view_size), dtype=np.bool)
return grid, vis_mask
topX, topY, botX, botY = agent.get_view_exts()
grid = self.grid.slice(
topX, topY, agent.view_size, agent.view_size, rot_k=agent.dir + 1
)
# Process occluders and visibility
# Note that this incurs some slight performance cost
vis_mask = agent.process_vis(grid.opacity)
# Warning about the rest of the function:
# Allows masking away objects that the agent isn't supposed to see.
# But breaks consistency between the states of the grid objects in the partial views
# and the grid objects overall.
if len(getattr(agent, 'hide_item_types', []))>0:
for i in range(grid.width):
for j in range(grid.height):
item = grid.get(i,j)
if (item is not None) and (item is not agent) and (item.type in agent.hide_item_types):
if len(item.agents) > 0:
grid.set(i,j,item.agents[0])
else:
grid.set(i,j,None)
return grid, vis_mask
def gen_agent_obs(self, agent):
"""
Generate the agent's view (partially observable, low-resolution encoding)
"""
grid, vis_mask = self.gen_obs_grid(agent)
grid_image = grid.render(tile_size=agent.view_tile_size, visible_mask=vis_mask, top_agent=agent)
if agent.observation_style=='image':
return grid_image
else:
ret = {'pov': grid_image}
if agent.observe_rewards:
ret['reward'] = getattr(agent, 'step_reward', 0)
if agent.observe_position:
agent_pos = agent.pos if agent.pos is not None else (0,0)
ret['position'] = np.array(agent_pos)/np.array([self.width, self.height], dtype=np.float)
if agent.observe_orientation:
agent_dir = agent.dir if agent.dir is not None else 0
ret['orientation'] = agent_dir
return ret
def gen_obs(self):
return [self.gen_agent_obs(agent) for agent in self.agents]
def __str__(self):
return self.grid.__str__()
def check_agent_position_integrity(self, title=''):
'''
This function checks whether each agent is present in the grid in exactly one place.
This is particularly helpful for validating the world state when ghost_mode=False and
agents can stack, since the logic for moving them around gets a bit messy.
Prints a message and drops into pdb if there's an inconsistency.
'''
agent_locs = [[] for _ in range(len(self.agents))]
for i in range(self.grid.width):
for j in range(self.grid.height):
x = self.grid.get(i,j)
for k,agent in enumerate(self.agents):
if x==agent:
agent_locs[k].append(('top', (i,j)))
if hasattr(x, 'agents') and agent in x.agents:
agent_locs[k].append(('stacked', (i,j)))
if not all([len(x)==1 for x in agent_locs]):
print(f"{title} > Failed integrity test!")
for a, al in zip(self.agents, agent_locs):
print(" > ", a.color,'-', al)
def step(self, actions):
# Spawn agents if it's time.
for agent in self.agents:
if not agent.active and not agent.done and self.step_count >= agent.spawn_delay:
self.place_obj(agent, **self.agent_spawn_kwargs)
agent.activate()
assert len(actions) == len(self.agents)
step_rewards = np.zeros((len(self.agents,)), dtype=np.float)
self.step_count += 1
iter_agents = list(enumerate(zip(self.agents, actions)))
iter_order = np.arange(len(iter_agents))
self.np_random.shuffle(iter_order)
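# Agents act in a freshly shuffled order each step, so conflicts (e.g. two agents entering the same cell) are not always resolved in favor of the same agent.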
for shuffled_ix in iter_order:
agent_no, (agent, action) = iter_agents[shuffled_ix]
agent.step_reward = 0
if agent.active:
cur_pos = agent.pos[:]
cur_cell = self.grid.get(*cur_pos)
fwd_pos = agent.front_pos[:]
fwd_cell = self.grid.get(*fwd_pos)
agent_moved = False
# Rotate left
if action == agent.actions.left:
agent.dir = (agent.dir - 1) % 4
# Rotate right
elif action == agent.actions.right:
agent.dir = (agent.dir + 1) % 4
# Move forward
elif action == agent.actions.forward:
# Under the following conditions, the agent can move forward.
can_move = fwd_cell is None or fwd_cell.can_overlap() | if self.ghost_mode is False and isinstance(fwd_cell, GridAgent): | 4 | 2023-12-24 06:50:38+00:00 | 12k |
smonsays/modular-hyperteacher | tests/data/test_imitation.py | [
{
"identifier": "CompositionalGrid",
"path": "metax/data/envs/grid.py",
"snippet": "class CompositionalGrid(Environment):\n def __init__(\n self,\n grid_size: int,\n num_interactions: int,\n num_mazes: int,\n num_objects: int,\n num_distractors: int,\n frac_ood: float,\n task_support: str,\n seed: int,\n ) -> None:\n super().__init__()\n assert grid_size > 5, \"grid_size must be greater than 5\"\n\n self.grid_size = grid_size\n self.num_interactions = num_interactions\n self.num_directions = 4 # split grid into 4 quadrants for the goal position\n self.num_objects = num_objects\n self.num_mazes = num_mazes\n self.num_distractors = num_distractors\n self.frac_ood = frac_ood\n self.task_support = task_support\n self.seed = seed\n self.rng = jax.random.PRNGKey(seed)\n self.num_factors = 4 # direction, interaction, maze, object\n\n # Static matrices\n self._delta_position = jnp.concatenate((\n jnp.array([[-1, 0], [0, 1], [1, 0], [0, -1]]), # up, right, down, left\n jnp.zeros((self.num_interactions, 2), dtype=jnp.int32), # no movement for interaction\n ))\n size_low, size_high = grid_size // 2, (grid_size // 2) + grid_size % 2\n self._quadrants = jnp.stack((\n np.block([\n [np.ones((size_high, size_high)), np.zeros((size_high, size_low))],\n [np.zeros((size_low, size_high)), np.zeros((size_low, size_low))]\n ]),\n np.block([\n [np.zeros((size_high, size_high)), np.ones((size_high, size_low))],\n [np.zeros((size_low, size_high)), np.zeros((size_low, size_low))]\n ]),\n np.block([\n [np.zeros((size_high, size_high)), np.zeros((size_high, size_low))],\n [np.ones((size_low, size_high)), np.zeros((size_low, size_low))]\n ]),\n np.block([\n [np.zeros((size_high, size_high)), np.zeros((size_high, size_low))],\n [np.zeros((size_low, size_high)), np.ones((size_low, size_low))]\n ]),\n ))\n\n # Pregenerate possible goals and randomly split into in/out of distribution\n self.tasks_all = np.array(list(itertools.product(\n range(self.num_directions),\n range(self.num_interactions),\n range(self.num_mazes),\n range(self.num_objects),\n )))\n\n if self.task_support == \"non_compositional\":\n # in/out split with non-compositional support\n self.tasks_in_dist = np.array(list(itertools.product(\n range(self.num_directions - 1), # hold out one goal quadrant from in_dist\n range(self.num_interactions),\n range(self.num_mazes),\n range(self.num_objects),\n )))\n\n @partial(np.vectorize, signature=\"(k),(n,k)->()\")\n def elem_in_array(elem, array):\n return np.any(np.all(elem == array, axis=1))\n\n self.tasks_out_dist = self.tasks_all[~elem_in_array(self.tasks_all, self.tasks_in_dist)]\n\n elif \"_hot\" in self.task_support:\n num_hot = int(self.task_support.split(\"_\")[0])\n mask = jnp.sum(self.tasks_all > 0, axis=1) <= num_hot\n self.tasks_in_dist = jnp.array(self.tasks_all[mask])\n self.tasks_out_dist = jnp.array(self.tasks_all[~mask])\n\n elif self.task_support == \"random\":\n self.tasks_all = jax.random.permutation(self.rng, self.tasks_all)\n self.num_ood = int(len(self.tasks_all) * self.frac_ood)\n self.tasks_in_dist = jnp.array(self.tasks_all[: -self.num_ood])\n self.tasks_out_dist = jnp.array(self.tasks_all[-self.num_ood:])\n\n # Make sure all features for every factor are present in the in-distribution tasks\n assert len(jnp.unique(self.tasks_in_dist[:, 0])) == self.num_directions\n assert len(jnp.unique(self.tasks_in_dist[:, 1])) == self.num_interactions\n assert len(jnp.unique(self.tasks_in_dist[:, 2])) == self.num_mazes\n assert len(jnp.unique(self.tasks_in_dist[:, 3])) == self.num_objects\n else:\n raise ValueError(f\"Invalid task support: {self.task_support}\")\n\n 
assert len(self.tasks_in_dist) > 0\n assert len(self.tasks_out_dist) > 0\n\n # Create random mazes\n if self.num_mazes > 0:\n self.mazes = jnp.stack([\n self.generate_random_maze(self.grid_size, seed=self.seed + i)\n for i in range(self.num_mazes)\n ])\n else:\n self.mazes = jnp.zeros((1, self.grid_size, self.grid_size))\n\n # Precompute optimal paths, this is potentially expensive for large grid sizes\n optimal_paths, shortest_paths = list(\n zip(*[self._precompute_optimal_paths(m) for m in self.mazes])\n )\n self.optimal_paths, shortest_paths = jnp.stack(optimal_paths), jnp.stack(shortest_paths)\n self.valid_goal_dist = shortest_paths >= self.grid_size\n\n @property\n def num_actions(self) -> int:\n return 4 + self.num_interactions\n\n @property\n def observation_shape(self) -> Tuple[int]:\n # encodes positions of agent, objects and walls\n return (self.grid_size, self.grid_size, self.num_objects + 2)\n\n def reset_goal(self, rng: PRNGKey, mode: str) -> Array:\n assert mode in [\"ood\", \"test\", \"train\"]\n if mode == \"ood\":\n task_code = jax.random.choice(rng, self.tasks_out_dist)\n else:\n task_code = jax.random.choice(rng, self.tasks_in_dist)\n\n task_id = jnp.ravel_multi_index(\n task_code,\n dims=(self.num_directions, self.num_interactions, self.num_mazes, self.num_objects),\n mode=\"wrap\",\n )\n emb_dim = max(self.num_directions, self.num_interactions, self.num_mazes, self.num_objects)\n embedding = jax.nn.one_hot(task_code, emb_dim)\n\n return CompositionalGridGoal(*task_code), {\"task_id\": task_id, \"embedding\": embedding}\n\n def reset(\n self, rng: PRNGKey, goal: Optional[CompositionalGridGoal] = None\n ) -> Tuple[CompositionalGridState, EnvironmentInteraction]:\n \"\"\"Resets the environment to a random, initial state\"\"\"\n rng_distractor, rng_pos1, rng_pos2, rng_pos3, rng_goal = jax.random.split(rng, 5)\n\n if goal is None:\n # Sample a goal from train distribution if None specified\n goal, _ = self.reset_goal(rng_goal, mode=\"train\")\n\n # Sample distractor objects distinct from goal object\n distractors = jax.random.choice(\n key=rng_distractor,\n a=self.num_objects,\n shape=(self.num_distractors,),\n replace=True,\n p=1.0 - (jnp.arange(self.num_objects) == goal.object)\n )\n\n # Sample distinct, random positions for agent, distractors and the goal respecting direction\n position_goal = jax.random.choice(\n key=rng_pos2,\n a=np.array(list(itertools.product(range(self.grid_size), repeat=2))),\n shape=(1, ),\n p=((1.0 - self.mazes[goal.maze]) * self._quadrants[goal.direction]).reshape(-1),\n )\n goal_coord = self._coord_to_idx(position_goal[0][0], position_goal[0][1])\n position_agent = jax.random.choice(\n key=rng_pos1,\n a=np.array(list(itertools.product(range(self.grid_size), repeat=2))),\n shape=(1, ),\n p=((1.0 - self.mazes[goal.maze]).reshape(-1) * self.valid_goal_dist[goal.maze][goal_coord]),\n )\n positions_distractors = jax.random.choice(\n key=rng_pos3,\n a=np.array(list(itertools.product(range(self.grid_size), repeat=2))),\n shape=(self.num_distractors, ),\n replace=False,\n p=1.0 - self.mazes[goal.maze].reshape(-1),\n )\n\n positions = jnp.concatenate([position_goal, positions_distractors, position_agent])\n\n env_state = CompositionalGridState(\n done=False, timestep=0, distractors=distractors, positions=positions, goal=goal\n )\n emission = EnvironmentInteraction(\n observation=self.observe(env_state), reward=0.0, done=False, timestep=0\n )\n\n return env_state, emission\n\n def _step(\n self, rng: PRNGKey, env_state, action: Array\n ) -> 
Tuple[CompositionalGridState, EnvironmentInteraction]:\n pos_agent = env_state.positions[-1, :]\n\n # Check if agent reached goal (positive reward)\n goal_reached = jnp.logical_and(\n action == (len(MOVES) + env_state.goal.interaction),\n jnp.all(pos_agent == env_state.positions[0, :]),\n )\n reward = 1.0 * goal_reached\n\n # Move the agent to new position and check if valid\n pos_new = self._delta_position[action] + pos_agent\n pos_invalid = jnp.logical_or(\n jnp.logical_or(jnp.any(pos_new < 0), jnp.any(pos_new >= self.grid_size)), # in grid?\n self.mazes[env_state.goal.maze][pos_new[0], pos_new[1]], # in wall?\n )\n pos_new = jnp.where(pos_invalid, pos_agent, pos_new)\n\n # Update state\n positions = env_state.positions.at[-1].set(pos_new)\n env_state = CompositionalGridState(\n done=goal_reached,\n timestep=env_state.timestep + 1,\n distractors=env_state.distractors,\n positions=positions,\n goal=env_state.goal,\n )\n\n emission = EnvironmentInteraction(\n observation=self.observe(env_state),\n reward=reward,\n done=env_state.done,\n timestep=env_state.timestep,\n )\n\n return env_state, emission\n\n def observe(self, env_state: CompositionalGridState) -> Array:\n \"\"\"\n Encode the environment state as an asrray of shape (grid_size, grid_size, num_factors * num_objects + 1).\n For each position in the grid, the code word has the following structure:\n [factor_0_feature_0, ..., factor_0_feature_n, ..., factor_n_feature_0, ..., factor_n_feature_n, wall?, agent?]\n \"\"\"\n objects = jnp.concatenate([jnp.array([env_state.goal.object]), env_state.distractors])\n objects_hot = jax.nn.one_hot(objects, num_classes=self.num_objects)\n pos_objects, pos_agent = env_state.positions[0:-1, :], env_state.positions[-1, :]\n\n # Build the grid\n grid = jnp.zeros(self.observation_shape)\n grid = grid.at[\n jnp.expand_dims(pos_objects[:, 0], axis=1),\n jnp.expand_dims(pos_objects[:, 1], axis=1),\n :-2,\n ].set(jnp.expand_dims(objects_hot, axis=1))\n grid = grid.at[:, :, -2].set(self.mazes[env_state.goal.maze]) # walls encoded in penultimate channel\n grid = grid.at[pos_agent[0], pos_agent[1], -1].set(1.0) # agent encoded in last channel\n\n return grid\n\n def _features_to_idx(self, features: Array) -> Array:\n \"\"\"Converts features to a unique feature index\"\"\"\n idx = [factor * self.num_objects + feature for factor, feature in enumerate(features)]\n return jnp.array(idx)\n\n def _coord_to_idx(self, x, y):\n \"\"\"Converts coordinates to a unique grid index\"\"\"\n return x * self.grid_size + y\n\n def _idx_to_coord(self, idx):\n \"\"\"Converts a grid index to grid coordinates\"\"\"\n return idx // self.grid_size, idx % self.grid_size\n\n def demonstrate(\n self, rng: PRNGKey, env_state: CompositionalGridState\n ) -> EnvironmentInteraction:\n \"\"\"Given a state, compute the optimal trajectory to the goal.\"\"\"\n pos_agent, pos_goal = env_state.positions[-1, :], env_state.positions[0, :]\n idx_agent, idx_goal = self._coord_to_idx(*pos_agent), self._coord_to_idx(*pos_goal)\n optimal_actions = self.optimal_paths[env_state.goal.maze][idx_agent, idx_goal]\n\n # Fill placeholder actions with correct interaction\n mask_pad = (optimal_actions == -1)\n optimal_actions *= ~mask_pad\n optimal_actions += (len(MOVES) + env_state.goal.interaction) * mask_pad\n\n def env_step(carry, action):\n rng, env_state = carry\n rng, rng_step = jax.random.split(rng)\n env_state, emission = self.step(rng_step, env_state, action)\n return (rng, env_state), emission\n\n _, trajectory = jax.lax.scan(env_step, (rng, 
env_state), optimal_actions)\n\n # Append initial emission and remove last emission from trajectory\n initial_emission = EnvironmentInteraction(\n observation=self.observe(env_state),\n reward=0.0,\n done=False,\n timestep=0,\n )\n trajectory = jtu.tree_map(\n lambda x, y: jnp.concatenate((jnp.expand_dims(x, axis=0), y)),\n initial_emission, trajectory\n )\n trajectory = jtu.tree_map(lambda x: x[:-1], trajectory)\n\n return trajectory, optimal_actions\n\n def _precompute_optimal_paths(self, maze: Array):\n \"\"\"Precompute the optimal trajectories for all possible states.\"\"\"\n # Create an array that encodes the graph structure of the grid to compute all shortest paths\n coordinates, no_walls_coords = [], np.argwhere(maze == 0)\n for x, y in no_walls_coords:\n edges = []\n if x > 0 and not maze[x - 1, y]:\n edges.append([x - 1, y])\n if x < self.grid_size - 1 and not maze[x + 1, y]:\n edges.append([x + 1, y])\n if y > 0 and not maze[x, y - 1]:\n edges.append([x, y - 1])\n if y < self.grid_size - 1 and not maze[x, y + 1]:\n edges.append([x, y + 1])\n\n idx_curr = self._coord_to_idx(x, y)\n coordinates += [(idx_curr, self._coord_to_idx(i, k)) for (i, k) in edges]\n\n coordinates = np.array(coordinates)\n connectivity = np.zeros((self.grid_size**2, self.grid_size**2))\n connectivity[coordinates[:, 0], coordinates[:, 1]] = 1.0\n shortest_paths, predecessors = shortest_path(connectivity, return_predecessors=True)\n max_num_actions = (self.grid_size**2) - 1\n\n def get_path(predecessors, start, end):\n \"\"\"Get the full path from the predecessor matrix.\"\"\"\n path = [end]\n while path[-1] != start:\n path.append(predecessors[start, path[-1]])\n return path[::-1]\n\n def path_to_actions(path):\n \"\"\"Convert path to actions.\"\"\"\n # Pad with placeholder actions, need to be overwritten with correct interaction in self.demonstrate()\n actions = np.full((max_num_actions), -1)\n for i in range(len(path) - 1):\n x1, y1 = self._idx_to_coord(path[i])\n x2, y2 = self._idx_to_coord(path[i + 1])\n action = np.array([x2 - x1, y2 - y1])\n action = np.where(np.all(self._delta_position == action, axis=1))[0][0]\n actions[i] = action\n return np.array(actions)\n\n # Precompute optimal paths for all possible positions\n optimal_paths = -1 * np.ones(\n (self.grid_size**2, self.grid_size**2, max_num_actions), dtype=int\n )\n for start in no_walls_coords:\n for goal in no_walls_coords:\n start_idx, goal_idx = self._coord_to_idx(*start), self._coord_to_idx(*goal)\n path = get_path(predecessors, start_idx, goal_idx)\n actions = path_to_actions(path)\n optimal_paths[start_idx, goal_idx, :] = actions\n\n return jnp.array(optimal_paths), jnp.array(shortest_paths)\n\n @staticmethod\n def generate_random_maze(\n grid_size: int, complexity: float = 0.75, density: float = 0.75, seed: int = 0\n ):\n \"\"\"\n Generate a random maze array.\n Walls are encoded as 1 and free space as 0.\n\n Adapted from https://github.com/zuoxingdong/mazelab/blob/master/mazelab/generators/random_maze.py\n which is based on https://en.wikipedia.org/wiki/Maze_generation_algorithm\n \"\"\"\n assert grid_size % 2 == 1, \"Maze size must be odd\"\n grid_size_pad = grid_size + 2\n np_rng = np.random.default_rng(seed)\n\n # Adjust complexity and density relative to maze size\n complexity = int(complexity * (5 * (grid_size_pad + grid_size_pad)))\n density = int(density * ((grid_size_pad // 2) * (grid_size_pad // 2)))\n\n # Fill borders\n grid = np.zeros((grid_size_pad, grid_size_pad), dtype=bool)\n grid[0, :] = grid[-1, :] = 1\n grid[:, 0] = 
grid[:, -1] = 1\n\n # Make aisles\n for _ in range(density):\n x, y = (\n np_rng.integers(0, grid_size_pad // 2 + 1) * 2,\n np_rng.integers(0, grid_size_pad // 2 + 1) * 2,\n )\n grid[y, x] = 1\n for j in range(complexity):\n neighbours = []\n if x > 1:\n neighbours.append((y, x - 2))\n if x < grid_size_pad - 2:\n neighbours.append((y, x + 2))\n if y > 1:\n neighbours.append((y - 2, x))\n if y < grid_size_pad - 2:\n neighbours.append((y + 2, x))\n if len(neighbours):\n y_, x_ = neighbours[np_rng.integers(0, len(neighbours))]\n if grid[y_, x_] == 0:\n grid[y_, x_] = 1\n grid[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1\n x, y = x_, y_\n\n return grid.astype(int)[1:-1, 1:-1]"
},
{
"identifier": "ImitationMetaDataloader",
"path": "metax/data/imitation.py",
"snippet": "class ImitationMetaDataloader(Dataloader):\n def __init__(\n self,\n env: Environment,\n num_tasks: int,\n shots_train: int,\n shots_test: int,\n meta_batch_size: int,\n mode: str,\n train_test_split: bool,\n rng: PRNGKey,\n ):\n super().__init__(input_shape=env.observation_shape, output_dim=env.num_actions)\n self.env = env\n self.num_tasks = num_tasks\n self.shots_train = shots_train\n self.shots_test = shots_test\n self.meta_batch_size = meta_batch_size\n self.mode = mode\n self.train_test_split = train_test_split\n self.fixed_rng = rng\n\n assert num_tasks % meta_batch_size == 0, \"num_tasks must be divisible by meta_batch_size\"\n self.num_steps = num_tasks // meta_batch_size\n\n @property\n def sample_input(self):\n return jnp.zeros((1,) + self.env.observation_shape)\n\n def __len__(self):\n return self.num_steps\n\n def __iter__(self):\n for rng in jax.random.split(self.fixed_rng, self.num_steps):\n # Sample batch and wrap as MetaDataset\n rngs_batch = jax.random.split(rng, self.meta_batch_size)\n yield self.sample_metatask(rngs_batch)\n\n @partial(jax.jit, static_argnames=\"self\")\n @partial(jax.vmap, in_axes=(None, 0))\n def sample_metatask(self, rng: PRNGKey) -> MetaDataset:\n rng_goal, rng_task = jax.random.split(rng, 2)\n goal, info = self.env.reset_goal(rng_goal, mode=self.mode)\n\n @jax.vmap\n def sample_task(rng):\n rng_reset, rng_demo = jax.random.split(rng, 2)\n env_state, _ = self.env.reset(rng_reset, goal=goal)\n trajectory, actions = self.env.demonstrate(rng_demo, env_state)\n\n return MultitaskDataset(\n x=trajectory.observation,\n y=actions,\n task_id=jnp.full(actions.shape[:1], info[\"task_id\"]),\n info={\n \"mask\": ~trajectory.done,\n \"embeddings\": jnp.repeat(info[\"embedding\"][None, :], actions.shape[0], axis=0),\n },\n )\n\n rngs_task = jax.random.split(rng_task, self.shots_train + self.shots_test)\n train_and_test_task = sample_task(rngs_task)\n\n if self.train_test_split:\n # Split into train and test set\n return MetaDataset(\n train=jtu.tree_map(\n lambda x: x[:self.shots_train].reshape(-1, *x.shape[2:]), train_and_test_task\n ),\n test=jtu.tree_map(\n lambda x: x[self.shots_train:].reshape(-1, *x.shape[2:]), train_and_test_task\n ),\n )\n else:\n # No train_test split means, meta.train == meta.test set\n return MetaDataset(\n train=jtu.tree_map(lambda x: x.reshape(-1, *x.shape[2:]), train_and_test_task),\n test=jtu.tree_map(lambda x: x.reshape(-1, *x.shape[2:]), train_and_test_task),\n )"
},
{
"identifier": "create_imitation_metaloader",
"path": "metax/data/imitation.py",
"snippet": "def create_imitation_metaloader(\n name,\n meta_batch_size,\n shots_train,\n shots_test,\n train_test_split,\n num_tasks_train,\n num_tasks_test,\n num_tasks_valid,\n num_tasks_ood: Optional[int] = None,\n seed=None,\n **kwargs,\n):\n ood_sets_hot = None\n if name == \"compositional_grid\":\n env = CompositionalGrid(\n grid_size=kwargs[\"grid_size\"],\n num_interactions=kwargs[\"num_interactions\"],\n num_mazes=kwargs[\"num_mazes\"],\n num_objects=kwargs[\"num_objects\"],\n num_distractors=kwargs[\"num_distractors\"],\n frac_ood=kwargs[\"frac_ood\"],\n task_support=kwargs[\"task_support\"],\n seed=seed,\n )\n elif name == \"compositional_preference\":\n # Return the various OOD tasks for the compositional preference env.\n ood_sets_hot = jnp.arange(kwargs[\"num_hot\"] + 1, kwargs[\"num_preferences\"] + 1)\n env = CompositionalPreference(\n num_preferences=kwargs[\"num_preferences\"],\n num_features=kwargs[\"num_features\"],\n num_objects=kwargs[\"num_objects\"],\n num_hot=kwargs[\"num_hot\"],\n continuous_combinations=kwargs[\"continuous_combinations\"],\n discount=kwargs[\"discount\"],\n frac_ood=kwargs[\"frac_ood\"],\n timelimit=kwargs[\"timelimit\"],\n task_support=kwargs[\"task_support\"],\n seed=seed,\n )\n\n else:\n raise ValueError(f\"Unknown environment {name}\")\n\n rng_train, rng_test, rng_valid, rng_ood = jax.random.split(jax.random.PRNGKey(seed), 4)\n\n metatrainloader = ImitationMetaDataloader(\n env=env,\n num_tasks=num_tasks_train,\n shots_train=shots_train,\n shots_test=shots_test if train_test_split else 0,\n meta_batch_size=meta_batch_size,\n mode=\"train\",\n train_test_split=train_test_split,\n rng=rng_train\n )\n\n metatestloader = ImitationMetaDataloader(\n env=env,\n num_tasks=num_tasks_test,\n shots_train=shots_train,\n shots_test=1, # HACK: we need shots_support_train, shots_query_train, shots_support_test and shots_query_test\n meta_batch_size=num_tasks_test,\n mode=\"test\",\n train_test_split=True,\n rng=rng_test\n )\n metavalidloader = ImitationMetaDataloader(\n env=env,\n num_tasks=num_tasks_valid,\n shots_train=shots_train,\n shots_test=1, # HACK: we need shots_support_train, shots_query_train, shots_support_test and shots_query_test\n meta_batch_size=num_tasks_valid,\n mode=\"test\",\n train_test_split=True,\n rng=rng_valid\n )\n metaoodloader = ImitationMetaDataloader(\n env=env,\n num_tasks=num_tasks_ood,\n shots_train=shots_train,\n shots_test=1, # HACK: we need shots_support_train, shots_query_train, shots_support_test and shots_query_test\n meta_batch_size=num_tasks_ood,\n mode=\"ood\",\n train_test_split=True,\n rng=rng_ood\n )\n\n if ood_sets_hot is not None:\n metaauxloaders = {\n \"ood_{}\".format(ood_set): ImitationMetaDataloader(\n env=env,\n num_tasks=num_tasks_ood,\n shots_train=shots_train,\n shots_test=1, # HACK: we need shots_support_train, shots_query_train, shots_support_test and shots_query_test\n meta_batch_size=num_tasks_ood,\n mode=\"ood_{}\".format(ood_set),\n train_test_split=True,\n rng=r,\n )\n for ood_set, r in zip(ood_sets_hot, jax.random.split(rng_ood, len(ood_sets_hot)))\n }\n else:\n metaauxloaders = None\n\n return metatrainloader, metatestloader, metavalidloader, metaoodloader, metaauxloaders"
},
{
"identifier": "tree_length",
"path": "metax/utils/pytree.py",
"snippet": "def tree_length(pytree):\n \"\"\"\n Get size of leading dim assuming all leaves have the same.\n \"\"\"\n chex.assert_equal_shape(jtu.tree_leaves(pytree), dims=0)\n\n return len(jtu.tree_leaves(pytree)[0])"
}
] | import unittest
import jax
import jax.numpy as jnp
from metax.data.envs.grid import CompositionalGrid
from metax.data.imitation import (ImitationMetaDataloader,
create_imitation_metaloader)
from metax.utils import tree_length | 7,470 | """
Copyright (c) Simon Schug
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class ImitationTestCase(unittest.TestCase):
rng = jax.random.PRNGKey(0)
def test_ImitationMetaDataloader(self):
env = CompositionalGrid(
grid_size := 7,
num_interactions := 3,
num_mazes := 2,
num_objects := 5,
num_distractors := 2,
frac_ood := 0.2,
task_support := "random",
seed := 2022,
)
loader = ImitationMetaDataloader(
env,
num_tasks := 2048,
shots_train := 1,
shots_test := 1,
meta_batch_size := 128,
mode="train",
train_test_split=False,
rng=self.rng
)
assert len(loader) == num_tasks / meta_batch_size
for batch in loader:
assert jnp.all(batch.train.task_id == batch.test.task_id)
# assert jnp.all(batch.train.x != batch.test.x)
# assert jnp.any(batch.train.y != batch.test.y)
assert len(batch.train.x) == meta_batch_size
def test_create_imitation_metaloader(self):
trainloader, testloader, validloader, oodloader, _ = create_imitation_metaloader(
name := "compositional_grid",
meta_batch_size := 128,
shots_train := 2,
shots_test := 2,
train_test_split := False,
num_tasks_train := 4096,
num_tasks_test := 1024,
num_tasks_valid := 1024,
num_tasks_ood := 1024,
seed := 2022,
grid_size=7,
num_interactions=3,
num_mazes=2,
num_objects=5,
num_distractors=2,
frac_ood=0.2,
task_support="random",
)
assert trainloader.sample_input.shape == (1, 7, 7, 5 + 2)
goals_ood = []
for batch in oodloader:
goals_ood.append(jnp.unique(batch.test.task_id[:, 0], axis=0))
goals_ood = jnp.concatenate(goals_ood)
goals_train = []
for batch in trainloader:
goals_train.append(jnp.unique(batch.test.task_id[:, 0], axis=0))
goals_train = jnp.unique(jnp.concatenate(goals_train), axis=0)
assert len(goals_train) + len(goals_ood) == 3 * 2 * 5 * 4
# Check that ood tasks are disjoint from train tasks
for g_ood in goals_ood:
assert not jnp.any(g_ood == goals_train)
for batch_test, batch_valid in zip(testloader, validloader):
| """
Copyright (c) Simon Schug
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class ImitationTestCase(unittest.TestCase):
rng = jax.random.PRNGKey(0)
def test_ImitationMetaDataloader(self):
env = CompositionalGrid(
grid_size := 7,
num_interactions := 3,
num_mazes := 2,
num_objects := 5,
num_distractors := 2,
frac_ood := 0.2,
task_support := "random",
seed := 2022,
)
loader = ImitationMetaDataloader(
env,
num_tasks := 2048,
shots_train := 1,
shots_test := 1,
meta_batch_size := 128,
mode="train",
train_test_split=False,
rng=self.rng
)
assert len(loader) == num_tasks / meta_batch_size
for batch in loader:
assert jnp.all(batch.train.task_id == batch.test.task_id)
# assert jnp.all(batch.train.x != batch.test.x)
# assert jnp.any(batch.train.y != batch.test.y)
assert len(batch.train.x) == meta_batch_size
def test_create_imitation_metaloader(self):
trainloader, testloader, validloader, oodloader, _ = create_imitation_metaloader(
name := "compositional_grid",
meta_batch_size := 128,
shots_train := 2,
shots_test := 2,
train_test_split := False,
num_tasks_train := 4096,
num_tasks_test := 1024,
num_tasks_valid := 1024,
num_tasks_ood := 1024,
seed := 2022,
grid_size=7,
num_interactions=3,
num_mazes=2,
num_objects=5,
num_distractors=2,
frac_ood=0.2,
task_support="random",
)
assert trainloader.sample_input.shape == (1, 7, 7, 5 + 2)
goals_ood = []
for batch in oodloader:
goals_ood.append(jnp.unique(batch.test.task_id[:, 0], axis=0))
goals_ood = jnp.concatenate(goals_ood)
goals_train = []
for batch in trainloader:
goals_train.append(jnp.unique(batch.test.task_id[:, 0], axis=0))
goals_train = jnp.unique(jnp.concatenate(goals_train), axis=0)
assert len(goals_train) + len(goals_ood) == 3 * 2 * 5 * 4
# Check that ood tasks are disjoint from train tasks
for g_ood in goals_ood:
assert not jnp.any(g_ood == goals_train)
for batch_test, batch_valid in zip(testloader, validloader): | assert tree_length(batch_test) == num_tasks_test | 3 | 2023-12-22 16:35:49+00:00 | 12k |
willfinnigan/RetroBioCat_2 | rbc2/mcts/mcts.py | [
{
"identifier": "MultiExpander",
"path": "rbc2/expansion/multi_expander.py",
"snippet": "class MultiExpander:\n\n def __init__(self,\n expanders: dict[str: Expander],\n network: Optional[Network] = None):\n\n if len(expanders) == 0:\n raise ValueError(\"No expanders provided\")\n\n self.expanders = expanders\n\n # check that all expanders have the same config\n expander_configs = [expander.config for expander in expanders.values()]\n if len(set(expander_configs)) != 1:\n raise ValueError(\"All expanders must have the same config instance\")\n\n # all expanders should have the same config, so just use the first one\n self.expander_config = list(self.expanders.values())[0].config\n\n # give all expanders the same network\n for expander in self.expanders.values():\n expander.network = network\n\n def get_options(self, smis_to_expand: List[str], combination_by: str = 'order_by_score') -> List[ReactionOption]:\n \"\"\" For multiple smiles, get the options from each expander and combine them using the combination method \"\"\"\n per_expander_options = []\n for name, expander in self.expanders.items():\n options = []\n for smi in smis_to_expand:\n if self.is_expander_blocked(smi, expander):\n continue\n options += expander.get_options(smi)\n\n options = sort_options_by_score(options)\n per_expander_options.append(options)\n\n combination_method = combination_methods[combination_by]\n options = combination_method(per_expander_options)\n return options\n\n def get_reactions(self, smis_to_expand: List[str]) -> List[Reaction]:\n options = self.get_options(smis_to_expand)\n reactions = []\n for opt in options:\n new_reactions = opt.evaluate()\n reactions.extend(new_reactions)\n return reactions\n\n def template_application_counts(self) -> dict:\n \"\"\" Return a dictionary of the number of times a template has been applied for each expander \"\"\"\n counts = {}\n for expander_name, expander in self.expanders.items():\n counts[expander_name] = expander.number_of_rule_applications()\n counts['total'] = sum([x for x in counts.values()])\n return counts\n\n def expander_calls(self) -> dict:\n \"\"\" Return a dictionary of the number of times each expander has been called \"\"\"\n counts = {}\n for expander_name, expander in self.expanders.items():\n counts[expander_name] = expander.number_of_calls()\n counts['total'] = sum([x for x in counts.values()])\n return counts\n\n def is_expander_blocked(self, smi: str, expander: Expander) -> bool:\n \"\"\" Return a list of blocked expanders \"\"\"\n if expander.rxn_domain == 'biocatalysis' or expander.rxn_domain == 'biosynthesis':\n if self.expander_config.use_max_mw_for_enzymes is True:\n if get_mw(smi) > self.expander_config.max_mw_to_use_enzymes:\n return True\n return False"
},
{
"identifier": "Filter",
"path": "rbc2/reaction_evaluation/feasability_filters.py",
"snippet": "RETROBIOCAT_FILTER = 'retrobiocat_filter'\nAIZYNTHFINDER_FILTER = 'aizynthfinder_feasability_filter'\ndef aizynthfinder_feasability(reaction: Reaction) -> float:\ndef retrobiocat_feasability(reaction: Reaction) -> float:"
},
{
"identifier": "DefaultSQLStartingMaterialEvaluator",
"path": "rbc2/reaction_evaluation/starting_material_evaluator/starting_material_evaluator.py",
"snippet": "class DefaultSQLStartingMaterialEvaluator(StartingMaterialEvaluatorInterface):\n vendor_urls = {'mcule': 'https://mcule.com/[[ID]]',\n 'sigma': 'https://www.sigmaaldrich.com/GB/en/search/[[ID]]?focus=products&page=1&perpage=30&sort=relevance&term=[[ID]]&type=product',\n 'lifechem': 'https://shop.lifechemicals.com/compound/[[ID]]',\n 'apollo': 'https://store.apolloscientific.co.uk/search?search=[[ID]]',\n 'alfa': 'https://www.alfa.com/en/catalog/[[ID]]',\n 'zinc': 'https://zinc.docking.org/substances/[[ID]]',\n 'flurochem': 'http://www.fluorochem.co.uk/Products/Product?code=[[ID]]',\n 'molport': 'https://www.molport.com/shop/molecule-link/[[ID]]',\n 'ecmdb': 'https://ecmdb.ca/compounds/[[ID]]'}\n\n available_modes = ['building_blocks', 'metabolites']\n\n def __init__(self, config: Optional[SourceMol_Config] = None, custom_smiles=None, blocked_smiles=None):\n\n if does_source_mols_db_exist() == False:\n download_source_mols_db()\n\n db_path = data_folder + '/source_mols.db'\n self.database = SQLite_Database(db_path)\n self.query = DB_Query_SQLite(self.database)\n self.cache_column_names = {}\n self.cache_vendor_names = {}\n self.config = config\n if self.config is None:\n self.config = SourceMol_Config()\n\n self.custom_smiles = custom_smiles\n if self.custom_smiles is None:\n self.custom_smiles = []\n\n self.blocked_smiles = blocked_smiles\n if self.blocked_smiles is None:\n self.blocked_smiles = []\n\n @lru_cache(maxsize=10000)\n def eval(self, smi):\n if smi in self.blocked_smiles:\n return False, {}\n if smi in self.custom_smiles:\n return True, {}\n\n mode, vendors = self.config.get_mode_and_vendors()\n\n if self.is_mol_chiral(smi) and self.config.source_mols_can_be_chiral is False:\n return False, {}\n\n result = self.query.smiles_lookup(smi, mode, vendors=vendors)\n if result is None:\n return False, {}\n\n info = self._process_info(result, mode)\n\n if self._is_above_max_price_per_gram(info, vendors) == True:\n return False, info\n return True, info\n\n def is_mol_chiral(self, smi):\n if '@' in smi:\n return True\n return False\n\n @lru_cache(maxsize=10)\n def column_names(self, mode):\n if mode not in self.cache_column_names:\n self.cache_column_names[mode] = self.query.get_column_names(mode)\n return self.cache_column_names[mode]\n\n @lru_cache(maxsize=10)\n def vendor_names(self, mode):\n if mode not in self.cache_vendor_names:\n columns = self.column_names(mode)\n vendors = []\n for col in columns:\n if '_id' in col:\n vendors.append(col.replace('_id', ''))\n self.cache_vendor_names[mode] = vendors\n return self.cache_vendor_names[mode]\n\n def _process_info(self, result, mode):\n columns = self.column_names(mode)\n vendors = self.vendor_names(mode)\n info = {k: v for k, v in zip(columns, result)}\n info = {k: v for k, v in info.items() if v is not None}\n\n vendor_info = {}\n for col, value in info.items():\n for vendor in vendors:\n if vendor in col:\n if vendor not in vendor_info:\n vendor_info[vendor] = {}\n vendor_info[vendor][col.replace(f\"{vendor}_\", '')] = value\n if ('_id' in col) and (vendor in self.vendor_urls):\n url = self.vendor_urls[vendor].replace('[[ID]]', value)\n vendor_info[vendor]['url'] = url\n return vendor_info\n\n def _is_above_max_price_per_gram(self, info, requested_vendors):\n \"\"\" Determines whether the price of the molecule is above the maximum price per gram, based on settings in the config \"\"\"\n\n if requested_vendors is None:\n return False\n\n price_too_high = [] # will become list of booleans, one for each vendor, for whether 
the price is too high\n for vendor in requested_vendors:\n if vendor in info:\n if 'ppg' in info[vendor]:\n if info[vendor]['ppg'] is None:\n price_too_high.append(False)\n elif float(info[vendor]['ppg']) > self.config.max_price_per_gram:\n price_too_high.append(True)\n else:\n price_too_high.append(False)\n else:\n price_too_high.append(False)\n\n\n # if there are only Trues in the list, return True\n if len(price_too_high) == sum(price_too_high):\n return True\n\n # if config.block_if_price_over_max is True, then return True if any of the prices are too high\n if self.config.block_if_price_over_max is True:\n if True in price_too_high:\n return True\n\n # otherwise return False\n return False"
},
{
"identifier": "add_logger",
"path": "rbc2/utils/add_logger.py",
"snippet": "def add_logger(name, level='DEBUG'):\n logger = logging.getLogger(name)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(level)\n logger.propagate = False\n return logger"
},
{
"identifier": "logging_config",
"path": "rbc2/configs/logging_config.py",
"snippet": "class LoggingConfig():\n def __init__(self):\n def set_global_mode(self, global_mode):\n def set_mcts_loops_only(self):"
},
{
"identifier": "MCTS_Config",
"path": "rbc2/configs/mcts_config.py",
"snippet": "class MCTS_Config():\n\n def __init__(self):\n\n # mcts setting\n self.max_length = 4\n self.exploration = 1.414 # optimal exploration value for UCB1 is sqrt(2)=1.414 if score [0-1]\n self.max_search_time = 120\n self.max_iterations = None\n self.callback_iterations = 20 # number of iterations before the mcts callback function is called (if set)\n\n # mcts scoring\n self.use_reaction_scores_for_mcts_initial_values = True # recommended this is True, otherwise first round will be random\n self.score_mode = 'basic' # 'basic', 'complexity_penalty', 'mass_percent', ('number_of_atoms'-not implemented yet)\n self.use_pathway_length_score = True # also use a pathway length score (like aizynthfinder)\n\n # values for complexity penalty (if used)\n self.non_buyable_score = 0.2 # the default score for a non buyable compound\n self.max_complexity_penalty = -0.2 # the maximum penalty for a *complex* non buyable compound\n self.rel_complexity_no_penalty = 0 # complexity above this has no penalty\n self.rel_complexity_max_penalty = -1 # complexity below this has max penalty\n\n # multi_expansion options\n self.option_combination_method = 'order_by_score' # ['interleave, order_by_score']\n\n # expansion option scoring\n self.allow_moves_beyond_solved = 0 # number of moves beyond a solved node that are allowed, normally 0\n self.stop_expansion_if_nonbuyable_at_max_length = False # dont expand a mcts_node if a non buyable is found at max length (its impossible to solve)\n self.boost_enzyme_score_if_in_cascade = False\n self.boost_enzyme_in_cascade_score_by = 0.2\n\n\n\n self.max_chemistry_nodes = None\n\n self.chemistry_only_at_beginning_or_end = False # if true, only allow chemistry at beginning or end of pathway\n self.max_chemistry_at_beginning = None # if only allowing chemistry at beginning or end, optionally set a max number of chemistry nodes at beginning\n self.max_chemistry_at_end = None # if only allowing chemistry at beginning or end, optionally set a max number of chemistry nodes at end\n\n # expansion node evaluation\n self.avoid_blocked_reactions = True\n self.blocked_reactions = []\n\n self.merge_reactions_from_same_domain = False\n\n self.chemistry_filter = AIZYNTHFINDER_FILTER\n self.chemistry_filter_cutoff = 0.05\n self.biocatalysis_filter = 'None' # RETROBIOCAT_FILTER\n\n\n def update_from_dict(self, attr_dict):\n current_dict = self.to_dict()\n for key, value in attr_dict.items():\n if key in current_dict:\n setattr(self, key, value)\n return self\n\n def to_dict(self):\n return self.__dict__"
},
{
"identifier": "get_expanders",
"path": "rbc2/expansion/expander_repository.py",
"snippet": "def get_expanders(expander_names: Sequence[str],\n network: Optional[Network] = None,\n expander_config: Optional[Expansion_Config] = None) -> dict[str: Expander]:\n \"\"\" Get a dictionary of expanders from a list of names \"\"\"\n\n # if not expansion config specified, use default\n if expander_config is None:\n expander_config = Expansion_Config()\n\n # get expanders\n expanders = {}\n for name in expander_names:\n if name not in expander_repo:\n raise ValueError(f'Expander {name} not found in repository')\n expanders[name] = expander_repo[name](config=expander_config,\n network=network)\n\n return expanders"
},
{
"identifier": "Expander",
"path": "rbc2/expansion/default_expander_interface.py",
"snippet": "class Expander(ABC):\n \"\"\"The expander interface, which defines the methods that an expander must implement\"\"\"\n\n @abstractmethod\n def __init__(self,\n network: Optional[Network] = None,\n config: Optional[Expansion_Config] = None):\n self.network = network\n self.rxn_type = ''\n self.rxn_domain = ''\n self.config = config\n\n @abstractmethod\n def get_options(self, smi: str) -> List[ReactionOption]:\n pass\n\n @abstractmethod\n def create_option(self, smi: str, name: str, smarts: List[str],\n template_metadata: dict, score: float) -> ReactionOption:\n pass\n\n @abstractmethod\n def get_reactions(self, smi: str) -> List[Reaction]:\n pass\n\n @abstractmethod\n def number_of_rule_applications(self) -> int:\n pass\n\n @abstractmethod\n def number_of_calls(self) -> int:\n pass"
},
{
"identifier": "backpropogate",
"path": "rbc2/mcts/mcts_loop/backpropogate.py",
"snippet": "def backpropogate(node: MCTS_Node, score: float) -> List[MCTS_Node]:\n \"\"\"\n Backpropogate the score up the tree to the root.\n In the process, collect any new solved nodes and return these\n \"\"\"\n\n if node is None:\n return []\n\n new_solved_nodes = []\n while node is not None:\n node.value += score\n node.visits += 1\n if node.visits == 2 and node.solved == True: # on its first visit (starts with a 1, and we just added 1), if it is solved, add to solved nodes\n new_solved_nodes.append(node)\n node = node.parent\n\n new_solved_nodes.reverse() # shorter pathways are solved first\n return new_solved_nodes"
},
{
"identifier": "Expansion",
"path": "rbc2/mcts/mcts_loop/expansion/expand.py",
"snippet": "class Expansion():\n\n def __init__(self,\n multi_expander: MultiExpander,\n starting_material_evaluator: StartingMaterialEvaluator,\n mcts_config: MCTS_Config\n ):\n self.multi_expander = multi_expander\n self.starting_material_evaluator = starting_material_evaluator\n self.mcts_config = mcts_config\n\n def expand(self, node: MCTS_Node) -> List[MCTS_Node]:\n return expand(node, self.multi_expander, self.starting_material_evaluator, self.mcts_config)"
},
{
"identifier": "rollout",
"path": "rbc2/mcts/mcts_loop/rollout.py",
"snippet": "def rollout(node: MCTS_Node,\n expansion: Expansion,\n selection: Selection,\n network: Network,\n filters: dict[str: Filter],\n mcts_config: MCTS_Config) -> Optional[MCTS_Node]:\n \"\"\"\n 1. is node terminal\n 2. if not, expand the node\n 3. selection to go to next node\n 4. if node needs evaluating then do this\n 5. repeat\n \"\"\"\n\n if node is None:\n rollout_logger.debug(f'No rollout because node is None')\n return None\n\n start_depth = node.depth\n\n while node.terminal is False and node.fully_searched is False:\n if node.is_evaluated() is False:\n rollout_logger.debug(f'Evaluating node at depth {node.depth}')\n node = resolve_unevaluated_mcts_node(node, network, filters, mcts_config)\n\n if node.expanded is False:\n expansion.expand(node)\n\n node = selection.select(node, mcts_config.exploration)\n\n if node is None:\n return None\n\n rollout_logger.debug(f'Rollout from depth {start_depth} to depth {node.depth}')\n return node"
},
{
"identifier": "score_node",
"path": "rbc2/mcts/mcts_loop/score_node.py",
"snippet": "def score_node(node: MCTS_Node,\n mcts_config: MCTS_Config,\n starting_material_evaluator: StartingMaterialEvaluatorInterface):\n if node is None:\n return 0\n return score_pathway(node.pathway, mcts_config, starting_material_evaluator)"
},
{
"identifier": "Selection",
"path": "rbc2/mcts/mcts_loop/selection.py",
"snippet": "class Selection():\n\n def __init__(self):\n self.metrics = {}\n\n def select(self, node: MCTS_Node, exploration: float) -> Optional[MCTS_Node]:\n node = selection(node, exploration)\n if node is not None:\n if node.option is not None:\n if node.option.rxn_type not in self.metrics:\n self.metrics[node.option.rxn_type] = 0\n self.metrics[node.option.rxn_type] += 1\n return node"
},
{
"identifier": "create_root",
"path": "rbc2/mcts/tree_node.py",
"snippet": "def create_root(target_smi: str) -> MCTS_Node:\n pathway = Pathway([], target_smi=target_smi)\n root = MCTS_Node(pathway=pathway, is_root=True)\n return root"
},
{
"identifier": "MCTS_Node",
"path": "rbc2/mcts/tree_node.py",
"snippet": "class MCTS_Node():\n\n parent: Optional[MCTS_Node] = None\n pathway: Optional[Pathway] = None\n option: Optional[ReactionOption] = None\n\n terminal: bool = False\n children: list = field(default_factory=list)\n visits: int = 1\n value: float = 0\n solved: bool = False\n fully_searched: bool = False\n expanded: bool = False\n depth: int = 0\n is_root: bool = False\n\n def __post_init__(self):\n self.id = str(uuid.uuid4())\n\n def __hash__(self):\n return hash(self.id)\n\n def is_evaluated(self):\n if self.pathway is None and self.option is None:\n raise Exception(\"MCTS must either have a pathway (evaluated), or an option and a parent (non_evaluated)\")\n if self.option is not None:\n if self.parent is None:\n raise Exception(\"If node is initialised with a ReactionOption, it must have a parent node\")\n elif self.parent.pathway is None:\n raise Exception(\"If node is initialised with a ReactionOption, it's parent node must have a pathway\")\n\n return self.pathway is not None\n\n def get_last_rxn_type(self):\n if self.option is not None:\n return self.option.rxn_type\n if self.pathway is not None:\n if len(self.pathway.reactions) > 0:\n rxn_type = self.pathway.reactions[-1].rxn_type\n return rxn_type\n elif self.is_root == False:\n raise Exception(\"Pathway has no reactions, but is not root\")\n return None"
},
{
"identifier": "Network",
"path": "rbc2/reaction_network_entities/network.py",
"snippet": "class Network():\n \"\"\" Network is used to keep a record of the outcome of all expansions.\"\"\"\n\n def __init__(self, reactions: Sequence[Reaction] = ()):\n\n self.smi_produced_by: dict[Smi: Set[Reaction]] = defaultdict(set)\n self.smi_substrate_of: dict[Smi: Set[Reaction]] = defaultdict(set)\n self.reaction_options: dict[Smi: dict[ExpanderID: List[ReactionOption]]] = defaultdict(lambda: defaultdict(dict))\n self.reactions: Set[Reaction] = set()\n\n if len(reactions) != 0:\n for rxn in reactions:\n self.add_reaction(rxn)\n\n def add_reaction(self, reaction: Reaction):\n self.reactions.add(reaction)\n self.smi_produced_by[reaction.product].add(reaction)\n for smi in reaction.substrates:\n self.smi_substrate_of[smi].add(reaction)\n\n def remove_reaction(self, reaction: Reaction):\n self.reactions.discard(reaction)\n self.smi_produced_by[reaction.product].discard(reaction)\n for smi in reaction.substrates:\n self.smi_substrate_of[smi].discard(reaction)\n\n def add_option(self, option: ReactionOption):\n self.reaction_options[option.target_smi][option.rxn_type][option.unique_id] = option\n\n def bulk_add_options(self, smi: Smi, rxn_type: RxnType, list_options: List[ReactionOption]):\n self.reaction_options[smi][rxn_type] = {option.unique_id: option for option in list_options}\n\n def remove_option(self, option: ReactionOption):\n self.reaction_options[option.target_smi][option.rxn_type].pop(option.unique_id, None)\n\n def get_reaction_options(self, smi: Smi, rxn_type: RxnType) -> list[ReactionOption]:\n options_for_smi = self.reaction_options.get(smi, {})\n options_for_rxn_type = options_for_smi.get(rxn_type, {})\n return list(options_for_rxn_type.values())\n\n def are_options_available(self, smi: Smi, rxn_type: RxnType) -> bool:\n return self.reaction_options.get(smi, {}).get(rxn_type, False) is not False\n\n def get_reactions_which_molecule_is_produced_by(self, smi: Smi) -> Set[Reaction]:\n return self.smi_produced_by.get(smi, set())\n\n def get_reactions_which_molecule_is_substrate_of(self, smi: Smi) -> Set[Reaction]:\n return self.smi_substrate_of.get(smi, set())\n\n def all_smis(self) -> Set[Smi]:\n all_smis = set(self.smi_produced_by.keys())\n all_smis.update(set(self.smi_substrate_of.keys()))\n return all_smis\n\n def all_reactions(self) -> List[Reaction]:\n return list(self.reactions)\n\n def all_reaction_options(self) -> List[ReactionOption]:\n all_options = []\n for smi, rxn_type_options in self.reaction_options.items():\n for rxn_type, options_dict in rxn_type_options.items():\n for option_id, option in options_dict.items():\n all_options.append(option)\n return all_options\n\n def save(self):\n \"\"\"Save the network to a dict\"\"\"\n data = {\"reactions\": reactions_to_dicts(self.all_reactions()),\n \"reaction_options\": [option_to_dict(opt) for opt in self.all_reaction_options()]}\n return data\n\n def load(self, data: dict, expanders: List[Expander]):\n \"\"\"\n Load the network from data dict\n ReactionOptions will only be loaded if the relevant expander is provided\n \"\"\"\n\n # check each expander is associated with this network\n for expander in expanders:\n if expander.network != self:\n raise Exception(\"Can not load reaction options when expander is not associated with the same network\")\n\n # load reactions\n reaction_unique_id_dict = {}\n for reaction_dict in data['reactions']:\n reaction = reaction_from_dict(reaction_dict)\n reaction_unique_id_dict[reaction.unique_id] = reaction\n self.add_reaction(reaction)\n\n # load reaction options\n expander_dict 
= {exp.rxn_type: exp for exp in expanders}\n for opt_dict in data['reaction_options']:\n rxn_type = opt_dict['rxn_type']\n expander = expander_dict.get(rxn_type, None)\n if expander is None:\n continue\n\n option = option_from_dict(opt_dict, expander)\n\n # add reactions from ids\n for unique_id in opt_dict.get('reaction_ids', []):\n reaction = reaction_unique_id_dict.get(unique_id, None)\n if reaction is None:\n continue\n option.reactions.append(reaction)\n\n self.add_option(option)\n\n\n def get_pa_route(self, start_smi, starting_material_evaluator: StartingMaterialEvaluatorInterface):\n def get_smi_produced_by(smi):\n return list(self.smi_produced_by[smi])\n return get_pa_route(start_smi, starting_material_evaluator, get_smi_produced_by)"
},
{
"identifier": "Pathway",
"path": "rbc2/reaction_network_entities/pathway.py",
"snippet": "class Pathway:\n\n def __init__(self, reactions: List[Reaction], target_smi: Optional[str] = None):\n self.reactions = reactions\n\n self.smi_produced_by = defaultdict(set)\n self.smi_substrate_of = defaultdict(set)\n\n for reaction in self.reactions:\n self.smi_produced_by[reaction.product].add(reaction)\n for smi in reaction.substrates:\n self.smi_substrate_of[smi].add(reaction)\n\n self.product_smis = set(self.smi_produced_by.keys())\n self.substrate_smis = set(self.smi_substrate_of.keys())\n self.all_smis = self.product_smis | self.substrate_smis\n\n if target_smi is not None:\n self.target_smi = target_smi\n self.all_smis.add(self.target_smi)\n else:\n self.target_smi = self._get_target_smi()\n\n self.pathway_length = 0\n self.end_smi_depths: dict[str: int] = {}\n self.tree = self._make_tree(self.target_smi)\n\n def _get_target_smi(self):\n target_smis = [smi for smi in self.product_smis if smi not in self.substrate_smis]\n if len(target_smis) > 1:\n raise Exception('Pathway has multiple targets')\n elif len(target_smis) == 0:\n raise Exception('Pathway has no target')\n return target_smis[0]\n\n def _make_tree(self, smi: str, depth=0) -> dict:\n if self.pathway_length < depth:\n self.pathway_length = depth\n\n tree = {'smiles': smi, 'depth': depth, 'children': []}\n for reaction in self.smi_produced_by[smi]:\n for child_smi in reaction.substrates:\n tree['children'].append(self._make_tree(child_smi, depth=depth+1))\n\n if len(self.smi_produced_by[smi]) == 0:\n self.end_smi_depths[smi] = depth\n\n return tree\n\n def get_pa_route(self, starting_material_evaluator: StartingMaterialEvaluatorInterface):\n def get_smi_produced_by(smi):\n return list(self.smi_produced_by[smi])\n\n return get_pa_route(self.target_smi, starting_material_evaluator, get_smi_produced_by)\n\n def get_smi_producted_by(self, smi: str) -> Reaction:\n reactions = self.smi_produced_by[smi]\n if len(reactions) != 1:\n raise Exception(f'smi {smi} produced by multiple reactions')\n return list(reactions)[0]\n\n def end_smis(self):\n return list(self.end_smi_depths.keys())\n\n def save(self):\n \"\"\"Returns a list of dicts containing the reactions in the pathway\"\"\"\n return [asdict(reaction) for reaction in self.reactions]\n\n def get_reaction_with_product(self, smi: str) -> Optional[Reaction]:\n reactions = self.smi_produced_by[smi]\n if len(reactions) == 0:\n return None\n\n if len(reactions) != 1:\n pathway_logger.warning(f'smi {smi} produced by multiple reactions')\n\n return list(reactions)[0]\n\n def get_reaction_with_substrate(self, smi: str) -> Optional[Reaction]:\n reactions = self.smi_substrate_of[smi]\n\n if len(reactions) == 0:\n return None\n\n if len(reactions) != 1:\n pathway_logger.warning(f'smi {smi} substrate of multiple reactions')\n\n return list(reactions)[0]"
}
] | import time
from typing import Optional, List
from rbc2.expansion.multi_expander import MultiExpander
from rbc2.reaction_evaluation.feasability_filters import Filter, default_filter_repo
from rbc2.reaction_evaluation.starting_material_evaluator.starting_material_evaluator import \
DefaultSQLStartingMaterialEvaluator
from rbc2.utils.add_logger import add_logger
from rbc2.configs.logging_config import logging_config
from rbc2.configs.mcts_config import MCTS_Config
from rbc2.expansion.expander_repository import get_expanders
from rbc2.expansion.default_expander_interface import Expander
from rbc2.mcts.mcts_loop.backpropogate import backpropogate
from rbc2.mcts.mcts_loop.expansion.expand import Expansion
from rbc2.mcts.mcts_loop.rollout import rollout
from rbc2.mcts.mcts_loop.score_node import score_node
from rbc2.mcts.mcts_loop.selection import Selection
from rbc2.mcts.tree_node import create_root, MCTS_Node
from rbc2.reaction_network_entities.network import Network
from rbc2.reaction_network_entities.pathway import Pathway | 8,073 |
class MCTS():
def __init__(self,
target_smi: str,
expanders: dict[str: Expander],
filters: dict[str: Filter] = default_filter_repo,
starting_material_evaluator: Optional[DefaultSQLStartingMaterialEvaluator] = None,
network: Optional[Network] = None,
mcts_config: Optional[MCTS_Config] = None):
self.target_smi = target_smi
self.logger = add_logger('MCTS', level=logging_config.mcts)
# config
self.mcts_config = mcts_config
if self.mcts_config is None:
self.mcts_config = MCTS_Config()
# starting material evaluator
self.starting_material_evaluator = starting_material_evaluator
if self.starting_material_evaluator is None:
self.starting_material_evaluator = DefaultSQLStartingMaterialEvaluator()
# network - used to save expansions so they are only done once
self.network = network
if self.network is None:
self.network = Network()
# multi_expander made up of the individual expanders
self.multi_expander = MultiExpander(expanders, network=self.network)
# filters
self.filters = filters
# mcts steps
self.selection = Selection()
self.expansion = Expansion(self.multi_expander,
self.starting_material_evaluator,
self.mcts_config)
self.root: MCTS_Node = create_root(target_smi) # root node
self.solved = [] # the solved nodes, updated during the backpropagation step
self.search_complete = False # used to stop the search either on max iterations or max run time
# stats
self.iterations = 0
self.run_time = 0
self.positive_backpropagations = 0
def run(self, callback=None):
"""Runs the MCTS search"""
self.logger.debug(f'Running MCTS search for {self.target_smi}. Max time: {self.mcts_config.max_search_time} seconds. Max iterations: {self.mcts_config.max_iterations}')
t0 = time.time()
while self.search_complete is False:
self.do_a_loop()
self._check_run_time(t0)
if callback is not None and self.iterations % self.mcts_config.callback_iterations == 0:
callback(self)
def do_a_loop(self):
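        # One MCTS iteration: select a node starting from the root, roll out
        # (expand and simulate) to a new leaf, score the resulting pathway, then
        # backpropagate the score, collecting any newly solved nodes. A None
        # rollout means the tree is fully explored and the search stops.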
self.logger.debug(f'---- ITERATION {self.iterations} ----')
node = self.selection.select(self.root, self.mcts_config.exploration)
new_node = rollout(node, self.expansion, self.selection, self.network, self.filters, self.mcts_config)
if new_node is None:
self.logger.debug(f'Search complete - fully explored')
self.search_complete = True
score = score_node(new_node, self.mcts_config, self.starting_material_evaluator)
if score >= 0.95: self.positive_backpropagations += 1
self.solved += backpropogate(new_node, score)
self.iterations += 1
def _get_nodes(self, node: MCTS_Node) -> List[MCTS_Node]:
"""Returns all the nodes which are decendents of the given node"""
nodes = []
evaluated_children = [child for child in node.children if child.is_evaluated()]
nodes += evaluated_children
for child in evaluated_children:
nodes += self._get_nodes(child)
return nodes
def get_all_nodes(self) -> List[MCTS_Node]:
nodes = [self.root]
nodes += self._get_nodes(self.root)
return nodes
def get_solved_nodes(self) -> List[MCTS_Node]:
return self.solved
|
class MCTS():
def __init__(self,
target_smi: str,
expanders: dict[str: Expander],
filters: dict[str: Filter] = default_filter_repo,
starting_material_evaluator: Optional[DefaultSQLStartingMaterialEvaluator] = None,
network: Optional[Network] = None,
mcts_config: Optional[MCTS_Config] = None):
self.target_smi = target_smi
self.logger = add_logger('MCTS', level=logging_config.mcts)
# config
self.mcts_config = mcts_config
if self.mcts_config is None:
self.mcts_config = MCTS_Config()
# starting material evaluator
self.starting_material_evaluator = starting_material_evaluator
if self.starting_material_evaluator is None:
self.starting_material_evaluator = DefaultSQLStartingMaterialEvaluator()
# network - used to save expansions so they are only done once
self.network = network
if self.network is None:
self.network = Network()
# multi_expander made up of the individual expanders
self.multi_expander = MultiExpander(expanders, network=self.network)
# filters
self.filters = filters
# mcts steps
self.selection = Selection()
self.expansion = Expansion(self.multi_expander,
self.starting_material_evaluator,
self.mcts_config)
self.root: MCTS_Node = create_root(target_smi) # root node
self.solved = [] # the solved nodes, updated during the backpropagation step
self.search_complete = False # used to stop the search either on max iterations or max run time
# stats
self.iterations = 0
self.run_time = 0
self.positive_backpropagations = 0
def run(self, callback=None):
"""Runs the MCTS search"""
self.logger.debug(f'Running MCTS search for {self.target_smi}. Max time: {self.mcts_config.max_search_time} seconds. Max iterations: {self.mcts_config.max_iterations}')
t0 = time.time()
while self.search_complete is False:
self.do_a_loop()
self._check_run_time(t0)
if callback is not None and self.iterations % self.mcts_config.callback_iterations == 0:
callback(self)
def do_a_loop(self):
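        # One MCTS iteration: select a node starting from the root, roll out
        # (expand and simulate) to a new leaf, score the resulting pathway, then
        # backpropagate the score, collecting any newly solved nodes. A None
        # rollout means the tree is fully explored and the search stops.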
self.logger.debug(f'---- ITERATION {self.iterations} ----')
node = self.selection.select(self.root, self.mcts_config.exploration)
new_node = rollout(node, self.expansion, self.selection, self.network, self.filters, self.mcts_config)
if new_node is None:
self.logger.debug(f'Search complete - fully explored')
self.search_complete = True
score = score_node(new_node, self.mcts_config, self.starting_material_evaluator)
if score >= 0.95: self.positive_backpropagations += 1
self.solved += backpropogate(new_node, score)
self.iterations += 1
def _get_nodes(self, node: MCTS_Node) -> List[MCTS_Node]:
"""Returns all the nodes which are decendents of the given node"""
nodes = []
evaluated_children = [child for child in node.children if child.is_evaluated()]
nodes += evaluated_children
for child in evaluated_children:
nodes += self._get_nodes(child)
return nodes
def get_all_nodes(self) -> List[MCTS_Node]:
nodes = [self.root]
nodes += self._get_nodes(self.root)
return nodes
def get_solved_nodes(self) -> List[MCTS_Node]:
return self.solved
| def get_solved_pathways(self) -> List[Pathway]: | 16 | 2023-12-30 11:33:41+00:00 | 12k |
DerwenAI/textgraphs | textgraphs/kg.py | [
{
"identifier": "DBPEDIA_MIN_ALIAS",
"path": "textgraphs/defaults.py",
"snippet": "DBPEDIA_MIN_ALIAS: float = 0.8"
},
{
"identifier": "DBPEDIA_MIN_SIM",
"path": "textgraphs/defaults.py",
"snippet": "DBPEDIA_MIN_SIM: float = 0.9"
},
{
"identifier": "DBPEDIA_SEARCH_API",
"path": "textgraphs/defaults.py",
"snippet": "DBPEDIA_SEARCH_API: str = \"https://lookup.dbpedia.org/api/search\""
},
{
"identifier": "DBPEDIA_SPARQL_API",
"path": "textgraphs/defaults.py",
"snippet": "DBPEDIA_SPARQL_API: str = \"https://dbpedia.org/sparql\""
},
{
"identifier": "DBPEDIA_SPOTLIGHT_API",
"path": "textgraphs/defaults.py",
"snippet": "DBPEDIA_SPOTLIGHT_API: str = f\"{spacy_dbpedia_spotlight.EntityLinker.base_url}/en\""
},
{
"identifier": "WIKIDATA_API",
"path": "textgraphs/defaults.py",
"snippet": "WIKIDATA_API: str = \"https://www.wikidata.org/w/api.php\""
},
{
"identifier": "Edge",
"path": "textgraphs/elem.py",
"snippet": "class Edge:\n \"\"\"\nA data class representing an edge between two nodes.\n \"\"\"\n src_node: int\n dst_node: int\n kind: RelEnum\n rel: str\n prob: float\n count: int = 1"
},
{
"identifier": "KGSearchHit",
"path": "textgraphs/elem.py",
"snippet": "class KGSearchHit: # pylint: disable=R0902\n \"\"\"\nA data class representing a hit from a _knowledge graph_ search.\n \"\"\"\n iri: str\n label: str\n descrip: str\n aliases: typing.List[ str ]\n prob: float"
},
{
"identifier": "LinkedEntity",
"path": "textgraphs/elem.py",
"snippet": "class LinkedEntity: # pylint: disable=R0902\n \"\"\"\nA data class representing one linked entity.\n \"\"\"\n span: spacy.tokens.span.Span\n iri: str\n length: int\n rel: str\n prob: float\n token_id: int\n kg_ent: KGSearchHit\n count: int = 1"
},
{
"identifier": "Node",
"path": "textgraphs/elem.py",
"snippet": "class Node: # pylint: disable=R0902\n \"\"\"\nA data class representing one node, i.e., an extracted phrase.\n \"\"\"\n node_id: int\n key: str\n span: typing.Union[ spacy.tokens.span.Span, spacy.tokens.token.Token ]\n text: str\n pos: str\n kind: NodeEnum\n loc: typing.List[ typing.List[ int ] ] = field(default_factory = lambda: [])\n label: typing.Optional[ str ] = None\n length: int = 1\n sub_obj: bool = False\n count: int = 0\n neighbors: int = 0\n weight: float = 0.0\n entity: typing.List[ LinkedEntity ] = field(default_factory = lambda: [])\n annotated: bool = False\n\n\n def get_linked_label (\n self\n ) -> typing.Optional[ str ]:\n \"\"\"\nWhen this node has a linked entity, return that IRI.\nOtherwise return its `label` value.\n\n returns:\na label for the linked entity\n \"\"\"\n if len(self.entity) > 0:\n return self.entity[0].iri\n\n return self.label\n\n\n def get_name (\n self\n ) -> str:\n \"\"\"\nReturn a brief name for the graphical depiction of this Node.\n\n returns:\nbrief label to be used in a graph\n \"\"\"\n if self.kind == NodeEnum.IRI:\n return self.label # type: ignore\n if self.kind == NodeEnum.LEM:\n return self.key\n\n return self.text\n\n\n def get_stacked_count (\n self\n ) -> int:\n \"\"\"\nReturn a modified count, to redact verbs and linked entities from\nthe stack-rank partitions.\n\n returns:\ncount, used for re-ranking extracted entities\n \"\"\"\n if self.pos == \"VERB\" or self.kind == NodeEnum.IRI:\n return 0\n\n return self.count\n\n\n def get_pos (\n self\n ) -> typing.Tuple[ int, int ]:\n \"\"\"\nGenerate a position span for `OpenNRE`.\n\n returns:\na position span needed for `OpenNRE` relation extraction\n \"\"\"\n position: typing.Tuple[ int, int ] = ( self.span.idx, self.span.idx + len(self.text) - 1, )\n return position"
},
{
"identifier": "NodeEnum",
"path": "textgraphs/elem.py",
"snippet": "class NodeEnum (enum.IntEnum):\n \"\"\"\nEnumeration for the kinds of node categories\n \"\"\"\n DEP = 0 # `spaCy` parse dependency\n LEM = 1 # lemmatized token\n ENT = 2 # named entity\n CHU = 3 # noun chunk\n IRI = 4 # IRI for linked entity\n\n def __str__ (\n self\n ) -> str:\n \"\"\"\nCodec for representing as a string.\n\n returns:\ndecoded string representation of the enumerated value\n \"\"\"\n decoder: typing.List[ str ] = [\n \"dep\",\n \"lem\",\n \"ent\",\n \"chu\",\n \"iri\",\n ]\n\n return decoder[self.value]"
},
{
"identifier": "RelEnum",
"path": "textgraphs/elem.py",
"snippet": "class RelEnum (enum.IntEnum):\n \"\"\"\nEnumeration for the kinds of edge relations\n \"\"\"\n DEP = 0 # `spaCy` parse dependency\n CHU = 1 # `spaCy` noun chunk\n INF = 2 # `REBEL` or `OpenNRE` inferred relation\n SYN = 3 # `sense2vec` inferred synonym\n IRI = 4 # `DBPedia` or `Wikidata` linked entity\n\n def __str__ (\n self\n ) -> str:\n \"\"\"\nCodec for representing as a string.\n\n returns:\ndecoded string representation of the enumerated value\n \"\"\"\n decoder: typing.List[ str ] = [\n \"dep\",\n \"inf\",\n \"syn\",\n \"chu\",\n \"iri\",\n ]\n\n return decoder[self.value]"
},
{
"identifier": "SimpleGraph",
"path": "textgraphs/graph.py",
"snippet": "class SimpleGraph:\n \"\"\"\nAn in-memory graph used to build a `MultiDiGraph` in NetworkX.\n \"\"\"\n\n def __init__ (\n self\n ) -> None:\n \"\"\"\nConstructor.\n \"\"\"\n self.nodes: typing.Dict[ str, Node ] = OrderedDict()\n self.edges: typing.Dict[ str, Edge ] = {}\n self.lemma_graph: nx.MultiDiGraph = nx.MultiDiGraph()\n\n\n def reset (\n self\n ) -> None:\n \"\"\"\nRe-initialize the data structures, resetting all but the configuration.\n \"\"\"\n self.nodes = OrderedDict()\n self.edges = {}\n self.lemma_graph = nx.MultiDiGraph()\n\n\n def make_node ( # pylint: disable=R0913,R0914\n self,\n tokens: typing.List[ Node ],\n key: str,\n span: spacy.tokens.token.Token,\n kind: NodeEnum,\n text_id: int,\n para_id: int,\n sent_id: int,\n *,\n label: typing.Optional[ str ] = None,\n length: int = 1,\n linked: bool = True,\n ) -> Node:\n \"\"\"\nLookup and return a `Node` object.\nBy default, link matching keys into the same node.\nOtherwise instantiate a new node if it does not exist already.\n\n tokens:\nlist of parsed tokens\n\n key:\nlemma key (invariant)\n\n span:\ntoken span for the parsed entity\n\n kind:\nthe kind of this `Node` object\n\n text_id:\ntext (top-level document) identifier\n\n para_id:\nparagraph identitifer\n\n sent_id:\nsentence identifier\n\n label:\nnode label (for a new object)\n\n length:\nlength of token span\n\n linked:\nflag for whether this links to an entity\n\n returns:\nthe constructed `Node` object\n \"\"\"\n token_id: int = 0\n token_text: str = key\n token_pos: str = \"PROPN\"\n\n if span is not None:\n token_id = span.i\n token_text = span.text\n token_pos = span.pos_\n\n location: typing.List[ int ] = [ # type: ignore\n text_id,\n para_id,\n sent_id,\n token_id,\n ]\n\n if not linked:\n # construct a placeholder node (stopwords)\n self.nodes[key] = Node(\n len(self.nodes),\n key,\n span,\n span.text,\n span.pos_,\n kind,\n loc = [ location ],\n length = length,\n )\n\n elif key in self.nodes:\n # link to previously constructed entity node\n self.nodes[key].loc.append(location)\n self.nodes[key].count += 1\n\n # construct a new node for entity or lemma\n else:\n self.nodes[key] = Node(\n len(self.nodes),\n key,\n span,\n token_text,\n token_pos,\n kind,\n loc = [ location ],\n label = label,\n length = length,\n count = 1,\n )\n\n node: Node = self.nodes.get(key) # type: ignore\n\n if kind not in [ NodeEnum.CHU, NodeEnum.IRI ]:\n tokens.append(node)\n\n return node # type: ignore\n\n\n def make_edge ( # pylint: disable=R0913\n self,\n src_node: Node,\n dst_node: Node,\n kind: RelEnum,\n rel: str,\n prob: float,\n *,\n debug: bool = False,\n ) -> typing.Optional[ Edge ]:\n \"\"\"\nLookup an edge, creating a new one if it does not exist already,\nand increment the count if it does.\n\n src_node:\nsource node in the triple\n\n dst_node:\ndestination node in the triple\n\n kind:\nthe kind of this `Edge` object\n\n rel:\nrelation label\n\n prob:\nprobability of this `Edge` within the graph\n\n debug:\ndebugging flag\n\n returns:\nthe constructed `Edge` object; this may be `None` if the input parameters indicate skipping the edge\n \"\"\"\n key: str = \".\".join([\n str(src_node.node_id),\n str(dst_node.node_id),\n rel.replace(\" \", \"_\"),\n str(kind.value),\n ])\n\n if debug:\n ic(key)\n\n if key in self.edges:\n self.edges[key].count += 1\n\n elif src_node.node_id != dst_node.node_id:\n # preclude cycles in the graph\n self.edges[key] = Edge(\n src_node.node_id,\n dst_node.node_id,\n kind,\n rel,\n prob,\n )\n\n if debug:\n 
ic(self.edges.get(key))\n\n return self.edges.get(key)\n\n\n def construct_lemma_graph (\n self,\n *,\n debug: bool = False,\n ) -> None:\n \"\"\"\nConstruct the base level of the _lemma graph_ from the collected\nelements. This gets represented in `NetworkX` as a directed graph\nwith parallel edges.\n\n debug:\ndebugging flag\n \"\"\"\n # add the nodes\n self.lemma_graph.add_nodes_from([\n node.node_id\n for node in self.nodes.values()\n ])\n\n # populate the minimum required node properties\n for node_key, node in self.nodes.items():\n nx_node = self.lemma_graph.nodes[node.node_id]\n nx_node[\"title\"] = node_key\n nx_node[\"size\"] = node.count\n nx_node[\"value\"] = node.weight\n\n if debug:\n ic(nx_node)\n\n # add the edges and their properties\n self.lemma_graph.add_edges_from([\n (\n edge.src_node,\n edge.dst_node,\n {\n \"kind\": str(edge.kind),\n \"title\": edge.rel,\n \"weight\": float(edge.count),\n \"prob\": edge.prob,\n \"count\": edge.count,\n },\n )\n for edge_key, edge in self.edges.items()\n ])\n\n\n def dump_lemma_graph (\n self,\n ) -> str:\n \"\"\"\nDump the _lemma graph_ as a JSON string in _node-link_ format,\nsuitable for serialization and subsequent use in JavaScript,\nNeo4j, Graphistry, etc.\n\nMake sure to call beforehand: `TextGraphs.calc_phrase_ranks()`\n\n returns:\na JSON representation of the exported _lemma graph_\n \"\"\"\n # populate the optional node properties\n for node in self.nodes.values():\n nx_node = self.lemma_graph.nodes[node.node_id]\n nx_node[\"name\"] = node.text\n nx_node[\"kind\"] = str(node.kind)\n nx_node[\"iri\"] = node.label\n nx_node[\"subobj\"] = node.sub_obj\n nx_node[\"pos\"] = node.pos\n nx_node[\"loc\"] = str(node.loc)\n\n return json.dumps(\n nx.node_link_data(self.lemma_graph),\n sort_keys = True,\n indent = 2,\n separators = ( \",\", \":\" ),\n )"
},
{
"identifier": "KnowledgeGraph",
"path": "textgraphs/pipe.py",
"snippet": "class KnowledgeGraph (Component):\n \"\"\"\nBase class for a _knowledge graph_ interface.\n \"\"\"\n\n def augment_pipe (\n self,\n factory: \"PipelineFactory\",\n ) -> None:\n \"\"\"\nEncapsulate a `spaCy` call to `add_pipe()` configuration.\n\n factory:\na `PipelineFactory` used to configure components\n \"\"\"\n pass # pylint: disable=W0107\n\n\n def remap_ner (\n self,\n label: typing.Optional[ str ],\n ) -> typing.Optional[ str ]:\n \"\"\"\nRemap the OntoTypes4 values from NER output to more general-purpose IRIs.\n\n label:\ninput NER label, an `OntoTypes4` value\n\n returns:\nan IRI for the named entity\n \"\"\"\n return label\n\n\n def normalize_prefix (\n self,\n iri: str,\n *,\n debug: bool = False, # pylint: disable=W0613\n ) -> str:\n \"\"\"\nNormalize the given IRI to use standard namespace prefixes.\n\n iri:\ninput IRI, in fully-qualified domain representation\n\n debug:\ndebugging flag\n\n returns:\nthe compact IRI representation, using an RDF namespace prefix\n \"\"\"\n return iri\n\n\n def perform_entity_linking (\n self,\n graph: SimpleGraph,\n pipe: \"Pipeline\",\n *,\n debug: bool = False,\n ) -> None:\n \"\"\"\nPerform _entity linking_ based on \"spotlight\" and other services.\n\n graph:\nsource graph\n\n pipe:\nconfigured pipeline for the current document\n\n debug:\ndebugging flag\n \"\"\"\n pass # pylint: disable=W0107\n\n\n def resolve_rel_iri (\n self,\n rel: str,\n *,\n lang: str = \"en\", # pylint: disable=W0613\n debug: bool = False, # pylint: disable=W0613\n ) -> typing.Optional[ str ]:\n \"\"\"\nResolve a `rel` string from a _relation extraction_ model which has\nbeen trained on this knowledge graph.\n\n rel:\nrelation label, generation these source from Wikidata for many RE projects\n\n lang:\nlanguage identifier\n\n debug:\ndebugging flag\n\n returns:\na resolved IRI\n \"\"\"\n return rel"
},
{
"identifier": "Pipeline",
"path": "textgraphs/pipe.py",
"snippet": "class Pipeline: # pylint: disable=R0902,R0903\n \"\"\"\nManage parsing of a document, which is assumed to be paragraph-sized.\n \"\"\"\n\n def __init__ ( # pylint: disable=R0913\n self,\n text_input: str,\n tok_pipe: spacy.Language,\n ner_pipe: spacy.Language,\n aux_pipe: spacy.Language,\n kg: KnowledgeGraph, # pylint: disable=C0103\n infer_rels: typing.List[ InferRel ],\n ) -> None:\n \"\"\"\nConstructor.\n\n text_input:\nraw text to be parsed\n\n tok_pipe:\nthe `spaCy.Language` pipeline used for tallying individual tokens\n\n ner_pipe:\nthe `spaCy.Language` pipeline used for tallying named entities\n\n aux_pipe:\nthe `spaCy.Language` pipeline used for auxiliary components (e.g., `DBPedia Spotlight`)\n\n kg:\nknowledge graph used for entity linking\n\n infer_rels:\na list of components for inferring relations\n \"\"\"\n self.text: str = text_input\n\n # `tok_doc` provides a stream of individual tokens\n self.tok_doc: spacy.tokens.Doc = tok_pipe(self.text)\n\n # `ner_doc` provides the merged-entity spans from NER\n self.ner_doc: spacy.tokens.Doc = ner_pipe(self.text)\n\n # `aux_doc` e.g., span re-indexing for Spotlight entity linking\n self.aux_doc: spacy.tokens.Doc = aux_pipe(self.text)\n\n self.kg: KnowledgeGraph = kg # pylint: disable=C0103\n self.infer_rels: typing.List[ InferRel ] = infer_rels\n\n # list of Node objects for each parsed token, in sequence\n self.tokens: typing.List[ Node ] = []\n\n # set of Edge objects generated by this Pipeline\n self.edges: typing.List[ Edge ] = []\n\n\n @classmethod\n def get_lemma_key (\n cls,\n span: typing.Union[ spacy.tokens.span.Span, spacy.tokens.token.Token ],\n *,\n placeholder: bool = False,\n ) -> str:\n \"\"\"\nCompose a unique, invariant lemma key for the given span.\n\n span:\nspan of tokens within the lemma\n\n placeholder:\nflag for whether to create a placeholder\n\n returns:\na composed lemma key\n \"\"\"\n if isinstance(span, spacy.tokens.token.Token):\n terms: typing.List[ str ] = [\n span.lemma_.strip().lower(),\n span.pos_,\n ]\n\n if placeholder:\n terms.insert(0, str(span.i))\n\n else:\n terms = functools.reduce(\n operator.iconcat,\n [\n [ token.lemma_.strip().lower(), token.pos_, ]\n for token in span\n ],\n [],\n )\n\n return \".\".join(terms)\n\n\n def get_ent_lemma_keys (\n self,\n ) -> typing.Iterator[ typing.Tuple[ str, int ]]:\n \"\"\"\nIterate through the fully qualified lemma keys for an extracted entity.\n\n yields:\nthe lemma keys within an extracted entity\n \"\"\"\n for ent in self.tok_doc.ents:\n yield self.get_lemma_key(ent), len(ent)\n\n\n def link_noun_chunks (\n self,\n nodes: dict,\n *,\n debug: bool = False,\n ) -> typing.List[ NounChunk ]:\n \"\"\"\nLink any noun chunks which are not already subsumed by named entities.\n\n nodes:\ndictionary of `Node` objects in the graph\n\n debug:\ndebugging flag\n\n returns:\na list of identified noun chunks which are novel\n \"\"\"\n chunks: typing.List[ NounChunk ] = []\n\n # first pass: note the available noun chunks\n for sent_id, sent in enumerate(self.tok_doc.sents):\n for span in sent.noun_chunks:\n lemma_key: str = self.get_lemma_key(span)\n\n chunks.append(\n NounChunk(\n span,\n span.text,\n len(span),\n lemma_key,\n lemma_key not in nodes,\n sent_id,\n )\n )\n\n # second pass: remap span indices to the merged entities pipeline\n for i, span in enumerate(self.ner_doc.noun_chunks):\n if span.text == self.tokens[span.start].text:\n chunks[i].unseen = False\n elif chunks[i].unseen:\n chunks[i].start = span.start\n\n if debug:\n ic(chunks[i])\n\n 
return chunks\n\n\n ######################################################################\n ## relation extraction\n\n def iter_entity_pairs (\n self,\n pipe_graph: nx.MultiGraph,\n max_skip: int,\n *,\n debug: bool = True,\n ) -> typing.Iterator[ typing.Tuple[ Node, Node ]]:\n \"\"\"\nIterator for entity pairs for which the algorithm infers relations.\n\n pipe_graph:\na `networkx.MultiGraph` representation of the graph, reused for graph algorithms\n\n max_skip:\nmaximum distance between entities for inferred relations\n\n debug:\ndebugging flag\n\n yields:\npairs of entities within a range, e.g., to use for relation extraction\n \"\"\"\n ent_list: typing.List[ Node ] = [\n node\n for node in self.tokens\n if node.kind in [ NodeEnum.ENT ]\n ]\n\n for pair in itertools.product(ent_list, repeat = 2):\n if pair[0] != pair[1]:\n src: Node = pair[0]\n dst: Node = pair[1]\n\n try:\n path: typing.List[ int ] = nx.shortest_path(\n pipe_graph,\n source = src.node_id,\n target = dst.node_id,\n weight = \"weight\",\n method = \"dijkstra\",\n )\n\n if debug:\n ic(src.node_id, dst.node_id, path)\n\n if len(path) <= max_skip:\n yield ( src, dst, )\n except nx.NetworkXNoPath:\n pass\n except Exception as ex: # pylint: disable=W0718\n ic(ex)\n ic(\"ERROR\", src, dst)\n traceback.print_exc()"
},
{
"identifier": "PipelineFactory",
"path": "textgraphs/pipe.py",
"snippet": "class PipelineFactory: # pylint: disable=R0903\n \"\"\"\nFactory pattern for building a pipeline, which is one of the more\nexpensive operations with `spaCy`\n \"\"\"\n\n def __init__ ( # pylint: disable=W0102\n self,\n *,\n spacy_model: str = SPACY_MODEL,\n ner: typing.Optional[ Component ] = None,\n kg: KnowledgeGraph = KnowledgeGraph(), # pylint: disable=C0103\n infer_rels: typing.List[ InferRel ] = []\n ) -> None:\n \"\"\"\nConstructor which instantiates the `spaCy` pipelines:\n\n * `tok_pipe` -- regular generator for parsed tokens\n * `ner_pipe` -- with entities merged\n * `aux_pipe` -- spotlight entity linking\n\nwhich will be needed for parsing and entity linking.\n\n spacy_model:\nthe specific model to use in `spaCy` pipelines\n\n ner:\noptional custom NER component\n\n kg:\nknowledge graph used for entity linking\n\n infer_rels:\na list of components for inferring relations\n \"\"\"\n self.ner: typing.Optional[ Component ] = ner\n self.kg: KnowledgeGraph = kg # pylint: disable=C0103\n self.infer_rels: typing.List[ InferRel ] = infer_rels\n\n # determine the NER model to be used\n exclude: typing.List[ str ] = []\n\n if self.ner is not None:\n exclude.append(\"ner\")\n\n # build the pipelines\n # NB: `spaCy` team doesn't quite get the PEP 621 restrictions which PyPa mangled:\n # https://github.com/explosion/spaCy/issues/3536\n # https://github.com/explosion/spaCy/issues/4592#issuecomment-704373657\n if not spacy.util.is_package(spacy_model):\n spacy.cli.download(spacy_model)\n\n self.tok_pipe = spacy.load(\n spacy_model,\n exclude = exclude,\n )\n\n self.ner_pipe = spacy.load(\n spacy_model,\n exclude = exclude,\n )\n\n self.aux_pipe = spacy.load(\n spacy_model,\n exclude = exclude,\n )\n\n # add NER\n if self.ner is not None:\n self.ner.augment_pipe(self)\n\n # `aux_pipe` only: entity linking\n self.kg.augment_pipe(self)\n\n # `ner_pipe` only: merge entities\n self.ner_pipe.add_pipe(\n \"merge_entities\",\n )\n\n\n def create_pipeline (\n self,\n text_input: str,\n ) -> Pipeline:\n \"\"\"\nInstantiate the document pipelines needed to parse the input text.\n\n text_input:\nraw text to be parsed\n\n returns:\na configured `Pipeline` object\n \"\"\"\n pipe: Pipeline = Pipeline(\n text_input,\n self.tok_pipe,\n self.ner_pipe,\n self.aux_pipe,\n self.kg,\n self.infer_rels,\n )\n\n return pipe"
}
] | from collections import OrderedDict
from difflib import SequenceMatcher
from bs4 import BeautifulSoup # pylint: disable=E0401
from icecream import ic # pylint: disable=E0401
from qwikidata.linked_data_interface import get_entity_dict_from_api # pylint: disable=E0401
from .defaults import DBPEDIA_MIN_ALIAS, DBPEDIA_MIN_SIM, \
DBPEDIA_SEARCH_API, DBPEDIA_SPARQL_API, DBPEDIA_SPOTLIGHT_API, \
WIKIDATA_API
from .elem import Edge, KGSearchHit, LinkedEntity, Node, NodeEnum, RelEnum
from .graph import SimpleGraph
from .pipe import KnowledgeGraph, Pipeline, PipelineFactory
import http
import json
import time
import traceback
import typing
import urllib.parse
import markdown2 # pylint: disable=E0401
import requests # type: ignore # pylint: disable=E0401
import spacy # pylint: disable=E0401 | 7,931 |
lang:
language identifier
debug:
debugging flag
"""
hit: dict = {}
params: dict = {
"action": "wbsearchentities",
"type": search_type,
"language": lang,
"format": "json",
"continue": "0",
"search": query,
}
response: requests.models.Response = requests.get(
self.wikidata_api,
params = params,
verify = False,
headers = {
"Accept": "application/json",
},
)
if debug:
ic(response.status_code)
# check for API success
if http.HTTPStatus.OK == response.status_code:
dat: dict = response.json()
hit = dat["search"][0]
#print(json.dumps(hit, indent = 2, sort_keys = True))
return hit
@classmethod
def _match_aliases (
cls,
query: str,
label: str,
aliases: typing.List[ str ],
*,
debug: bool = False,
) -> typing.Tuple[ float, str ]:
"""
Find the best-matching aliases for a search term.
query:
query string
label:
entity label to be matched against the available aliases
aliases:
list of the available aliases
debug:
debugging flag
"""
# best case scenario: the label is an exact match
if query == label.lower():
return ( 1.0, label, )
# ...therefore the label is not an exact match
prob_list: typing.List[ typing.Tuple[ float, str ]] = [
( SequenceMatcher(None, query, label.lower()).ratio(), label, )
]
# fallback: test the aliases
for alias in aliases:
prob: float = SequenceMatcher(None, query, alias.lower()).ratio()
if prob == 1.0:
# early termination for success
return ( prob, alias, )
prob_list.append(( prob, alias, ))
# find the closest match
prob_list.sort(reverse = True)
if debug:
ic(prob_list)
return prob_list[0]
def _md_to_text (
self,
md_text: str,
) -> str:
"""
Convert markdown to plain text.
<https://stackoverflow.com/questions/761824/python-how-to-convert-markdown-formatted-text-to-text>
md_text:
markdown text (unrendered)
returns:
rendered plain text as a string
"""
soup: BeautifulSoup = BeautifulSoup(
self.markdowner.convert(md_text),
features = "html.parser",
)
return soup.get_text().strip()
def wikidata_search (
self,
query: str,
*,
lang: str = "en",
debug: bool = False,
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=C0302
"""
This class provides a wrapper for access to a _knowledge graph_, which
then runs _entity linking_ and other functions in the pipeline.
This could provide an interface to a graph database, such as Neo4j,
StarDog, KùzuDB, etc., or to an API.
In this default case, we wrap services available via the WikiMedia APIs:
* DBPedia: Spotlight, SPARQL, Search
* Wikidata: Search
see copyright/license https://huggingface.co/spaces/DerwenAI/textgraphs/blob/main/README.md
"""
######################################################################
## class definitions
class KGWikiMedia (KnowledgeGraph): # pylint: disable=R0902,R0903
"""
Manage access to WikiMedia-related APIs.
"""
REL_ISA: str = "http://www.w3.org/1999/02/22-rdf-syntax-ns#type"
REL_SAME: str = "http://www.w3.org/2002/07/owl#sameAs"
NER_MAP: typing.Dict[ str, dict ] = OrderedDict({
"CARDINAL": {
"iri": "http://dbpedia.org/resource/Cardinal_number",
"definition": "Numerals that do not fall under another type"
},
"DATE": {
"iri": "http://dbpedia.org/ontology/date",
"definition": "Absolute or relative dates or periods"
},
"EVENT": {
"iri": "http://dbpedia.org/ontology/Event",
"definition": "Named hurricanes, battles, wars, sports events, etc."
},
"FAC": {
"iri": "http://dbpedia.org/ontology/Infrastructure",
"definition": "Buildings, airports, highways, bridges, etc."
},
"GPE": {
"iri": "http://dbpedia.org/ontology/Country",
"definition": "Countries, cities, states"
},
"LANGUAGE": {
"iri": "http://dbpedia.org/ontology/Language",
"definition": "Any named language"
},
"LAW": {
"iri": "http://dbpedia.org/ontology/Law",
"definition": "Named documents made into laws "
},
"LOC": {
"iri": "http://dbpedia.org/ontology/Place",
"definition": "Non-GPE locations, mountain ranges, bodies of water"
},
"MONEY": {
"iri": "http://dbpedia.org/resource/Money",
"definition": "Monetary values, including unit"
},
"NORP": {
"iri": "http://dbpedia.org/ontology/nationality",
"definition": "Nationalities or religious or political groups"
},
"ORDINAL": {
"iri": "http://dbpedia.org/resource/Ordinal_number",
"definition": "Ordinal number, i.e., first, second, etc."
},
"ORG": {
"iri": "http://dbpedia.org/ontology/Organisation",
"definition": "Companies, agencies, institutions, etc."
},
"PERCENT": {
"iri": "http://dbpedia.org/resource/Percentage",
"definition": "Percentage"
},
"PERSON": {
"iri": "http://dbpedia.org/ontology/Person",
"definition": "People, including fictional"
},
"PRODUCT": {
"iri": "http://dbpedia.org/ontology/product",
"definition": "Vehicles, weapons, foods, etc. (Not services)"
},
"QUANTITY": {
"iri": "http://dbpedia.org/resource/Quantity",
"definition": "Measurements, as of weight or distance"
},
"TIME": {
"iri": "http://dbpedia.org/ontology/time",
"definition": "Times smaller than a day"
},
"WORK OF ART": {
"iri": "http://dbpedia.org/resource/Work_of_art",
"definition": "Titles of books, songs, etc."
},
})
NS_PREFIX: typing.Dict[ str, str ] = OrderedDict({
"dbc": "http://dbpedia.org/resource/Category:",
"dbt": "http://dbpedia.org/resource/Template:",
"dbr": "http://dbpedia.org/resource/",
"yago":"http://dbpedia.org/class/yago/",
"dbd": "http://dbpedia.org/datatype/",
"dbo": "http://dbpedia.org/ontology/",
"dbp": "http://dbpedia.org/property/",
"units": "http://dbpedia.org/units/",
"dbpedia-commons": "http://commons.dbpedia.org/resource/",
"dbpedia-wikicompany": "http://dbpedia.openlinksw.com/wikicompany/",
"dbpedia-wikidata": "http://wikidata.dbpedia.org/resource/",
"wd": "http://www.wikidata.org/",
"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"schema": "https://schema.org/",
"owl": "http://www.w3.org/2002/07/owl#",
})
def __init__ ( # pylint: disable=W0102
self,
*,
spotlight_api: str = DBPEDIA_SPOTLIGHT_API,
dbpedia_search_api: str = DBPEDIA_SEARCH_API,
dbpedia_sparql_api: str = DBPEDIA_SPARQL_API,
wikidata_api: str = WIKIDATA_API,
ner_map: dict = NER_MAP,
ns_prefix: dict = NS_PREFIX,
min_alias: float = DBPEDIA_MIN_ALIAS,
min_similarity: float = DBPEDIA_MIN_SIM,
) -> None:
"""
Constructor.
spotlight_api:
`DBPedia Spotlight` API or equivalent local service
dbpedia_search_api:
`DBPedia Search` API or equivalent local service
dbpedia_sparql_api:
`DBPedia SPARQL` API or equivalent local service
wikidata_api:
`Wikidata Search` API or equivalent local service
ner_map:
named entity map for standardizing IRIs
ns_prefix:
RDF namespace prefixes
min_alias:
minimum alias probability threshold for accepting linked entities
min_similarity:
minimum label similarity threshold for accepting linked entities
"""
self.spotlight_api: str = spotlight_api
self.dbpedia_search_api: str = dbpedia_search_api
self.dbpedia_sparql_api: str = dbpedia_sparql_api
self.wikidata_api: str = wikidata_api
self.ner_map: dict = ner_map
self.ns_prefix: dict = ns_prefix
self.min_alias: float = min_alias
self.min_similarity: float = min_similarity
self.ent_cache: dict = {}
self.iri_cache: dict = {}
self.markdowner = markdown2.Markdown()
def augment_pipe (
self,
factory: PipelineFactory,
) -> None:
"""
Encapsulate the `spaCy` `add_pipe()` configuration call.
factory:
a `PipelineFactory` used to configure components
"""
factory.aux_pipe.add_pipe(
"dbpedia_spotlight",
config = {
"dbpedia_rest_endpoint": self.spotlight_api, # type: ignore
},
)
def remap_ner (
self,
label: typing.Optional[ str ],
) -> typing.Optional[ str ]:
"""
Remap the OntoTypes4 values from NER output to more general-purpose IRIs.
label:
input NER label, an `OntoTypes4` value
returns:
an IRI for the named entity
"""
if label is None:
return None
try:
iri: typing.Optional[ dict ] = self.ner_map.get(label)
if iri is not None:
return iri["iri"]
except TypeError as ex:
ic(ex)
print(f"unknown label: {label}")
return None
def normalize_prefix (
self,
iri: str,
*,
debug: bool = False,
) -> str:
"""
Normalize the given IRI to use the standard DBPedia namespace prefixes.
iri:
input IRI, in fully-qualified domain representation
debug:
debugging flag
returns:
the compact IRI representation, using an RDF namespace prefix
"""
iri_parse: urllib.parse.ParseResult = urllib.parse.urlparse(iri)
if debug:
ic(iri_parse)
for prefix, ns_fqdn in self.ns_prefix.items():
ns_parse: urllib.parse.ParseResult = urllib.parse.urlparse(ns_fqdn)
if debug:
ic(prefix, ns_parse.netloc, ns_parse.path)
if iri_parse.netloc == ns_parse.netloc and iri_parse.path.startswith(ns_parse.path):
slug: str = iri_parse.path.replace(ns_parse.path, "")
# return normalized IRI
return f"{prefix}:{slug}"
# normalization failed
return iri
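    # Illustrative behaviour: with the NS_PREFIX table above, an IRI such as
    # "http://dbpedia.org/ontology/Person" compacts to "dbo:Person", while an
    # IRI whose netloc/path matches no registered prefix is returned unchanged.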
def perform_entity_linking (
self,
graph: SimpleGraph,
pipe: Pipeline,
*,
debug: bool = False,
) -> None:
"""
Perform _entity linking_ based on `DBPedia Spotlight` and other services.
graph:
source graph
pipe:
configured pipeline for the current document
debug:
debugging flag
"""
# first pass: use "spotlight" API to markup text
iter_ents: typing.Iterator[ LinkedEntity ] = self._link_spotlight_entities(
pipe,
debug = debug
)
for link in iter_ents:
_ = self._make_link(
graph,
pipe,
link,
self.REL_ISA,
debug = debug,
)
_ = self._secondary_entity_linking(
graph,
pipe,
link,
debug = debug,
)
# second pass: use KG search on entities which weren't linked by Spotlight
iter_ents = self._link_kg_search_entities(
graph,
debug = debug,
)
for link in iter_ents:
_ = self._make_link(
graph,
pipe,
link,
self.REL_ISA,
debug = debug,
)
_ = self._secondary_entity_linking(
graph,
pipe,
link,
debug = debug,
)
def resolve_rel_iri (
self,
rel: str,
*,
lang: str = "en",
debug: bool = False,
) -> typing.Optional[ str ]:
"""
Resolve a `rel` string from a _relation extraction_ model which has
been trained on this _knowledge graph_, which defaults to using the
`WikiMedia` graphs.
rel:
relation label; many relation-extraction (RE) projects source these labels from Wikidata
lang:
language identifier
debug:
debugging flag
returns:
a resolved IRI
"""
# first, check the cache
if rel in self.iri_cache:
return self.iri_cache.get(rel)
# otherwise construct a Wikidata API search
try:
hit: dict = self._wikidata_endpoint(
rel,
search_type = "property",
lang = lang,
debug = debug,
)
if debug:
ic(hit["label"], hit["id"])
# get the `claims` of the Wikidata property
prop_id: str = hit["id"]
prop_dict: dict = get_entity_dict_from_api(prop_id)
claims: dict = prop_dict["claims"]
if "P1628" in claims:
# use `equivalent property` if available
iri: str = claims["P1628"][0]["mainsnak"]["datavalue"]["value"]
elif "P2235" in claims:
# use `external superproperty` as a fallback
iri = claims["P2235"][0]["mainsnak"]["datavalue"]["value"]
else:
ic("no related claims", rel)
return None
if debug:
ic(iri)
# update the cache
self.iri_cache[rel] = iri
return iri
except requests.exceptions.ConnectionError as r_ex:
ic(r_ex)
return None
except Exception as ex: # pylint: disable=W0718
ic(ex)
traceback.print_exc()
return None
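    # Summary of the lookup above: the relation label is resolved to a Wikidata
    # property via `_wikidata_endpoint()`; its "equivalent property" claim
    # (P1628) supplies the IRI when present, otherwise the "external
    # superproperty" claim (P2235) is used as a fallback, and the result is
    # memoised in `self.iri_cache` so repeated labels skip the API round-trip.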
######################################################################
## private methods, customized per KG instance
def _wikidata_endpoint (
self,
query: str,
*,
search_type: str = "item",
lang: str = "en",
debug: bool = False,
) -> dict:
"""
Call a generic endpoint for Wikidata API.
Raises various untrapped exceptions, to be handled by caller.
query:
query string
search_type:
search type
lang:
language identifier
debug:
debugging flag
"""
hit: dict = {}
params: dict = {
"action": "wbsearchentities",
"type": search_type,
"language": lang,
"format": "json",
"continue": "0",
"search": query,
}
response: requests.models.Response = requests.get(
self.wikidata_api,
params = params,
verify = False,
headers = {
"Accept": "application/json",
},
)
if debug:
ic(response.status_code)
# check for API success
if http.HTTPStatus.OK == response.status_code:
dat: dict = response.json()
hit = dat["search"][0]
#print(json.dumps(hit, indent = 2, sort_keys = True))
return hit
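    # Illustrative request: the call above amounts to a GET on
    # <wikidata_api>?action=wbsearchentities&search=<query>&type=<search_type>
    # &language=<lang>&format=json&continue=0, keeping only the first hit; an
    # IndexError on an empty result set is left to the caller, per the
    # docstring.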
@classmethod
def _match_aliases (
cls,
query: str,
label: str,
aliases: typing.List[ str ],
*,
debug: bool = False,
) -> typing.Tuple[ float, str ]:
"""
Find the best-matching aliases for a search term.
query:
query string
label:
entity label to be matched against the available aliases
aliases:
list of the available aliases
debug:
debugging flag
"""
# best case scenario: the label is an exact match
if query == label.lower():
return ( 1.0, label, )
# ...therefore the label is not an exact match
prob_list: typing.List[ typing.Tuple[ float, str ]] = [
( SequenceMatcher(None, query, label.lower()).ratio(), label, )
]
# fallback: test the aliases
for alias in aliases:
prob: float = SequenceMatcher(None, query, alias.lower()).ratio()
if prob == 1.0:
# early termination for success
return ( prob, alias, )
prob_list.append(( prob, alias, ))
# find the closest match
prob_list.sort(reverse = True)
if debug:
ic(prob_list)
return prob_list[0]
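    # Illustrative example: _match_aliases("nlp", "Natural language processing",
    # ["NLP"]) returns (1.0, "NLP") through the early exit on an exact alias
    # match; with no exact match, the highest SequenceMatcher ratio among the
    # label and aliases wins.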
def _md_to_text (
self,
md_text: str,
) -> str:
"""
Convert markdown to plain text.
<https://stackoverflow.com/questions/761824/python-how-to-convert-markdown-formatted-text-to-text>
md_text:
markdown text (unrendered)
returns:
rendered plain text as a string
"""
soup: BeautifulSoup = BeautifulSoup(
self.markdowner.convert(md_text),
features = "html.parser",
)
return soup.get_text().strip()
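    # Illustrative example: _md_to_text("**bold** and *italic*") renders the
    # markdown to HTML and strips the tags, yielding "bold and italic".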
def wikidata_search (
self,
query: str,
*,
lang: str = "en",
debug: bool = False, | ) -> typing.Optional[ KGSearchHit ]: | 7 | 2023-12-25 11:42:53+00:00 | 12k |
pkariz/grin-explorer | backend/api/tests.py | [
{
"identifier": "Blockchain",
"path": "backend/api/models.py",
"snippet": "class Blockchain(TimeStampedModel):\n id = models.BigAutoField(primary_key=True)\n # testnet, mainnet etc\n name = models.CharField(max_length=255, unique=True)\n # slug of the name, we use it in url\n slug = models.SlugField(max_length=255, unique=True)\n # node from which the data is fetched\n node = models.ForeignKey(\n Node, related_name='blockchains', on_delete=models.PROTECT)\n # the default blockchain will be picked on the gui by default\n default = models.BooleanField(default=False)\n # if fetch_price is False then the shown price will always be 0.\n # Testnets and localnets should have this set to false.\n fetch_price = models.BooleanField(default=True)\n # load_progress shows current % of loaded blocks. If archive is True then\n # load_progress will represent % of missing all blocks, otherwise % of\n # missing blocks from the latest 1440 blocks\n load_progress = models.DecimalField(\n max_digits=5,\n decimal_places=2,\n default=0.0,\n validators=[MinValueValidator(0), MaxValueValidator(100)]\n )\n\n def __str__(self):\n return f'{self.name} - {self.load_progress} [Node<{self.node}>]'\n\n def bootstrap(self, skip_reorg_check=False):\n # import here to avoid cyclic import\n from .bootstrap import load_blocks\n\n start_height, end_height = self.get_bootstrap_heights()\n load_blocks(self, start_height, end_height, skip_reorg_check)\n\n def get_tip_height(self):\n node_api = NodeV2API(self.node)\n try:\n end_block = node_api.get_tip()['height']\n except NodeError as e:\n logger.exception('Bootstrap failed - failed to get node tip')\n raise e\n return end_block\n\n def get_progress_decimal_places(self):\n if self.node.archive:\n return 2\n return 0\n\n def get_bootstrap_heights(self):\n node_api = NodeV2API(self.node)\n end_height = self.get_tip_height()\n try:\n start_height = node_api.get_blocks(0, end_height, 1, False)['blocks'][0]['header']['height']\n except IndexError:\n raise Exception('Node has no blocks.')\n except NodeError as e:\n logger.exception('Bootstrap failed - failed to get first block height')\n raise e\n return start_height, end_height\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name, to_lower=True)\n else:\n self.slug = self.slug.lower()\n if self.default:\n # set other blockchain.default to False\n other_blockchains = Blockchain.objects.all()\n if self.pk:\n other_blockchains = other_blockchains.exclude(pk=self.pk)\n other_blockchains.update(default=False)\n # blockchain doesn't change much so this call doesn't hurt\n old_instance = Blockchain.objects.get(pk=self.pk) if self.pk else None\n res = super().save(*args, **kwargs)\n if old_instance and self.load_progress != old_instance.load_progress:\n # load progress changed, send info\n async_to_sync(get_channel_layer().group_send)(\n 'admin_group',\n {\n 'type': 'blockchain_progress_changed',\n 'message': {\n 'slug': self.slug,\n # convert to float since Decimal is not serializable\n 'load_progress': float(self.load_progress),\n },\n }\n )\n return res\n\n def full_print(self):\n \"\"\"Used for developing and debugging.\"\"\"\n print('MAIN CHAIN:')\n for block in self.blocks.filter(reorg=None).order_by('height'):\n print(' --> ' + block.hash)\n for reorg in Reorg.objects.filter(blockchain=self):\n print('REORG:')\n for block in Block.objects.filter(reorg=reorg).order_by('height'):\n print(' --> ' + block.hash)\n print('------------------------------------------------------')\n\n def reset(self):\n \"\"\"Used for developing and debugging.\"\"\"\n from .models import 
Block, BlockHeader, Input, Output, Kernel, DramatiqTask, Reorg\n from django.contrib.contenttypes.models import ContentType\n from decimal import Decimal\n\n Input.objects.filter(block__blockchain=self).delete()\n Output.objects.filter(block__blockchain=self).delete()\n Kernel.objects.filter(block__blockchain=self).delete()\n self.reorgs.all().delete()\n\n content_type = ContentType.objects.get_for_model(self)\n DramatiqTask.objects.filter(\n content_type=content_type,\n object_id=self.id,\n ).delete()\n # removing header will also remove the block\n BlockHeader.objects.filter(block__blockchain=self).delete()\n self.load_progress = Decimal('0')\n self.save()"
},
{
"identifier": "Block",
"path": "backend/api/models.py",
"snippet": "class Block(TimeStampedModel):\n blockchain = models.ForeignKey(\n Blockchain, related_name='blocks', on_delete=models.CASCADE)\n hash = models.CharField(\n primary_key=True,\n max_length=64,\n validators=[MinLengthValidator(64)],\n db_index=True,\n )\n height = models.PositiveIntegerField(db_index=True)\n timestamp = models.DateTimeField(db_index=True)\n header = models.ForeignKey(\n 'BlockHeader', related_name='block', on_delete=models.CASCADE)\n prev_hash = models.CharField(\n max_length=64,\n null=True,\n blank=True,\n validators=[MinLengthValidator(64)],\n )\n nr_inputs = models.PositiveIntegerField(default=0)\n nr_outputs = models.PositiveIntegerField(default=0)\n nr_kernels = models.PositiveIntegerField(default=0)\n # when reorg is set it means this block is part of a reorg and not the main\n # chain\n reorg = models.ForeignKey(\n 'Reorg', null=True, related_name='blocks', on_delete=models.CASCADE)\n\n def __str__(self):\n suffix = ''\n if self.reorg:\n suffix = ' Reorged: {}'.format(self.reorg.id)\n return '{}: {} (prev: {})'.format(\n self.height, self.hash, self.prev_hash)\n\n def get_next_block(self):\n return Block.objects.filter(prev_hash=self.hash).first()\n\n def get_previous_block(self):\n return Block.objects.filter(hash=self.prev_hash).first()\n\n def full_print(self, prefix=''):\n \"\"\"Used for developing and debugging.\"\"\"\n print('---------------------------------------------------------------')\n print(f'{prefix}Block {self.height}: {self.hash}, reorg: {self.reorg}')\n print(f'{prefix} INPUTS:')\n for input in self.inputs.all():\n print(f'{prefix} {input}, output: {input.output}')\n print(f'{prefix} OUTPUTS:')\n for output in self.outputs.all():\n print(f'{prefix} {output}')\n print(f'{prefix} KERNELS:')\n for kernel in self.kernels.all():\n print(f'{prefix} {kernel}')\n print('---------------------------------------------------------------')"
},
{
"identifier": "BlockHeader",
"path": "backend/api/models.py",
"snippet": "class BlockHeader(TimeStampedModel):\n id = models.BigAutoField(primary_key=True)\n # same as with 'Block', we want to keep 'same' headers separate if they're\n # a part of a different chain.\n blockchain = models.ForeignKey(\n Blockchain, related_name='headers', on_delete=models.CASCADE)\n version = models.IntegerField()\n kernel_root = models.CharField(max_length=64)\n output_root = models.CharField(max_length=64)\n range_proof_root = models.CharField(max_length=64)\n kernel_mmr_size = models.IntegerField()\n output_mmr_size = models.IntegerField()\n nonce = models.TextField()\n edge_bits = models.IntegerField()\n # cuckoo_solution could be an ArrayField(models.BigIntegerField) but that\n # would make syncing a few times slower\n cuckoo_solution = models.TextField(db_index=True) # ArrayField(models.BigIntegerField())\n secondary_scaling = models.IntegerField()\n # sum of the target difficulties, not the sum of the actual block difficulties\n total_difficulty = models.BigIntegerField()\n total_kernel_offset = models.CharField(max_length=64)"
},
{
"identifier": "Input",
"path": "backend/api/models.py",
"snippet": "class Input(TimeStampedModel):\n \"\"\"\n The same input commitment can be included in two different blocks if it's a\n part of a reorg. In this case there will be two identical Input instances,\n except for the referenced block and possibly also with a different output.\n \"\"\"\n id = models.BigAutoField(primary_key=True)\n block = models.ForeignKey(\n Block,\n related_name='inputs',\n on_delete=models.CASCADE,\n )\n # pedersen commitment as hex\n commitment = models.CharField(max_length=66, db_index=True)\n\n # output which corresponds to this input being spent\n output = models.ForeignKey(\n Output,\n blank=True,\n null=True,\n related_name='inputs',\n on_delete=models.CASCADE,\n )\n\n def __str__(self):\n return f'{self.commitment}({self.id})'"
},
{
"identifier": "Output",
"path": "backend/api/models.py",
"snippet": "class Output(TimeStampedModel):\n \"\"\"\n The same output can be included in two different blocks if it's a part of a\n reorg. In this case there will be two identical Output instances, except for\n the referenced block.\n \"\"\"\n id = models.BigAutoField(primary_key=True)\n\n OUTPUT_TYPE = (\n (\"Transaction\", \"Transaction\"),\n (\"Coinbase\", \"Coinbase\"),\n )\n\n block = models.ForeignKey(\n Block,\n related_name='outputs',\n on_delete=models.CASCADE,\n )\n\n output_type = models.TextField(\n choices=OUTPUT_TYPE\n )\n\n # pedersen commitment as hex\n commitment = models.CharField(\n max_length=66,\n db_index=True,\n )\n\n # on reorged blocks 'spent' is set based on the reorged chain, not main\n spent = models.BooleanField()\n\n # range proof as hex\n proof = models.TextField()\n\n # range proof hash as hex\n proof_hash = models.CharField(max_length=64)\n\n # coinbase transactions have merkle_proof None\n merkle_proof = models.TextField(null=True)\n\n mmr_index = models.IntegerField()\n\n def __str__(self):\n return (\n f'{self.commitment}({self.id}), spent: {self.spent}, '\n f'inputs: {self.inputs.all()}'\n )"
},
{
"identifier": "Kernel",
"path": "backend/api/models.py",
"snippet": "class Kernel(TimeStampedModel):\n id = models.BigAutoField(primary_key=True)\n\n block = models.ForeignKey(\n Block,\n related_name='kernels',\n on_delete=models.CASCADE,\n )\n\n # plain, coinbase, heightlocked, norecentduplicate\n features = models.TextField()\n\n fee = models.BigIntegerField()\n\n fee_shift = models.IntegerField()\n\n lock_height = models.IntegerField()\n\n excess = models.CharField(max_length=66, db_index=True)\n\n excess_sig = models.CharField(max_length=142)\n\n def __str__(self):\n return f'{self.excess}'"
},
{
"identifier": "Reorg",
"path": "backend/api/models.py",
"snippet": "class Reorg(TimeStampedModel):\n id = models.BigAutoField(primary_key=True)\n blockchain = models.ForeignKey(\n Blockchain, related_name='reorgs', on_delete=models.CASCADE)\n # start_reorg_block and end_reorg_block define starting and ending block,\n # which were reorged\n start_reorg_block = models.ForeignKey(\n Block, related_name='start_reorgs', on_delete=models.CASCADE)\n end_reorg_block = models.ForeignKey(\n Block, related_name='end_reorgs', on_delete=models.CASCADE)\n # start_main_block defines starting block which is the new start of the main\n # chain - the block that replaced start_reorg_block. We usually don't know\n # which the ending block is when we spot the reorg, so we don't store it\n # (we don't even have it in DB at that time yet since we usually get them\n # incrementally in the order they're accepted).\n start_main_block = models.ForeignKey(\n Block, related_name='start_mains', on_delete=models.CASCADE)\n\n def __str__(self):\n return '{}: start: {}, end: {}'.format(\n self.blockchain.slug, self.start_reorg_block, self.end_reorg_block)"
},
{
"identifier": "Node",
"path": "backend/api/models.py",
"snippet": "class Node(TimeStampedModel):\n \"\"\"Node on the network. Currently it only supports grin-rust.\"\"\"\n id = models.BigAutoField(primary_key=True)\n # name can be whatever\n name = models.CharField(max_length=255, unique=True)\n # by default that's slug of the name\n slug = models.SlugField(max_length=255, unique=True)\n group = models.ForeignKey(\n NodeGroup, related_name='nodes', on_delete=models.PROTECT)\n # foreign api url of the grin-rust node\n api_url = models.URLField()\n # username of the grin-rust node\n api_username = models.CharField(max_length=255)\n # foreign api secret of the grin-rust node\n api_password = models.CharField(max_length=255)\n # if archive is true then we fetch every block when we bootstrap, otherwise\n # we fetch only latest 1440 blocks (1 day)\n archive = models.BooleanField(default=False)\n\n def __str__(self):\n repr = f'{self.name}'\n if self.archive:\n repr += ' (archive)'\n return repr\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name, to_lower=True)\n else:\n self.slug = self.slug.lower()\n return super().save(*args, **kwargs)\n\n def is_reachable(self):\n try:\n NodeV2API(self).get_tip()\n return True\n except (\n RequestsConnectionError,\n RequestsTimeout,\n RequestsHTTPError,\n RequestsReadTimeout\n ):\n logger.exception('Node unreachable', extra={'node': self.slug})\n return False"
},
{
"identifier": "NodeGroup",
"path": "backend/api/models.py",
"snippet": "class NodeGroup(models.Model):\n \"\"\"\n NodeGroup represents a group of nodes. These nodes should be on the same\n network.:\n \"\"\"\n id = models.BigAutoField(primary_key=True)\n # name is probably mainnet, testnet or smth similar\n name = models.CharField(max_length=255, unique=True)\n # by default that's slug of the name\n slug = models.SlugField(max_length=255, unique=True)\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name, to_lower=True)\n else:\n self.slug = self.slug.lower()\n self.full_clean()\n return super().save(*args, **kwargs)"
},
{
"identifier": "fetch_and_store_block",
"path": "backend/api/bootstrap.py",
"snippet": "def fetch_and_store_block(blockchain, block_height, prefetch=True):\n # initialize node api\n node_api = NodeV2API(blockchain.node)\n if block_height < 0:\n # no such block height\n raise NodeBlockNotFoundException()\n if prefetch:\n block_data = get_prefetched_header_and_block_data(blockchain.node, block_height)\n else:\n block_data = node_api.get_block(height=block_height)\n header_data = block_data['header']\n timestamp = parse_datetime(header_data['timestamp'])\n hash = header_data['hash']\n # create header instance\n cuckoo_solution = ','.join(map(str, header_data['cuckoo_solution']))\n with transaction.atomic():\n header, header_created = BlockHeader.objects.get_or_create(\n blockchain=blockchain,\n cuckoo_solution=cuckoo_solution,\n kernel_root=header_data['kernel_root'],\n defaults={\n 'version': header_data['version'],\n 'output_root': header_data['output_root'],\n 'range_proof_root': header_data['range_proof_root'],\n 'kernel_mmr_size': header_data['kernel_mmr_size'],\n 'output_mmr_size': header_data['output_mmr_size'],\n 'nonce': str(header_data['nonce']),\n 'edge_bits': header_data['edge_bits'],\n 'secondary_scaling': header_data['secondary_scaling'],\n 'total_difficulty': header_data['total_difficulty'],\n 'total_kernel_offset': header_data['total_kernel_offset'],\n }\n )\n # create block instance\n try:\n block, block_created = Block.objects.get_or_create(\n blockchain=blockchain,\n hash=hash,\n height=block_height,\n timestamp=timestamp,\n header=header,\n prev_hash=block_data['header']['previous'],\n reorg=None,\n nr_inputs=len(block_data['inputs']),\n nr_outputs=len(block_data['outputs']),\n nr_kernels=len(block_data['kernels']),\n )\n except IntegrityError as e:\n # race condition so it's a duplicate. We can skip creation process\n # and just return the block that we already have\n return Block.objects.get(blockchain=blockchain, hash=hash)\n\n if not block_created:\n # we have already fetched all the data since it's done in an atomic\n # transaction, so skip unnecessary work\n return block\n\n # bulk create kernels\n kernels = []\n for kernel_data in block_data['kernels']:\n kernels.append(\n Kernel(\n block=block,\n features=kernel_data['features'],\n fee=kernel_data['fee'],\n fee_shift=kernel_data['fee_shift'],\n lock_height=kernel_data['lock_height'],\n excess=kernel_data['excess'],\n excess_sig=kernel_data['excess_sig'],\n )\n )\n Kernel.objects.bulk_create(kernels)\n\n inputs = []\n # create input instances\n outputs_data = Output.objects\\\n .filter(\n commitment__in=block_data['inputs'],\n block__reorg__isnull=True,\n block__blockchain=block.blockchain,\n )\\\n .values('id', 'commitment')\n outputs_mapper = { output_data['commitment'] : output_data['id'] for output_data in outputs_data }\n for input_data in block_data['inputs']:\n inputs.append(\n Input(\n block=block,\n commitment=input_data,\n output_id=outputs_mapper.get(input_data),\n )\n )\n Input.objects.bulk_create(inputs)\n # mark the corresponding outputs as spent, but only on the main chain so\n # that we don't corrupt the reorged data\n Output.objects.filter(pk__in=outputs_mapper.values()).update(spent=True)\n\n # create output instances\n outputs = []\n inputs = Input.objects\\\n .filter(\n commitment__in=list(map(lambda x: x['commit'], block_data['outputs'])),\n block__reorg__isnull=True,\n block__blockchain=block.blockchain,\n )\n inputs_mapper = { input.commitment : input for input in inputs }\n for output_data in block_data['outputs']:\n outputs.append(\n Output(\n block=block,\n 
output_type=output_data['output_type'],\n commitment=output_data['commit'],\n spent=output_data['spent'],\n proof=output_data['proof'],\n proof_hash=output_data['proof_hash'],\n merkle_proof=output_data['merkle_proof'],\n mmr_index=output_data['mmr_index'],\n )\n )\n outputs = Output.objects.bulk_create(outputs)\n # link inputs to created outputs, but only on the main chain so that we\n # don't corrupt the reorged data\n fixed_inputs = []\n for output in outputs:\n matching_input = inputs_mapper.get(output.commitment)\n if matching_input:\n matching_input.output = output\n fixed_inputs.append(matching_input)\n Input.objects.bulk_update(fixed_inputs, ['output'])\n return block"
}
] | from django.test import TestCase
from .models import (
Blockchain,
Block,
BlockHeader,
Input,
Output,
Kernel,
Reorg,
Node,
NodeGroup,
)
from .bootstrap import fetch_and_store_block
from unittest.mock import patch, Mock
from backend.api.bootstrap import NodeV2API
from backend.api.bootstrap import NodeV2API, load_blocks
from backend.api.bootstrap import NodeV2API
from backend.api.bootstrap import NodeV2API
import json | 7,867 | self._get_output(2, 'a', False),
]), # h101.2
self._get_fake_block(3, 'h101.2', ['g2', 'a'], [
self._get_output(3, 'f', False),
self._get_output(3, 'b', False),
self._get_output(3, 'c', False),
]), # h102.3
self._get_fake_block(4, 'h102.3', ['c'], [
self._get_output(4, 'h', False)
]), # h103.3
self._get_fake_block(3, 'h101.2', ['a'], [
self._get_output(3, 'b', False),
self._get_output(3, 'c', False),
]), # first reorg - h102.2
self._get_fake_block(4, 'h102.2', ['b'], [
self._get_output(4, 'd', False),
]), # 103.2
self._get_fake_block(5, 'h103.2', [], []), # 104.2
self._get_fake_block(6, 'h104.2', ['c'], [
self._get_output(6, 'e', False),
]), # 105.2
self._get_fake_block(2, 'h100', ['g1'], [
self._get_output(2, 'a', False),
]), # second reorg - h101
self._get_fake_block(3, 'h101', ['g3', 'a'], [
self._get_output(3, 'i', False),
self._get_output(3, 'b', False),
self._get_output(3, 'c', False),
]), # h102
self._get_fake_block(4, 'h102', ['b'], [
self._get_output(4, 'd', False),
]), # h103
]
# make sure node returns reorg data as defined in the function docs
node_instance_mock = Mock()
node_instance_mock.get_header.side_effect = headers
node_instance_mock.get_block.side_effect = blocks
self.nodeV2APIMock.return_value = node_instance_mock
# send new blocks to accepted-block view (includes 2 reorgs)
for i in range(0, len(headers)):
header = headers[i]
post_data = self._get_accepted_block_data(
header['height'], header['hash'], header['previous']
)
self.client.post(
f'/api/blockchains/{self.blockchain.slug}/accepted/',
json.dumps(post_data),
content_type="application/json"
)
# validate correctness of the main chain block sequence
main_chain_blocks = self.blockchain.blocks\
.filter(reorg__isnull=True)\
.order_by('height')
expected_main_chain = [
{ 'height': 1, 'hash': self.to_hex('h100'), 'prev_hash': None },
{ 'height': 2, 'hash': self.to_hex('h101'), 'prev_hash': self.to_hex('h100') },
{ 'height': 3, 'hash': self.to_hex('h102'), 'prev_hash': self.to_hex('h101') },
{ 'height': 4, 'hash': self.to_hex('h103'), 'prev_hash': self.to_hex('h102') },
]
actual_main_chain = [
{
'height': block.height,
'hash': block.hash,
'prev_hash': block.prev_hash,
}
for block in main_chain_blocks
]
self.assertEqual(actual_main_chain, expected_main_chain)
# reorgs validation
self.assertEqual(Reorg.objects.count(), 2)
# validate correctness of the first reorg
reorg1 = Reorg.objects.first()
self.assertEqual(reorg1.blockchain, self.blockchain)
self.assertEqual(reorg1.start_reorg_block.hash, self.to_hex('h102.3'))
self.assertEqual(reorg1.end_reorg_block.hash, self.to_hex('h103.3'))
self.assertEqual(reorg1.start_main_block.hash, self.to_hex('h102.2'))
# validate correctness of the second reorg
reorg2 = Reorg.objects.last()
self.assertEqual(reorg2.blockchain, self.blockchain)
self.assertEqual(reorg2.start_reorg_block.hash, self.to_hex('h101.2'))
self.assertEqual(reorg2.end_reorg_block.hash, self.to_hex('h105.2'))
self.assertEqual(reorg2.start_main_block.hash, self.to_hex('h101'))
# validate all inputs
main_inputs = set(map(
lambda input: (
input.commitment,
input.output.block.hash if input.output else None,
input.block.reorg.id if input.block.reorg else None
),
Input.objects.all()
))
expected_inputs = set([
# pairs (<commitment>, <block_hash_of_related_output>, <reorgID>)
# main
('g1', self.to_hex('h100'), None),
('g3', self.to_hex('h100'), None),
('a', self.to_hex('h101'), None),
('b', self.to_hex('h102'), None),
# reorg 2
('g1', self.to_hex('h100'), 2),
('a', self.to_hex('h101.2'), 2),
('b', self.to_hex('h102.2'), 2),
('c', self.to_hex('h102.2'), 2),
# reorg 1
('g2', self.to_hex('h100'), 1),
('a', self.to_hex('h101.2'), 1),
('c', self.to_hex('h102.3'), 1),
])
self.assertEqual(main_inputs, expected_inputs)
# validate all outputs
main_outputs = set(map(
lambda output: (
output.commitment,
output.block.hash,
output.spent,
tuple(sorted(map(
lambda input: input.block.hash,
output.inputs.all()
)))
),
|
class ReorgTestCase(TestCase):
def setUp(self):
self.patcher = patch('backend.api.bootstrap.NodeV2API')
self.nodeV2APIMock = self.patcher.start()
node_group = NodeGroup.objects.create(name='foo group')
node = Node.objects.create(
name='test',
api_url='foo_url',
api_username='foouser',
api_password='foopw',
archive=False,
group=node_group,
)
self.blockchain = Blockchain.objects.create(
name='test',
node=node,
default=True,
fetch_price=False,
)
def tearDown(self):
self.patcher.stop()
def to_hex(self, s):
# in some cases some previous hash might be None
if s is None:
return
return bytes(s, 'utf-8').hex()
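    # Illustrative example: to_hex('h100') hex-encodes the UTF-8 bytes of the
    # label, returning '68313030'; None is passed through so the genesis block
    # can carry a missing previous hash.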
def _get_fake_header(self, height, hash, prev_hash):
return {
'cuckoo_solution': list(range(1, 43)),
'edge_bits': 32,
'hash': self.to_hex(hash),
'height': height,
'kernel_mmr_size': 1,
'kernel_root': 'foo-kernel-root',
'nonce': 1,
'output_mmr_size': 1,
'output_root': 'foo-output-root',
'prev_root': 'foo-prev-root',
'previous': self.to_hex(prev_hash),
'range_proof_root': 'foo-range-proof-root',
'secondary_scaling': 0,
'timestamp': '2000-01-01T00:00:00+00:00',
'total_difficulty': 1,
'total_kernel_offset': 'foo-total-kernel-offset',
'version': 5,
}
def _get_fake_block(self, height, prev_hash, inputs, outputs):
return {
'header': {
'previous': self.to_hex(prev_hash),
},
'inputs': inputs,
'kernels': [
{
'excess': 'foo-excess',
'excess_sig': 'foo-excess-sig',
'features': 'Plain',
'fee': 30000000,
'fee_shift': 0,
'lock_height': 0,
},
],
'outputs': outputs,
}
def _get_accepted_block_data(self, height, hash, prev_hash):
# return only the data that's read in the view
if prev_hash:
# we convert prev_hash to bytearray (but as list of ints) because
# that's what's POST-ed to the accepted-block API.
prev_hash = [x for x in bytearray(bytes.fromhex(prev_hash))]
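            # e.g. the hex string '68313030' (to_hex('h100')) becomes the byte
            # list [104, 49, 48, 48], matching the wire format the
            # accepted-block endpoint receives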
return {
'data': {
'body': {},
'header': {
'height': height,
'prev_hash': prev_hash,
},
},
# here hash is already hex
'hash': hash,
}
def _get_output(self, height, commit, spent):
return {
'block_height': height,
'commit': commit,
'merkle_proof': None,
'mmr_index': 0,
'output_type': 'Transaction',
'proof': 'foo-proof',
'proof_hash': 'foo-proof-hash',
'spent': spent,
}
def test_reorg_through_accepted_block_view(self):
"""
Test nested reorg scenario for accepted-block view.
0 = main chain
1 and 2 = Reorg 1 and Reorg 2
BLOCK ORDER:
100 --> 100(0)
101.2 --> 100(0), 101.2(0)
102.3 --> 100(0), 101.2(0), 102.3(0)
103.3 --> 100(0), 101.2(0), 102.3(0), 103.3(0)
102.2 --> 100(0), 101.2(0), 102.2(0):
FIND AND MARK OLD AS REORG 1: 102.3(1), 103.3(1)
103.2 --> 100(0), 101.2(0), 102.2(0), 103.2(0):
THE OLD ONES STAY THE SAME: 102.3(1), 103.3(1)
104.2 --> 100(0), 101.2(0), 102.2(0), 103.2(0), 104.2(0):
THE OLD ONES STAY THE SAME: 102.3(1), 103.3(1)
105.2 --> 100(0), 101.2(0), 102.2(0), 103.2(0), 104.2(0), 105.2(0):
THE OLD ONES STAY THE SAME: 102.3(1), 103.3(1)
101 --> 100(0), 101(0):
FIND AND MARK OLD AS REORG 2: 101.2(2), 102.2(2), 103.2(2), 104.2(2), 105.2(2)
PREVIOUS REORGS: 102.3(1), 103.3(1)
102 --> 100(0), 101(0), 102(0):
THE OLD ONES STAY THE SAME: 102.3(1), 103.3(1), 101.2(2), 102.2(2), 103.2(2), 104.2(2), 105.2(2)
103 --> 100(0), 101(0), 102(0), 103(0):
THE OLD ONES STAY THE SAME: 102.3(1), 103.3(1), 101.2(2), 102.2(2), 103.2(2), 104.2(2), 105.2(2)
"""
# define header/block sequence as defined in the function docstring
headers = [
self._get_fake_header(1, 'h100', None), # genesis
self._get_fake_header(2, 'h101.2', 'h100'),
self._get_fake_header(3, 'h102.3', 'h101.2'),
self._get_fake_header(4, 'h103.3', 'h102.3'),
self._get_fake_header(3, 'h102.2', 'h101.2'), # first reorg
self._get_fake_header(4, 'h103.2', 'h102.2'),
self._get_fake_header(5, 'h104.2', 'h103.2'),
self._get_fake_header(6, 'h105.2', 'h104.2'),
self._get_fake_header(2, 'h101', 'h100'), # second reorg
self._get_fake_header(3, 'h102', 'h101'),
self._get_fake_header(4, 'h103', 'h102'),
]
blocks = [
self._get_fake_block(1, None, [], [
self._get_output(1, 'g1', False),
self._get_output(1, 'g2', False),
self._get_output(1, 'g3', False)
]), # genesis - h100
self._get_fake_block(2, 'h100', ['g1'], [
self._get_output(2, 'a', False),
]), # h101.2
self._get_fake_block(3, 'h101.2', ['g2', 'a'], [
self._get_output(3, 'f', False),
self._get_output(3, 'b', False),
self._get_output(3, 'c', False),
]), # h102.3
self._get_fake_block(4, 'h102.3', ['c'], [
self._get_output(4, 'h', False)
]), # h103.3
self._get_fake_block(3, 'h101.2', ['a'], [
self._get_output(3, 'b', False),
self._get_output(3, 'c', False),
]), # first reorg - h102.2
self._get_fake_block(4, 'h102.2', ['b'], [
self._get_output(4, 'd', False),
]), # 103.2
self._get_fake_block(5, 'h103.2', [], []), # 104.2
self._get_fake_block(6, 'h104.2', ['c'], [
self._get_output(6, 'e', False),
]), # 105.2
self._get_fake_block(2, 'h100', ['g1'], [
self._get_output(2, 'a', False),
]), # second reorg - h101
self._get_fake_block(3, 'h101', ['g3', 'a'], [
self._get_output(3, 'i', False),
self._get_output(3, 'b', False),
self._get_output(3, 'c', False),
]), # h102
self._get_fake_block(4, 'h102', ['b'], [
self._get_output(4, 'd', False),
]), # h103
]
# make sure node returns reorg data as defined in the function docs
node_instance_mock = Mock()
node_instance_mock.get_header.side_effect = headers
node_instance_mock.get_block.side_effect = blocks
self.nodeV2APIMock.return_value = node_instance_mock
# send new blocks to accepted-block view (includes 2 reorgs)
for i in range(0, len(headers)):
header = headers[i]
post_data = self._get_accepted_block_data(
header['height'], header['hash'], header['previous']
)
self.client.post(
f'/api/blockchains/{self.blockchain.slug}/accepted/',
json.dumps(post_data),
content_type="application/json"
)
# validate correctness of the main chain block sequence
main_chain_blocks = self.blockchain.blocks\
.filter(reorg__isnull=True)\
.order_by('height')
expected_main_chain = [
{ 'height': 1, 'hash': self.to_hex('h100'), 'prev_hash': None },
{ 'height': 2, 'hash': self.to_hex('h101'), 'prev_hash': self.to_hex('h100') },
{ 'height': 3, 'hash': self.to_hex('h102'), 'prev_hash': self.to_hex('h101') },
{ 'height': 4, 'hash': self.to_hex('h103'), 'prev_hash': self.to_hex('h102') },
]
actual_main_chain = [
{
'height': block.height,
'hash': block.hash,
'prev_hash': block.prev_hash,
}
for block in main_chain_blocks
]
self.assertEqual(actual_main_chain, expected_main_chain)
# reorgs validation
self.assertEqual(Reorg.objects.count(), 2)
# validate correctness of the first reorg
reorg1 = Reorg.objects.first()
self.assertEqual(reorg1.blockchain, self.blockchain)
self.assertEqual(reorg1.start_reorg_block.hash, self.to_hex('h102.3'))
self.assertEqual(reorg1.end_reorg_block.hash, self.to_hex('h103.3'))
self.assertEqual(reorg1.start_main_block.hash, self.to_hex('h102.2'))
# validate correctness of the second reorg
reorg2 = Reorg.objects.last()
self.assertEqual(reorg2.blockchain, self.blockchain)
self.assertEqual(reorg2.start_reorg_block.hash, self.to_hex('h101.2'))
self.assertEqual(reorg2.end_reorg_block.hash, self.to_hex('h105.2'))
self.assertEqual(reorg2.start_main_block.hash, self.to_hex('h101'))
# validate all inputs
main_inputs = set(map(
lambda input: (
input.commitment,
input.output.block.hash if input.output else None,
input.block.reorg.id if input.block.reorg else None
),
Input.objects.all()
))
expected_inputs = set([
# pairs (<commitment>, <block_hash_of_related_output>, <reorgID>)
# main
('g1', self.to_hex('h100'), None),
('g3', self.to_hex('h100'), None),
('a', self.to_hex('h101'), None),
('b', self.to_hex('h102'), None),
# reorg 2
('g1', self.to_hex('h100'), 2),
('a', self.to_hex('h101.2'), 2),
('b', self.to_hex('h102.2'), 2),
('c', self.to_hex('h102.2'), 2),
# reorg 1
('g2', self.to_hex('h100'), 1),
('a', self.to_hex('h101.2'), 1),
('c', self.to_hex('h102.3'), 1),
])
self.assertEqual(main_inputs, expected_inputs)
# validate all outputs
main_outputs = set(map(
lambda output: (
output.commitment,
output.block.hash,
output.spent,
tuple(sorted(map(
lambda input: input.block.hash,
output.inputs.all()
)))
), | Output.objects.all() | 4 | 2023-12-24 22:15:11+00:00 | 12k |
datrocity/pond | pond/activity.py | [
{
"identifier": "Artifact",
"path": "pond/artifact/artifact.py",
"snippet": "class Artifact(ABC):\n \"\"\" Knows how to read and write one type of artifact.\n\n Concrete Artifact implementation should save the metadata with the data if possible,\n so that the artifact is self-contained even if, for instance, it is sent by email.\n \"\"\"\n\n # --- Artifact class interface\n\n # todo: what is the class_id for?\n\n @classmethod\n def class_id(cls):\n \"\"\" String ID to be able to find this class from its name. \"\"\"\n return cls.__name__\n\n @classmethod\n def subclass_from_id(cls, class_id: str) -> Type['Artifact']:\n \"\"\" Find a subclass from its class ID. \"\"\"\n subclasses = cls.__subclasses__()\n for subclass in subclasses:\n if subclass.class_id() == class_id:\n break\n else:\n # todo this exception is not defined here\n raise InvalidArtifactClass(class_id)\n return subclass\n\n # --- Artifact public interface\n\n def __init__(self, data, metadata=None):\n \"\"\" Create an Artifact.\n\n Parameters\n ----------\n data: any\n The data of the artifact.\n metadata: dict\n User-defined metadata, saved with the artifact (optional).\n The metadata keys and values will be stored as strings.\n \"\"\"\n self.data = data\n if metadata is None:\n metadata = {}\n self.metadata = metadata\n\n @classmethod\n def read(cls, path, metadata=None, **kwargs):\n \"\"\" Reads the artifact from a file, given the path.\n\n Parameters\n ----------\n path: str\n Filename from which the artifact is read.\n metadata: dict or None\n The metadata for the artifact. If defined, it takes the place of any metadata\n defined in the artifact itself.\n Typically, this external artifact metadata comes from an artifact manifest. If the\n artifact has been written as a `pond` `VersionedArtifact`, then the two sources of\n metadata are identical.\n kwargs: dict\n Additional parameters for the reader.\n\n Returns\n -------\n artifact: Artifact\n An instance of the artifact.\n \"\"\"\n with open(path, 'rb') as f:\n artifact = cls.read_bytes(f, metadata, **kwargs)\n return artifact\n\n @classmethod\n def read_bytes(cls, file_, metadata=None, **kwargs):\n \"\"\" Reads the artifact from a binary file.\n\n Parameters\n ----------\n file_: file-like object\n A file-like object from which the artifact is read, opened in binary mode.\n metadata: dict or None\n The metadata for the artifact. If defined, it takes the place of any metadata\n defined in the artifact itself.\n Typically, this external artifact metadata comes from an artifact manifest. 
If the\n artifact has been written as a `pond` `VersionedArtifact`, then the two sources of\n metadata are identical.\n kwargs: dict\n Parameters for the reader.\n\n Returns\n -------\n artifact: Artifact\n An instance of the artifact.\n \"\"\"\n artifact = cls._read_bytes(file_, **kwargs)\n if metadata is not None:\n artifact.metadata = metadata\n return artifact\n\n # todo why the kwargs\n def write(self, path, **kwargs):\n \"\"\" Writes the artifact to file.\n\n Parameters\n ----------\n path: str\n Path to which the artifact is written.\n kwargs: dict\n Parameters for the writer.\n\n \"\"\"\n with open(path, 'wb') as f:\n self.write_bytes(f, **kwargs)\n\n # --- Abstract interface\n\n @staticmethod\n @abstractmethod\n def filename(basename):\n \"\"\" Complete a base filename with an extension.\n\n Parameters\n ----------\n basename: str\n The filename without extension.\n\n Returns\n -------\n filename: str\n The completed filename.\n\n \"\"\"\n pass\n\n @classmethod\n @abstractmethod\n def _read_bytes(cls, file_, **kwargs):\n \"\"\" Reads the artifact from a binary file.\n\n This is a private method that loads the artifact from a binary file without dealing with\n the logic of the external metadata. It is called by `Artifact.read_bytes`.\n\n Parameters\n ----------\n file_: file-like object\n A file-like object from which the artifact is read, opened in binary mode.\n kwargs: dict\n Parameters for the reader.\n\n Returns\n -------\n artifact: Artifact\n An instance of the artifact.\n \"\"\"\n pass\n\n @abstractmethod\n def write_bytes(self, file_, **kwargs):\n \"\"\" Writes the artifact to binary file.\n\n This method also need to take care of writing the artifact metadata in the file itself,\n whenever possible.\n If the artifact is being written as a `pond` `VersionedArtifact`, then the metadata is also\n stored in an external manifest.\n\n Parameters\n ----------\n file_: file-like object\n A file-like object to which the artifact is written, opened in binary mode.\n kwargs: dict\n Parameters for the writer.\n\n \"\"\"\n pass\n\n def get_artifact_metadata(self):\n \"\"\"\n This is not the user metadata!\n\n Returns\n -------\n\n \"\"\"\n return None"
},
{
"identifier": "ArtifactRegistry",
"path": "pond/artifact/artifact_registry.py",
"snippet": "class ArtifactRegistry:\n def __init__(self):\n def register(self, artifact_class, data_class, format=None):\n def get_available_artifacts(self, data_class):\n def get_artifact(self, data_class, format=None):"
},
{
"identifier": "DataType",
"path": "pond/conventions.py",
"snippet": "class WriteMode(str, Enum):\n OVERWRITE = 'overwrite'\n ERROR_IF_EXISTS = 'errorifexists'\nMANIFEST_FILENAME = 'manifest.yml'\nMETADATA_DIRNAME = '_pond'\nTXT_ENCODING = 'utf-8'\nVERSIONS_LOCK_FILENAME = '_VERSIONS_LOCK'\ndef urijoinpath(*parts: str) -> str:\ndef versioned_artifact_location(location: str, artifact_name: str):\ndef version_location(location: str, version_name: VersionName) -> str:\ndef versions_lock_file_location(location: str) -> str:\ndef version_data_location(version_location: str, data_filename: str) -> str:\ndef version_manifest_location(version_location: str) -> str:\ndef version_uri(datastore_id: str, location: str, artifact_name: str, version_name: VersionName):"
},
{
"identifier": "MetadataSource",
"path": "pond/metadata/metadata_source.py",
"snippet": "class MetadataSource:\n \"\"\" Represents a source of metadata.\n\n The metadata is collected using the `collect` method. Note that two calls to `collect` can\n return different values, as the metadata could be collected on the fly, as in the case of a\n time stamp, a git SHA, or other.\n\n Metadata keys and values must both be strings.\n \"\"\"\n\n @abstractmethod\n def section_name(self) -> str:\n \"\"\" Name of the section in the manifest corresponding to this metadata. \"\"\"\n return ''\n\n @abstractmethod\n def collect(self) -> dict[str, str]:\n \"\"\" Collect all the metadata in a dictionary.\n\n Keys and values must both be strings.\n \"\"\"\n return {}"
},
{
"identifier": "DictMetadataSource",
"path": "pond/metadata/dict.py",
"snippet": "class DictMetadataSource(MetadataSource):\n\n def __init__(self, name: str, metadata: dict[str, Any]):\n \"\"\" A dictionary used as source of metadata.\n\n Parameters\n ----------\n name: str\n The name of the section represented by this metadata source.\n metadata: dict[str, Any]\n The dictionary of metadata. Values will be converted to string.\n \"\"\"\n self.name = name\n self.metadata = metadata\n\n def section_name(self) -> str:\n return self.name\n\n def collect(self) -> dict[str, str]:\n return {k: str(v) for k,v in self.metadata.items()}"
},
{
"identifier": "Manifest",
"path": "pond/metadata/manifest.py",
"snippet": "class Manifest:\n\n # --- Manifest class interface\n\n def __init__(self):\n self._sections = {}\n\n @classmethod\n def from_yaml(cls, manifest_location, datastore):\n \"\"\"\n\n Parameters\n ----------\n manifest_location\n datastore\n\n Returns\n -------\n\n \"\"\"\n manifest_dict = datastore.read_yaml(manifest_location)\n return cls.from_nested_dict(manifest_dict)\n\n @classmethod\n def from_nested_dict(cls, manifest_dict: dict):\n manifest = cls()\n for section_name, metadata in manifest_dict.items():\n # TODO make this a FrozendictMetadataSource\n source = DictMetadataSource(name=section_name, metadata=metadata)\n manifest.add_section(source)\n return manifest\n\n # --- Manifest public interface\n\n def to_yaml(self, manifest_location, datastore):\n metadata = self.collect()\n datastore.write_yaml(manifest_location, metadata)\n\n def add_section(self, metadata_source):\n \"\"\"\n\n Parameters\n ----------\n metadata_source\n If None, nothing is added but no exception is raised.\n\n Returns\n -------\n\n \"\"\"\n if metadata_source is not None:\n self._sections[metadata_source.section_name()] = metadata_source\n\n def collect_section(self, name, default_metadata=None):\n source = self._sections.get(name, None)\n if source is None:\n metadata = default_metadata\n else:\n metadata = source.collect()\n return metadata\n\n def collect(self):\n dict_ = {}\n for name, source in self._sections.items():\n source_metadata = {k: str(v) for k, v in source.collect().items()}\n dict_[name] = source_metadata\n return dict_"
},
{
"identifier": "Datastore",
"path": "pond/storage/datastore.py",
"snippet": "class Datastore(ABC):\n \"\"\" Versioned storage for the artifacts.\n\n Parameters\n ----------\n id: str\n Unique identifier for the datastore. This is used in the URI for each versioned\n artifact to uniquely identify the artifact.\n \"\"\"\n\n # -- Datastore class interface\n\n def __init__(self, id: str):\n self.id = id\n\n # -- Abstract interface\n\n @abstractmethod\n def open(self, path: str, mode: str) -> IO[Any]:\n \"\"\" Open a file-like object\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n mode: str\n Specifies the mode in which the file is opened.\n\n Returns\n -------\n IO[Any]\n An open file-like object.\n\n \"\"\"\n pass\n\n @abstractmethod\n def read(self, path: str) -> bytes:\n \"\"\" Read a sequence of bytes from the data store.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n\n Returns\n -------\n bytes\n The sequence of bytes read from `path`.\n\n Raises\n ------\n FileNotFoundError\n If the requested path does not exist.\n \"\"\"\n pass\n\n @abstractmethod\n def write(self, path: str, data: bytes) -> None:\n \"\"\" Write a sequence of bytes to the data store.\n\n `path` contains the path relative to the root of the data store, including the name\n of the file to be created. If a file already exists at `path`, it is overwritten.\n\n Intermediate directories that do not exist will be created.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n data: bytes\n Sequence of bytes to write at `path`.\n \"\"\"\n pass\n\n @abstractmethod\n def exists(self, path: str) -> bool:\n \"\"\" Returns True if the file exists.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n\n Returns\n -------\n bool\n True if the file exists, false otherwise\n \"\"\"\n ...\n\n @abstractmethod\n def delete(self, path: str, recursive: bool = False) -> None:\n \"\"\"Deletes a file or directory\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n recursive: bool, optional, default is False\n Whether to recursively delete the location\n \"\"\"\n ...\n\n @abstractmethod\n def makedirs(self, path: str) -> None:\n \"\"\" Creates the specified directory if needed.\n\n If the directories already exist, the method does not do anything.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n \"\"\"\n ...\n\n # -- Read/write utility methods\n\n def read_string(self, path: str) -> str:\n \"\"\" Read a string from a file.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n\n Returns\n -------\n str\n The read string\n\n Raises\n ------\n FileNotFound\n If the file cannot be found\n \"\"\"\n return self.read(path).decode(TXT_ENCODING)\n\n def write_string(self, path: str, content: str) -> None:\n \"\"\" Write a string to a file.\n\n Intermediate directories that do not exist will be created.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n\n content: str\n Content to write\n \"\"\"\n self.write(path, content.encode(TXT_ENCODING))\n\n def read_yaml(self, path: str) -> Any:\n \"\"\" Read and parse a YAML file.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n\n Returns\n -------\n Any\n The parsed object\n\n Raises\n ------\n FileNotFound\n If the file cannot be found\n \"\"\"\n return yaml_load(self.read_string(path))\n\n def write_yaml(self, path: str, content: Any) -> 
None:\n \"\"\" Serializes to YAML and write an object to a file.\n\n Intermediate directories that do not exist will be created.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n content: Any\n Content to write\n \"\"\"\n return self.write_string(path, yaml_dump(content))\n\n def read_json(self, path: str) -> Any:\n \"\"\" Read and parse a JSON file.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n\n Returns\n -------\n Any\n The parsed object\n\n Raises\n ------\n FileNotFound\n If the file cannot be found\n \"\"\"\n return json.loads(self.read_string(path))\n\n def write_json(self, path: str, content: Any) -> None:\n \"\"\"Serializes to JSON and write an object to a file\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n content: Any\n Content to write\n \"\"\"\n return self.write_string(path, json.dumps(content, separators=(',', ':')))"
},
{
"identifier": "Version",
"path": "pond/version.py",
"snippet": "class Version:\n\n def __init__(self, artifact_name: str, version_name: VersionName, artifact: Artifact,\n manifest: Optional[Manifest] = None):\n \"\"\" Manages a version: its manifest, name, and artifact.\n \"\"\"\n self.artifact_name = artifact_name\n self.version_name = version_name\n self.manifest = manifest\n self.artifact = artifact\n\n def get_metadata(self, location, datastore, data_filename):\n version_metadata = {\n 'uri': self.get_uri(location, datastore),\n 'filename': data_filename,\n 'date_time': datetime.datetime.now(),\n 'artifact_name': self.artifact_name,\n }\n version_metadata_source = DictMetadataSource(name='version', metadata=version_metadata)\n return version_metadata_source\n\n def write(self, location: str, datastore: Datastore, manifest: Manifest):\n # TODO: manifest is modified in-place, is that an issue?\n\n #: location of the version folder\n version_location_ = version_location(location, self.version_name)\n #: location of the manifest file\n manifest_location = version_manifest_location(version_location_)\n\n #: filename for the saved data\n data_basename = f'{self.artifact_name}_{str(self.version_name)}'\n data_filename = self.artifact.filename(data_basename)\n\n version_metadata_source = self.get_metadata(location, datastore, data_filename)\n manifest.add_section(version_metadata_source)\n artifact_metadata_source = self.artifact.get_artifact_metadata()\n manifest.add_section(artifact_metadata_source)\n manifest.to_yaml(manifest_location, datastore)\n\n datastore.makedirs(version_location_)\n data_location = version_data_location(version_location_, data_filename)\n with datastore.open(data_location, 'wb') as f:\n self.artifact.write_bytes(f)\n\n # save stored manifest\n self.manifest = manifest\n\n # todo store and recover artifact_class from manifest\n @classmethod\n def read(cls, version_name, artifact_class, location, datastore):\n #: location of the version folder\n version_location_ = version_location(location, version_name)\n #: location of the manifest file\n manifest_location = version_manifest_location(version_location_)\n\n if not datastore.exists(manifest_location):\n raise VersionDoesNotExist(location, str(version_name))\n manifest = Manifest.from_yaml(manifest_location, datastore)\n\n version_metadata = manifest.collect_section('version')\n data_filename = version_metadata['filename']\n data_location = version_data_location(version_location_, data_filename)\n user_metadata = manifest.collect_section('user')\n with datastore.open(data_location, 'rb') as f:\n artifact = artifact_class.read_bytes(f, metadata=user_metadata)\n\n version = cls(\n artifact_name=version_metadata['artifact_name'],\n version_name=version_name,\n artifact=artifact,\n manifest=manifest,\n )\n\n return version\n\n def get_uri(self, location, datastore):\n \"\"\" Build URI for a specific location and datastore. \"\"\"\n uri = version_uri(datastore.id, location, self.artifact_name, self.version_name)\n return uri\n\n def exists(self, location: str, datastore: Datastore):\n \"\"\" Does this version already exists on disk?\n\n Parameters\n ----------\n location: str\n Root location in the data store where artifacts are read/written. This is used to\n create folder-like groups inside a datastore. 
This can be, for instance, the name of\n a project or experiment.\n datastore: Datastore\n Data store object, representing the location where the artifacts are read/written.\n \"\"\"\n #: location of the version folder\n version_location_ = version_location(location, self.version_name)\n #: location of the manifest file\n manifest_location = version_manifest_location(version_location_)\n\n return datastore.exists(manifest_location)"
},
{
"identifier": "SimpleVersionName",
"path": "pond/version_name.py",
"snippet": "class SimpleVersionName(VersionName):\n \"\"\"Simple version name are just an integer number (greater than 0) prefixed with \"v\" when\n rendered as string.\"\"\"\n\n _FORMAT = re.compile('^v?([1-9][0-9]*)$')\n\n # --- VersionName class interface\n\n @classmethod\n def from_string(cls, version_name: str) -> 'SimpleVersionName':\n match = SimpleVersionName._FORMAT.match(version_name)\n if not match:\n raise InvalidVersionName(version_name)\n return cls(int(match[1]))\n\n @classmethod\n def next(cls, prev: Optional['VersionName'] = None) -> VersionName:\n if prev is None:\n next_ = SimpleVersionName(1)\n elif not isinstance(prev, SimpleVersionName):\n raise IncompatibleVersionName(prev, SimpleVersionName)\n else:\n next_ = SimpleVersionName(prev.version_number + 1)\n return next_\n\n def __init__(self, version_number: int):\n self.version_number = version_number\n\n # -- VersionName protected interface\n\n def _partial_compare(self, other: VersionName) -> Optional[int]:\n if isinstance(other, SimpleVersionName):\n return 0 if self.version_number == other.version_number else (\n -1 if self.version_number < other.version_number else 1)\n return None\n\n # -- Magic methods\n\n def __hash__(self) -> int:\n return hash(self.version_number)\n\n def __str__(self) -> str:\n return f'v{self.version_number}'"
},
{
"identifier": "VersionName",
"path": "pond/version_name.py",
"snippet": "class VersionName(ABC):\n \"\"\" Base class for all kind of version naming conventions.\n\n It defines a way to sort version names and compute the next one.\n \"\"\"\n\n # --- VersionName class interface\n\n @classmethod\n def class_id(cls):\n \"\"\" String ID to be able to find this class from its name. \"\"\"\n return cls.__name__\n\n @classmethod\n def subclass_from_id(cls, class_id: str) -> Type['Artifact']:\n \"\"\" Find a subclass from its class ID. \"\"\"\n subclasses = cls.__subclasses__()\n for subclass in subclasses:\n if subclass.class_id() == class_id:\n break\n else:\n raise InvalidVersionName(class_id)\n return subclass\n\n @classmethod\n def from_string(cls, version_name: str) -> 'VersionName':\n \"\"\"Parses a string into a version name.\n\n Parameters\n ----------\n version_name: str\n Version name as a string that needs to be parsed\n\n Returns\n -------\n VersionName\n The parsed version name\n\n Raises\n ------\n InvalidVersionName\n If the version name cannot be parsed\n \"\"\"\n # Only first-level subclasses for the moment, it should be sufficient\n # At the same time, we give up defining a version name priority, and will return the\n # first VersionName subclass that can parse the string\n # TODO: remove the magic\n subclasses = cls.__subclasses__()\n for subclass in subclasses:\n try:\n version = subclass.from_string(version_name)\n break\n except InvalidVersionName:\n pass\n else:\n raise InvalidVersionName(version_name)\n return version\n\n @classmethod\n @abstractmethod\n def next(cls, prev: 'VersionName') -> 'VersionName':\n \"\"\" Generate a new version name given a previous one.\n\n If `prev` is None, this method will generate a first version name.\n\n Some subclasses of `VersionName` will ignore the argument `prev`, except in case of\n collision (e.g., datetime version names).\n\n Parameters\n ----------\n prev: Optional['VersionName']\n The previous version name.\n\n Returns\n -------\n VersionName\n A new version name.\n \"\"\"\n ...\n\n @classmethod\n def first(cls) -> 'VersionName':\n \"\"\" Generate the first version name.\n\n Alias for `VersionName.next(None)`.\n\n Returns\n -------\n VersionName\n The first version name.\n \"\"\"\n return cls.next(prev=None)\n\n # --- VersionName protected interface\n\n @abstractmethod\n def _partial_compare(self, that: 'VersionName') -> Optional[int]:\n ...\n\n # --- Magic methods\n\n def __cmp__(self, other: 'VersionName') -> int:\n cmp = self._partial_compare(other)\n return cmp if cmp is not None else _compare_classnames(self, other)\n\n def __eq__(self, other: Any) -> bool:\n return self._partial_compare(other) == 0\n\n def __ne__(self, other: Any) -> bool:\n return self._partial_compare(other) != 0\n\n def __lt__(self, other: Any) -> bool:\n return self.__cmp__(other) < 0\n\n def __le__(self, other: Any) -> bool:\n return self.__cmp__(other) <= 0\n\n def __gt__(self, other: Any) -> bool:\n return self.__cmp__(other) > 0\n\n def __ge__(self, other: Any) -> bool:\n return self.__cmp__(other) >= 0\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(\"{str(self)}\")'"
},
{
"identifier": "VersionedArtifact",
"path": "pond/versioned_artifact.py",
"snippet": "class VersionedArtifact:\n\n def __init__(self,\n artifact_name: str,\n location: str,\n datastore: Datastore,\n artifact_class: Type[Artifact],\n version_name_class: Type[VersionName]):\n \"\"\" An artifact versioned and stored on disk.\n\n `VersionedArtifact` manages the versioning, data, and metadata, of an artifact.\n\n Parameters\n ----------\n artifact_name: str\n Name of the artifact.\n location: str\n Root location in the data store where artifacts are read/written. This is used to\n create folder-like groups inside a datastore. This can be, for instance, the name of\n a project or experiment.\n datastore: Datastore\n Data store object, representing the storage where the artifacts are read/written.\n artifact_class: Type[Artifact]\n version_name_class: Type[VersionName]\n Class used to create increasing version names. The default value,\n `SimpleVersionName` creates version names as `v1`, `v2`, etc.\n \"\"\"\n self.artifact_name = artifact_name\n self.location = location\n self.datastore = datastore\n self.artifact_class = artifact_class\n self.version_name_class = version_name_class\n\n self.versions_manifest = {\n 'artifact_class': artifact_class.class_id(),\n 'version_name_class': version_name_class.class_id(),\n }\n\n self.versions_location = versioned_artifact_location(location, artifact_name)\n # todo this goes to conventions.py\n self.versions_list_location = f'{self.versions_location}/versions.json'\n self.versions_manifest_location = f'{self.versions_location}/manifest.yml'\n\n if not self.datastore.exists(self.versions_location):\n # Create the versioned artifact folder organization if it does not exist\n self.datastore.makedirs(self.versions_location)\n self._write_version_names([])\n self.versions_manifest['artifact_class'] = artifact_class.class_id()\n self.versions_manifest['version_name_class'] = version_name_class.class_id()\n self._write_manifest()\n\n # --- VersionedArtifact class interface\n\n @classmethod\n def from_datastore(cls, artifact_name: str, location: str, datastore: Datastore):\n versions_location = versioned_artifact_location(location, artifact_name)\n versions_manifest_location = f'{versions_location}/manifest.yml'\n versions_manifest = datastore.read_yaml(versions_manifest_location)\n\n artifact_class_id = versions_manifest['artifact_class']\n artifact_class = Artifact.subclass_from_id(artifact_class_id)\n version_name_class_id = versions_manifest['version_name_class']\n version_name_class = VersionName.subclass_from_id(version_name_class_id)\n\n versioned_artifact = cls(\n artifact_name=artifact_name,\n location=location,\n datastore=datastore,\n artifact_class=artifact_class,\n version_name_class=version_name_class,\n )\n return versioned_artifact\n\n # --- VersionedArtifact public interface\n\n def read(self, version_name: Optional[Union[str, VersionName]] = None) -> Version:\n \"\"\" Read a version of the artifact.\n\n Parameters\n ----------\n version_name: Union[str, VersionName], optional\n Version name, given as a string (more common) or as VersionName instance. 
If None,\n the latest version name for the given artifact is used.\n\n Raises\n ------\n VersionDoesNotExist\n If the requested version does not exist.\n\n Returns\n -------\n Version\n The version object read from storage.\n \"\"\"\n\n if version_name is not None:\n if isinstance(version_name, str):\n version_name = self.version_name_class.from_string(version_name)\n else:\n version_name = self.latest_version_name()\n\n version = Version.read(\n version_name=version_name,\n artifact_class=self.artifact_class,\n datastore=self.datastore,\n location=self.versions_location,\n )\n\n return version\n\n def write(self,\n data: DataType,\n manifest: Manifest,\n version_name: Optional[Union[str, VersionName]] = None,\n write_mode: WriteMode = WriteMode.ERROR_IF_EXISTS):\n \"\"\" Write some data to storage.\n\n Parameters\n ----------\n data: DataType\n The artifact data to write.\n manifest: Manifest\n Metadata to store with the data.\n version_name: Union[str, VersionName], optional\n Version name, given as a string (more common) or as VersionName instance. If None,\n the latest version name for the given artifact is used.\n write_mode: WriteMode\n Write mode, either WriteMode.ERROR_IF_EXISTS or WriteMode.OVERWRITE.\n\n Raises\n ------\n IncompatibleVersionName\n If the provided version name does not correspond to the version name class used in\n this versioned artifact.\n VersionAlreadyExists\n If the provided version name exists, and the write mode is \"ERROR_IF_EXISTS\".\n\n Returns\n -------\n Version\n The version object read from storage.\n \"\"\"\n # todo lock\n\n if version_name is None:\n prev_version_name = self.latest_version_name(raise_if_none=False)\n version_name = self.version_name_class.next(prev_version_name)\n\n if isinstance(version_name, str):\n version_name = VersionName.from_string(version_name)\n\n if not isinstance(version_name, self.version_name_class):\n raise IncompatibleVersionName(\n version_name=version_name,\n version_name_class=self.version_name_class,\n )\n\n user_metadata = manifest.collect_section('user', default_metadata={})\n artifact = self.artifact_class(data, metadata=user_metadata)\n version = Version(self.artifact_name, version_name, artifact)\n\n if version.exists(self.versions_location, self.datastore):\n if write_mode == WriteMode.ERROR_IF_EXISTS:\n uri = version.get_uri(self.location, self.datastore)\n raise VersionAlreadyExists(uri)\n elif write_mode == WriteMode.OVERWRITE:\n uri = version.get_uri(self.location, self.datastore)\n logger.info(f\"Deleting existing version before overwriting: {uri}\")\n version_location_ = version_location(self.versions_location, version_name)\n self.datastore.delete(version_location_, recursive=True)\n\n version.write(self.versions_location, self.datastore, manifest)\n self._register_version_name(version_name)\n\n return version\n\n def all_version_names(self) -> List[VersionName]:\n \"\"\"Get all locked (and existing) artifact version names.\n\n Locked versions might not be existing yet, they are just reserved names.\n\n Returns\n -------\n List[VersionName]\n A list of all locked version names\n \"\"\"\n try:\n raw_versions = json.loads(self.datastore.read(self.versions_list_location))\n except FileNotFoundError:\n raw_versions = []\n versions = [VersionName.from_string(raw_version) for raw_version in list(raw_versions)]\n return sorted(versions)\n\n def version_names(self) -> List[VersionName]:\n \"\"\"Get all existing artifact version names.\n\n Versions are considered as \"existing\" as soon as they have a 
\"manifest.yml\"\n\n Returns\n -------\n List[VersionName]\n A list of all existing version names\n \"\"\"\n # todo create version_exists\n return [\n name for name in self.all_version_names()\n if self.datastore.exists(\n version_manifest_location(\n version_location(self.versions_location, name)\n )\n )\n ]\n\n def latest_version_name(self, raise_if_none=True) -> VersionName:\n \"\"\"Get the name of the latest version. If none is defined, will raise an exception\n\n Raises\n ------\n ArtifactHasNoVersion\n If the artifact has no latest version\n\n Returns\n -------\n VersionName\n The name of the latest version\n \"\"\"\n versions = self.version_names()\n if not versions:\n if raise_if_none:\n raise ArtifactHasNoVersion(self.location)\n else:\n return None\n return versions[-1]\n\n def latest_version(self) -> Version:\n \"\"\"Get the latest version. If none is defined, will raise an exception\n\n Raises\n ------\n TableHasNoVersion\n If the artifact has no latest version\n\n Returns\n -------\n Version\n The latest version of this artifact\n \"\"\"\n return self.read(self.latest_version_name())\n\n # TODO: TEST\n def delete_version(self, version_name: Union[str, VersionName]) -> None:\n \"\"\"Delete a version, will not fail if the version did not exist\n\n Parameters\n ----------\n version_name: Union[str, VersionName]\n Name of the version to delete\n \"\"\"\n if not isinstance(version_name, VersionName):\n version_name = VersionName.from_string(version_name)\n\n self.datastore.delete(version_location(self.location, version_name), recursive=True)\n\n # todo: need to lock versions.json here\n names = self.all_version_names()\n if version_name in names:\n names.remove(version_name)\n self._write_version_names(names)\n # todo: need to unlock versions.json here\n\n # --- VersionedArtifact private interface\n\n def _create_version_name(self, retry: bool = True) -> VersionName:\n versions_lock_file = versions_lock_file_location(self.location)\n if self.datastore.exists(versions_lock_file):\n # In case another process just created the data dir and did non update yet the versions\n # list, let's wait a little and retry once\n if retry:\n time.sleep(NEW_VERSION_WAIT_MS / 1000)\n return self._create_version_name(False)\n else:\n raise ArtifactVersionsIsLocked(self.location)\n # todo: this is not safe in case of concurrency.\n self.datastore.write_string(versions_lock_file, '')\n try:\n names = self.all_version_names()\n name = names[-1].next() if names else FIRST_VERSION_NAME\n new_version_name = self._register_version_name(name)\n finally:\n self.datastore.delete(versions_lock_file)\n\n return new_version_name\n\n def _register_version_name(self, name: VersionName) -> VersionName:\n # todo: need to lock versions.json here\n names = self.all_version_names()\n\n if name not in names:\n names.append(name)\n self._write_version_names(names)\n # todo: need to unlock versions.json here\n\n return name\n\n def _write_version_names(self, names: List[VersionName]) -> None:\n \"\"\"Sort, serialize and write version names\"\"\"\n strings = [str(name) for name in sorted(names)]\n self.datastore.write_json(self.versions_list_location, strings)\n\n def _write_manifest(self):\n self.datastore.write_yaml(self.versions_manifest_location, self.versions_manifest)\n\n def _read_manifest(self):\n return self.datastore.read_yaml(self.versions_manifest_location)"
}
] | from typing import Any, Dict, Optional, Set, Type, Union
from pond.artifact import Artifact
from pond.artifact.artifact_registry import ArtifactRegistry, global_artifact_registry
from pond.conventions import DataType, WriteMode
from pond.metadata.metadata_source import MetadataSource
from pond.metadata.dict import DictMetadataSource
from pond.metadata.manifest import Manifest
from pond.storage.datastore import Datastore
from pond.version import Version
from pond.version_name import SimpleVersionName, VersionName
from pond.versioned_artifact import VersionedArtifact | 9,823 | create folder-like groups inside a datastore. This can be, for instance, the name of
a project or experiment.
datastore: Datastore
Data store object, representing the storage where the artifacts are read/written.
author: str
Author name/identifier, used as metadata. Default is 'NA'.
version_name_class: VersionName
Class used to create increasing version names. The default value,
`SimpleVersionName` creates version names as `v1`, `v2`, etc.
artifact_registry: ArtifactRegistry
Registry object mapping data types and file formats to an artifact class able to
read/write them. The artifact classes distributed with `pond` register automatically
to the default value, `global_artifact_registry`.
"""
self.source = source
self.location = location
self.datastore = datastore
self.author = author
self.version_name_class = version_name_class
self.artifact_registry = artifact_registry
# History of all read versions, will be used as default
# "inputs" for written tables. Feel free to empty it whenever needed.
self.read_history: Set[str] = set()
# Dict[TableRef, List[Version]]: History of all written versions
self.write_history: Set[str] = set()
def read_version(self,
name: str,
version_name: Optional[Union[str, VersionName]] = None) -> Version:
""" Read a version, given its name and version name.
If no version name is specified, the latest version is read.
Parameters
----------
name: str
Artifact name
version_name: str or VersionName
Version name, given as a string (more common) or as VersionName instance. If None,
the latest version name for the given artifact is used.
        Returns
        -------
version: Version
The loaded Version object.
See Also
--------
`read_artifact` -- Read an Artifact object, including artifact data and metadata
`read` -- Read the data in an artifact
"""
versioned_artifact = VersionedArtifact.from_datastore(
artifact_name=name,
location=self.location,
datastore=self.datastore,
)
version = versioned_artifact.read(version_name=version_name)
version_id = version.get_uri(self.location, self.datastore)
self.read_history.add(version_id)
return version
def read_artifact(self,
name: str,
version_name: Optional[Union[str, VersionName]] = None) -> Any:
""" Read an artifact given its name and version name.
If no version name is specified, the latest version is read.
Parameters
----------
name: str
Artifact name
version_name: str or VersionName
Version name, given as a string (more common) or as VersionName instance. If None,
the latest version name for the given artifact is used.
        Returns
        -------
artifact: Artifact
The loaded artifact
See Also
--------
`read` -- Read the data in an artifact
`read_version` -- Read a Version object, including the artifact object and version manifest
"""
version = self.read_version(name, version_name)
return version.artifact
def read(self,
name: str,
version_name: Optional[Union[str, VersionName]] = None) -> Any:
""" Read some data given its name and version name.
If no version name is specified, the latest version is read.
Parameters
----------
name: str
Artifact name
version_name: str or VersionName
Version name, given as a string (more common) or as VersionName instance. If None,
the latest version name for the given artifact is used.
        Returns
        -------
data: Any
The loaded data. The metadata is discarded.
See Also
--------
`read_artifact` -- Read an Artifact object, including artifact data and metadata
`read_version` -- Read a Version object, including the artifact object and version manifest
"""
artifact = self.read_artifact(name, version_name)
return artifact.data
# TODO version name is a string vs is a VersionName instance
def write(self,
|
class Activity:
# TODO: can location have subpaths? e.g. `experiment1/test22`
def __init__(self,
source: str,
location: str,
datastore: Datastore,
                 author: str = 'NA',
version_name_class: Type[VersionName] = SimpleVersionName,
artifact_registry: ArtifactRegistry = global_artifact_registry):
""" Read and write artifacts with lineage and metadata.
Activity is the main user-facing interface for pond. Most of the usages of `pond` only
ever interact with an instance of this object.
Parameters
----------
source: str
String defining where the read/write operations are made. Often, this is the path of
a file or notebook, used for lineage tracing.
location: str
Root location in the data store where artifacts are read/written. This is used to
create folder-like groups inside a datastore. This can be, for instance, the name of
a project or experiment.
datastore: Datastore
Data store object, representing the storage where the artifacts are read/written.
author: str
Author name/identifier, used as metadata. Default is 'NA'.
version_name_class: VersionName
Class used to create increasing version names. The default value,
`SimpleVersionName` creates version names as `v1`, `v2`, etc.
artifact_registry: ArtifactRegistry
Registry object mapping data types and file formats to an artifact class able to
read/write them. The artifact classes distributed with `pond` register automatically
to the default value, `global_artifact_registry`.
"""
self.source = source
self.location = location
self.datastore = datastore
self.author = author
self.version_name_class = version_name_class
self.artifact_registry = artifact_registry
# History of all read versions, will be used as default
# "inputs" for written tables. Feel free to empty it whenever needed.
self.read_history: Set[str] = set()
# Dict[TableRef, List[Version]]: History of all written versions
self.write_history: Set[str] = set()
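    # Illustrative sketch (assumed usage, not from the pond source): a minimal way to
    # construct an Activity. A concrete Datastore implementation is assumed to exist
    # elsewhere (none is defined in this file); 'experiments', 'notebooks/analysis.ipynb',
    # and 'jane.doe' are placeholder values.
    #
    #     datastore = ...  # any concrete subclass of pond.storage.datastore.Datastore
    #     activity = Activity(
    #         source='notebooks/analysis.ipynb',  # recorded for lineage tracing
    #         location='experiments',             # folder-like group inside the datastore
    #         datastore=datastore,
    #         author='jane.doe',                  # stored as metadata
    #     )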
def read_version(self,
name: str,
version_name: Optional[Union[str, VersionName]] = None) -> Version:
""" Read a version, given its name and version name.
If no version name is specified, the latest version is read.
Parameters
----------
name: str
Artifact name
version_name: str or VersionName
Version name, given as a string (more common) or as VersionName instance. If None,
the latest version name for the given artifact is used.
        Returns
        -------
version: Version
The loaded Version object.
See Also
--------
`read_artifact` -- Read an Artifact object, including artifact data and metadata
`read` -- Read the data in an artifact
"""
versioned_artifact = VersionedArtifact.from_datastore(
artifact_name=name,
location=self.location,
datastore=self.datastore,
)
version = versioned_artifact.read(version_name=version_name)
version_id = version.get_uri(self.location, self.datastore)
self.read_history.add(version_id)
return version
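    # Illustrative sketch (assumed usage, not from the pond source): the Version returned
    # above bundles the stored artifact and its manifest, so metadata can be inspected
    # next to the data. 'my_table' and 'v1' are placeholder names.
    #
    #     version = activity.read_version('my_table', version_name='v1')
    #     manifest_dict = version.manifest.collect()   # nested dict of manifest sections
    #     payload = version.artifact.data              # the stored data itself
    #     uri = version.get_uri(activity.location, activity.datastore)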
def read_artifact(self,
name: str,
version_name: Optional[Union[str, VersionName]] = None) -> Any:
""" Read an artifact given its name and version name.
If no version name is specified, the latest version is read.
Parameters
----------
name: str
Artifact name
version_name: str or VersionName
Version name, given as a string (more common) or as VersionName instance. If None,
the latest version name for the given artifact is used.
        Returns
        -------
artifact: Artifact
The loaded artifact
See Also
--------
`read` -- Read the data in an artifact
`read_version` -- Read a Version object, including the artifact object and version manifest
"""
version = self.read_version(name, version_name)
return version.artifact
def read(self,
name: str,
version_name: Optional[Union[str, VersionName]] = None) -> Any:
""" Read some data given its name and version name.
If no version name is specified, the latest version is read.
Parameters
----------
name: str
Artifact name
version_name: str or VersionName
Version name, given as a string (more common) or as VersionName instance. If None,
the latest version name for the given artifact is used.
        Returns
        -------
data: Any
The loaded data. The metadata is discarded.
See Also
--------
`read_artifact` -- Read an Artifact object, including artifact data and metadata
`read_version` -- Read a Version object, including the artifact object and version manifest
"""
artifact = self.read_artifact(name, version_name)
return artifact.data
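    # Illustrative sketch (assumed usage, not from the pond source): the three read
    # methods expose the same stored version at different levels of detail.
    # 'my_table' is a placeholder artifact name.
    #
    #     version = activity.read_version('my_table')    # Version: artifact + manifest
    #     artifact = activity.read_artifact('my_table')  # Artifact: data + user metadata
    #     data = activity.read('my_table')               # just the data, metadata discarded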
# TODO version name is a string vs is a VersionName instance
def write(self, | data: DataType, | 2 | 2023-12-24 13:05:58+00:00 | 12k |
demirogun/pyethnobiology | pyethnobiology/pyethnobiology.py | [
{
"identifier": "UR",
"path": "pyethnobiology/indices.py",
"snippet": "class UR:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:\n data (pd.DataFrame): DataFrame containing plant usage information.\n informant_column (str, optional): Name of the column containing informant IDs.\n taxon_column (str, optional): Name of the column containing species names.\n use_column (str, optional): Name of the column containing plant uses.\n \"\"\"\n\n self.data = data\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n self.title = \"Use Report (UR) per Species\"\n\n def calculate(self):\n \"\"\"\n Calculates the UR for each species.\n\n Returns:\n pd.DataFrame: DataFrame containing taxon and UR columns.\n \"\"\"\n\n ur_df = (\n self.data.groupby(self.taxon_column, observed=True)\n .size()\n .reset_index(name=\"UR\")\n .sort_values(by=\"UR\", ascending=False)\n .reset_index(drop=True)\n )\n return ur_df\n\n def save_data(self):\n UR_df = self.calculate()\n UR_df.to_csv(\"use_report_UR.csv\", index=False)\n print(\"Saved to use_report_UR.csv\")\n\n def plot_radial(self, filename=\"UR.png\", dpi=300, num_row=10, ytick_position=\"onbar\", colors=None, show_colorbar=True):\n # Plot radial bar chart\n radial_plot = RadialPlot(self.calculate(), self.title, \"UR\", num_row, ytick_position, colors, show_colorbar,\n self.informant_column, self.taxon_column, self.use_column)\n radial_plot.save_plot(filename, dpi=dpi)\n radial_plot.plot()"
},
{
"identifier": "CI",
"path": "pyethnobiology/indices.py",
"snippet": "class CI:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:\n data (pd.DataFrame): DataFrame containing plant usage information.\n informant_column (str, optional): Name of the column containing informant IDs. Defaults to \"informant\".\n taxon_column (str, optional): Name of the column containing species names. Defaults to \"taxon\".\n use_column (str, optional): Name of the column containing plant uses. Defaults to \"ailments_treated\".\n \"\"\"\n\n self.data = data\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n self.title = \"Cultural Importance (CI) Index\"\n\n def calculate(self):\n \"\"\"\n Calculates the cultural importance index (CI) for each species.\n\n Returns:\n pd.DataFrame: DataFrame containing taxon and CI columns.\n \"\"\"\n\n # Calculate Use Reports (UR) per species\n ur_df = UR(self.data, self.informant_column, self.taxon_column, self.use_column).calculate()\n\n # Count unique informants\n informants_count = self.data[self.informant_column].nunique()\n\n # Merge UR and informants count based on 'taxon'\n ci_df = pd.merge(\n ur_df,\n self.data[[self.taxon_column, self.informant_column]]\n .drop_duplicates()\n .groupby(self.taxon_column, observed=False)\n .size()\n .reset_index(name=f\"{self.informant_column}s_count\"),\n on=self.taxon_column,\n )\n\n # Calculate CI index (UR divided by the number of informants)\n ci_df[\"CI\"] = ci_df[\"UR\"] / informants_count\n\n # Keep only relevant columns\n ci_df = ci_df[[self.taxon_column, \"CI\"]]\n\n return ci_df\n\n def save_data(self):\n CI_df = self.calculate()\n CI_df.to_csv(\"cultural_importance_CI.csv\", index=False)\n print(\"Saved to cultural_importance_CI.csv\")\n\n def plot_radial(self, filename=\"CI.png\", dpi=300, num_row=10, ytick_position=\"onbar\", colors=None, show_colorbar=True):\n # Plot radial bar chart\n radial_plot = RadialPlot(self.calculate(), self.title, \"CI\", num_row, ytick_position,\n colors, show_colorbar, self.informant_column, self.taxon_column, self.use_column)\n radial_plot.save_plot(filename, dpi=dpi)\n radial_plot.plot()"
},
{
"identifier": "FC",
"path": "pyethnobiology/indices.py",
"snippet": "class FC:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:\n data (pd.DataFrame): DataFrame containing plant usage information.\n informant_column (str, optional): Name of the column containing informant IDs. Defaults to \"informant\".\n taxon_column (str, optional): Name of the column containing species names. Defaults to \"taxon\".\n use_column (str, optional): Name of the column containing plant uses. Defaults to \"ailments_treated\".\n \"\"\"\n\n self.data = data\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n\n def calculate(self):\n \"\"\"\n Calculates the frequency of citation (FC) for each species.\n\n Returns:\n pd.DataFrame: DataFrame containing taxon and FC columns.\n \"\"\"\n\n # Calculate FC per species by counting unique informants for each taxon\n fc_df = (\n self.data.groupby(self.taxon_column, observed=True)[self.informant_column]\n .nunique()\n .reset_index(name=\"FC\")\n )\n\n # Sort FC values in descending order\n fc_df = fc_df.sort_values(by=\"FC\", ascending=False).reset_index(drop=True)\n\n return fc_df\n\n def save_data(self):\n FC_df = self.calculate()\n FC_df.to_csv(\"frequency_of_citation_FC.csv\", index=False)\n print(\"Saved to frequency_of_citation_FC.csv\")\n\n def plot_radial(self, filename=\"FC.png\", dpi=300, num_row=10, ytick_position=\"onbar\", colors=None, show_colorbar=True):\n # Plot radial bar chart\n radial_plot = RadialPlot(self.calculate(), \"Frequency of Citation (FC)\", \"FC\", num_row, ytick_position, colors,\n show_colorbar, self.informant_column, self.taxon_column, self.use_column)\n\n radial_plot.save_plot(filename, dpi=dpi)\n radial_plot.plot()"
},
{
"identifier": "NU",
"path": "pyethnobiology/indices.py",
"snippet": "class NU:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:\n data (pd.DataFrame): DataFrame containing plant usage information.\n informant_column (str, optional): Name of the column containing informant IDs.\n taxon_column (str, optional): Name of the column containing species names.\n use_column (str, optional): Name of the column containing plant uses.\n \"\"\"\n\n self.data = data\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n self.title = \"Number of Uses (NU) per Species\"\n\n def calculate(self):\n \"\"\"\n Calculates the NU for each species.\n\n Returns:\n pd.DataFrame: DataFrame containing taxon and NU columns.\n \"\"\"\n\n nu_df = (\n self.data.groupby(self.taxon_column, observed=True)[self.use_column]\n .nunique()\n .reset_index(name=\"NU\")\n )\n nu_df = nu_df.sort_values(by=\"NU\", ascending=False).reset_index(drop=True)\n return nu_df\n def save_data(self):\n NU_df = self.calculate()\n NU_df.to_csv(\"number_of_uses_NU.csv\", index=False)\n print(\"Saved to number_of_uses_NU.csv\")\n\n def plot_radial(self, filename=\"NU.png\", dpi=300, num_row=10, ytick_position=\"onbar\", colors=None, show_colorbar=True):\n # Plot radial bar chart\n radial_plot = RadialPlot(self.calculate(), self.title, \"NU\", num_row, ytick_position, colors, show_colorbar, self.informant_column, self.taxon_column, self.use_column)\n\n radial_plot.save_plot(filename, dpi=dpi)\n radial_plot.plot()"
},
{
"identifier": "RFC",
"path": "pyethnobiology/indices.py",
"snippet": "class RFC:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:\n data (pd.DataFrame): DataFrame containing plant usage information.\n informant_column (str, optional): Name of the column containing informant IDs.\n taxon_column (str, optional): Name of the column containing species names.\n use_column (str, optional): Name of the column containing plant uses.\n \"\"\"\n\n self.data = data\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n self.title = \"Relative Frequency of Citation (RFC) per Species\"\n\n def calculate(self):\n \"\"\"\n Calculates the RFC for each species.\n\n Returns:\n pd.DataFrame: DataFrame containing taxon and RFC columns.\n \"\"\"\n\n # Get frequency of citation (FC) for each species\n fc_df = FC(self.data, self.informant_column, self.taxon_column, self.use_column).calculate()\n\n # Get total number of informants\n total_informants = self.data[self.informant_column].nunique()\n\n # Calculate use reports (UR) for each species\n ur_df = (\n self.data[[self.taxon_column, self.informant_column]]\n .groupby(self.taxon_column, observed=True)\n .size()\n .reset_index(name=\"UR\")\n )\n\n # Merge FC, UR, and total informants\n rfc_df = pd.merge(fc_df, ur_df, on=self.taxon_column)\n rfc_df[\"RFC\"] = rfc_df[\"FC\"] / total_informants\n\n # Keep only taxon and RFC columns\n rfc_df = rfc_df[[self.taxon_column, \"RFC\"]]\n return rfc_df\n\n def save_data(self):\n RFC_df = self.calculate()\n RFC_df.to_csv(\"relative_frequency_of_citation_RFC.csv\", index=False)\n print(\"Saved to relative_frequency_of_citation_RFC.csv\")\n\n def plot_radial(self, filename=\"RFC.png\", dpi=300, num_row=10, ytick_position=\"onbar\", colors=None, show_colorbar=True):\n # Plot radial bar chart\n radial_plot = RadialPlot(self.calculate(), self.title, \"RFC\", num_row, ytick_position, colors, show_colorbar, self.informant_column, self.taxon_column, self.use_column)\n radial_plot.save_plot(filename, dpi=dpi)\n radial_plot.plot()"
},
{
"identifier": "RI",
"path": "pyethnobiology/indices.py",
"snippet": "class RI:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:\n data (pd.DataFrame): DataFrame containing plant usage information.\n informant_column (str, optional): Name of the column containing informant IDs.\n taxon_column (str, optional): Name of the column containing species names.\n use_column (str, optional): Name of the column containing plant uses.\n \"\"\"\n\n self.data = data\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n self.title = \"Relative Importance (RI) Index per Species\"\n\n def calculate(self):\n \"\"\"\n Calculates the RI for each species.\n\n Returns:\n pd.DataFrame: DataFrame containing taxon and RI columns.\n \"\"\"\n\n # Get RFC and NU for each species\n rfc_df = RFC(\n self.data, self.informant_column, self.taxon_column, self.use_column\n ).calculate()\n nu_df = NU(\n self.data, self.informant_column, self.taxon_column, self.use_column\n ).calculate()\n\n # Normalize RFC and NU\n max_rfc = rfc_df[\"RFC\"].max()\n max_nu = nu_df[\"NU\"].max()\n rfc_df[\"RFC(max)\"] = rfc_df[\"RFC\"] / max_rfc\n nu_df[\"RNU(max)\"] = nu_df[\"NU\"] / max_nu\n\n # Merge RFC(max) and RNU(max)\n ri_df = pd.merge(\n rfc_df[[self.taxon_column, \"RFC(max)\"]],\n nu_df[[self.taxon_column, \"RNU(max)\"]],\n on=self.taxon_column,\n )\n\n # Calculate RI index\n ri_df[\"RI\"] = (ri_df[\"RFC(max)\"] + ri_df[\"RNU(max)\"]) / 2\n\n # Sort and return RI values\n ri_df = ri_df.sort_values(by=\"RI\", ascending=False).reset_index(drop=True)\n return ri_df[[self.taxon_column, \"RI\"]]\n\n def save_data(self):\n RI_df = self.calculate()\n RI_df.to_csv(\"relative_importance_RI.csv\", index=False)\n print(\"Saved to relative_importance_RI.csv\")\n\n def plot_radial(self, filename=\"RI.png\", dpi=300, num_row=10, ytick_position=\"onbar\", colors=None, show_colorbar=True):\n # Plot radial bar chart\n radial_plot = RadialPlot(self.calculate(), self.title, \"RI\", num_row, ytick_position, colors, show_colorbar,\n self.informant_column, self.taxon_column, self.use_column)\n radial_plot.save_plot(filename, dpi=dpi)\n radial_plot.plot()"
},
{
"identifier": "UV",
"path": "pyethnobiology/indices.py",
"snippet": "class UV:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:\n data (pd.DataFrame): DataFrame containing plant usage information.\n informant_column (str, optional): Name of the column containing informant IDs.\n taxon_column (str, optional): Name of the column containing species names.\n use_column (str, optional): Name of the column containing plant uses.\n \"\"\"\n\n self.data = data\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n self.title = \"Use Value (UV) per Species\"\n\n def calculate(self):\n \"\"\"\n Calculates the UV for each species.\n\n Returns:\n pd.DataFrame: DataFrame containing taxon and UV columns.\n \"\"\"\n UV_df = CI(self.data, self.informant_column, self.taxon_column, self.use_column).calculate()\n UV_df = UV_df.rename(columns={\"CI\": \"UV\"})\n return UV_df\n\n def save_data(self):\n UV_df = self.calculate()\n UV_df.to_csv(\"use_value_UV.csv\", index=False)\n print(\"Saved to use_value_UV.csv\")\n\n def plot_radial(self, filename=\"UV.png\", dpi=300, num_row=10, ytick_position=\"onbar\", colors=None, show_colorbar=True):\n # Plot radial bar chart\n radial_plot = RadialPlot(self.calculate(), self.title, \"UV\", num_row, ytick_position, colors, show_colorbar, self.informant_column, self.taxon_column, self.use_column)\n radial_plot.save_plot(filename, dpi=dpi)\n radial_plot.plot()"
},
{
"identifier": "CV",
"path": "pyethnobiology/indices.py",
"snippet": "class CV:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:\n data (pd.DataFrame): DataFrame containing plant usage information.\n informant_column (str, optional): Name of the column containing informant IDs. Defaults to \"informant\".\n taxon_column (str, optional): Name of the column containing species names. Defaults to \"taxon\".\n use_column (str, optional): Name of the column containing plant uses. Defaults to \"ailments_treated\".\n \"\"\"\n\n self.data = data\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n self.title = \"Cultural Value (CV) for Ethnospecies\"\n\n def calculate(self):\n \"\"\"\n Calculates the cultural value (CV) for each ethnospecies.\n\n Returns:\n pd.DataFrame: DataFrame containing taxon and CV columns.\n \"\"\"\n\n # Calculate Use Reports (UR) per species\n ur_df = UR(self.data, self.informant_column, self.taxon_column, self.use_column).calculate()\n\n # Calculate Number of Uses (NU) per species\n nu_df = NU(self.data, self.informant_column, self.taxon_column, self.use_column).calculate()\n\n # Calculate Frequency of Citation (FC) per species\n fc_df = FC(self.data, self.informant_column, self.taxon_column, self.use_column).calculate()\n\n # Calculate Uce (Use Citation for Ethnospecies)\n potential_uses = self.data[self.use_column].nunique()\n nu_df[\"Uce\"] = nu_df[\"NU\"] / potential_uses\n\n # Calculate Ice (Informant Citation Index)\n ice = fc_df[\"FC\"] / self.data[self.informant_column].nunique()\n fc_df[\"Ice\"] = ice\n\n # Calculate IUce (Informant Use Index)\n iuce = ur_df[\"UR\"] / self.data[self.informant_column].nunique()\n ur_df[\"IUce\"] = iuce\n\n # Merge dataframes to calculate CV\n merged_df = pd.merge(nu_df[[self.taxon_column, \"Uce\"]], ur_df[[self.taxon_column, \"IUce\"]], on=self.taxon_column)\n merged_df = pd.merge(merged_df, fc_df[[self.taxon_column, \"Ice\"]], on=self.taxon_column)\n\n # Calculate CV = Uce * Ice * IUce\n merged_df[\"CV\"] = merged_df[\"Uce\"] * merged_df[\"Ice\"] * merged_df[\"IUce\"]\n\n # Sort and round CV values\n cv_df = merged_df[[self.taxon_column, \"CV\"]].sort_values(by=\"CV\", ascending=False)\n\n return cv_df\n\n def save_data(self):\n CV_df = self.calculate()\n CV_df.to_csv(\"cultural_value_CV.csv\", index=False)\n print(\"Saved to cultural_value_CV.csv\")\n\n def plot_radial(self, filename=\"CV.png\", dpi=300, num_row=10, ytick_position=\"onbar\", colors=None, show_colorbar=True):\n # Plot radial bar chart\n radial_plot = RadialPlot(self.calculate(), self.title, \"CV\", num_row, ytick_position, colors, show_colorbar, self.informant_column, self.taxon_column, self.use_column)\n radial_plot.save_plot(filename, dpi=dpi)\n radial_plot.plot()"
},
{
"identifier": "FL",
"path": "pyethnobiology/indices.py",
"snippet": "class FL:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:\n data (pd.DataFrame): DataFrame containing plant usage information.\n informant_column (str, optional): Name of the column containing informant IDs. Defaults to \"informant\".\n taxon_column (str, optional): Name of the column containing species names. Defaults to \"taxon\".\n use_column (str, optional): Name of the column containing plant uses. Defaults to \"ailments_treated\".\n \"\"\"\n\n self.data = data\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n self.title = \"Fidelity Level (FL) per Species\"\n\n def calculate(self):\n \"\"\"\n Calculates the fidelity level (FL) for each species-use combination.\n\n Returns:\n pd.DataFrame: DataFrame containing taxon, use, and FL columns.\n \"\"\"\n\n # Calculate Frequency of Citation (FC) per species\n fc_df = FC(self.data, self.informant_column, self.taxon_column, self.use_column).calculate()\n\n # Count informants for each species-use combination\n ns_df = (\n self.data.groupby([self.taxon_column, self.use_column])[self.informant_column]\n .nunique()\n .reset_index(name=\"Ns\")\n )\n\n # Merge FC and Ns dataframes\n merged_df = pd.merge(ns_df, fc_df, on=self.taxon_column)\n\n # Calculate FL = (Ns * 100) / FC\n merged_df[\"FL\"] = (merged_df[\"Ns\"] * 100) / merged_df[\"FC\"]\n\n # Exclude rows with FL of 0\n merged_df = merged_df[merged_df[\"FL\"] != 0]\n\n return merged_df[[self.taxon_column, self.use_column, \"FL\"]]\n\n def save_data(self, filename=\"fidelity_level_FL.csv\"):\n \"\"\"\n Saves the calculated FL data to a CSV file.\n\n Args:\n filename (str, optional): Name of the CSV file to save. Defaults to \"fidelity_level_FL.csv\".\n \"\"\"\n\n fl_df = self.calculate()\n fl_df.to_csv(filename, index=False)\n print(f\"Saved to {filename}\")\n\n def plot_heatmap(self,\n filename=\"FL.png\",\n cmap=\"coolwarm\",\n show_colorbar=True,\n colorbar_shrink=0.50,\n plot_width=10,\n plot_height=8,\n dpi=300,\n fillna_zero=True):\n \"\"\"\n Creates a heatmap of FL values for each species-use combination,\n with customizable features for plot appearance and layout.\n \"\"\"\n\n data = self.calculate()\n heatmap_plot = HeatmapPlot(\n data=data,\n title=\"Fidelity Level (FL)\",\n value_column=\"FL\",\n row_column=self.taxon_column,\n column_column=self.use_column,\n cmap=cmap,\n show_colorbar=show_colorbar,\n colorbar_shrink=colorbar_shrink,\n plot_width=plot_width,\n plot_height=plot_height,\n dpi=dpi,\n fillna_zero=fillna_zero,\n )\n heatmap_plot.save_plot(filename, dpi=dpi)\n return heatmap_plot.plot()"
},
{
"identifier": "FIC",
"path": "pyethnobiology/indices.py",
"snippet": "class FIC:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:\n data (pd.DataFrame): DataFrame containing plant usage information.\n informant_column (str, optional): Name of the column containing informant IDs.\n taxon_column (str, optional): Name of the column containing species names.\n use_column (str, optional): Name of the column containing plant uses.\n \"\"\"\n\n self.data = data\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n self.title = \"Informant Consensus Factor (FIC)\"\n\n def calculate(self):\n \"\"\"\n Calculates the FIC for each ailment category.\n\n Returns:\n pd.DataFrame: DataFrame containing ailment category and FIC columns.\n \"\"\"\n\n unique_ailment_categories = self.data[self.use_column].unique()\n fic_values = []\n\n for ailment_category in unique_ailment_categories:\n specific_data = self.data[self.data[self.use_column] == ailment_category]\n\n # Calculate Nur (number of use reports)\n nur = specific_data.shape[0]\n\n # Calculate Nt (number of taxa used)\n nt = specific_data[self.taxon_column].nunique()\n\n # Calculate FIC value\n if nur > nt:\n fic = (nur - nt) / (nur - 1)\n else:\n fic = 0 # Set FIC to 0 if Nur <= Nt\n\n fic_values.append({self.use_column: ailment_category, \"FIC\": fic})\n\n fic_df = pd.DataFrame(fic_values)\n fic_df = fic_df.sort_values(by=\"FIC\", ascending=False).reset_index(drop=True)\n return fic_df\n\n def save_data(self):\n FIC_df = self.calculate()\n FIC_df.to_csv(\"informant_consensus_factor_FIC.csv\", index=False)\n print(\"Saved to informant_consensus_factor_FIC.csv\")\n\n def plot_radial(self, filename=\"FIC.png\", dpi=300, num_row=10, ytick_position=\"onbar\", colors=None, show_colorbar=True):\n # Plot radial bar chart\n radial_plot = RadialPlot(self.calculate(), self.title, \"FIC\", num_row, ytick_position, colors, show_colorbar, self.informant_column, self.taxon_column, self.use_column)\n radial_plot.save_plot(filename, dpi=dpi)\n radial_plot.plot()"
},
{
"identifier": "Jaccard",
"path": "pyethnobiology/stats.py",
"snippet": "class Jaccard:\n def __init__(self, data: pd.DataFrame):\n self.data = data\n\n def convert_data(self, literature_column: str, taxon_column: str, use_column: str) -> pd.DataFrame:\n \"\"\"Converts data to a specified format, handling varying ailment names and extracting literature references.\n\n Args:\n data (pd.DataFrame): The input DataFrame containing the data to be converted.\n literature_column (str): The name of the column containing literature references.\n taxon_column (str): The name of the column containing taxon names.\n use_column (str): The name of the column containing ailment names.\n\n Returns:\n pd.DataFrame: The converted DataFrame with the following columns:\n - \"study\": Study identifier (either \"My Study\" or literature references)\n - taxon_column (str): Taxon name\n - Ailment columns (one for each unique ailment): 0 or 1 indicating presence/absence\n \"\"\"\n\n # Ensure literature column is string type\n if not pd.api.types.is_string_dtype(self.data[literature_column]):\n self.data[literature_column] = self.data[literature_column].astype(str)\n\n # Create an empty DataFrame with the desired columns\n converted_data = pd.DataFrame(columns=[\"study\", taxon_column])\n unique_ailments = set(self.data[use_column])\n for ailment in unique_ailments:\n converted_data[ailment] = 0 # Add columns for all unique ailments\n\n # Iterate through each row efficiently using itertuples\n for row in self.data.itertuples():\n taxon = getattr(row, taxon_column)\n use = getattr(row, use_column)\n\n # Extract literature references (handling potential errors)\n try:\n literature_references = getattr(row, literature_column).split(\";\")\n except (AttributeError, ValueError):\n literature_references = []\n\n # Create rows for \"My Study\" and literature references\n rows_to_add = [\n {\"study\": \"My Study\", taxon_column: taxon, use: 1} # Row for \"My Study\"\n ]\n rows_to_add.extend(\n {\n \"study\": ref,\n taxon_column: taxon,\n use: 1, # Set the relevant ailment column to 1\n }\n for ref in literature_references\n )\n\n # Concatenate new rows efficiently using list comprehension\n converted_data = pd.concat(\n [\n converted_data,\n pd.DataFrame(rows_to_add), # Create a DataFrame from the list of rows\n ],\n ignore_index=True,\n )\n\n # Fill missing values with 0 and group data\n converted_data = converted_data.fillna(0).groupby([\"study\", taxon_column]).sum().clip(upper=1)\n\n return converted_data\n\n def fill_missing_taxa_dynamic(self) -> pd.DataFrame:\n\n \"\"\"Fills missing taxa in a DataFrame with appropriate ailment values based on other studies.\n\n Args:\n data (pd.DataFrame): The input DataFrame containing the data to be processed.\n\n Returns:\n pd.DataFrame: The DataFrame with missing taxa filled in.\n \"\"\"\n\n study_data = {}\n ailment_names = list(self.data.columns[:-2]) # Get ailment names from DataFrame columns\n\n for index, row in self.data.iterrows():\n study_name = row['study']\n taxon = row['taxon']\n ailments = row[:-2].tolist() # Extract ailments as a list\n\n if study_name not in study_data:\n study_data[study_name] = {}\n\n study_data[study_name][taxon] = ailments\n\n for study in study_data:\n taxa_in_my_study = study_data[\"My Study\"].keys()\n for taxon in taxa_in_my_study:\n if taxon not in study_data[study]:\n study_data[study][taxon] = [0] * len(ailment_names)\n\n # Create a list to hold the transformed data\n transformed_data = []\n for study, study_values in study_data.items():\n for taxon, ailments in study_values.items():\n row_data = 
[study, taxon] + ailments\n transformed_data.append(row_data)\n\n # Create a DataFrame from the transformed data\n columns = ['study', 'taxon'] + ailment_names\n result_df = pd.DataFrame(transformed_data, columns=columns)\n\n return result_df\n\n def calculate_jaccard_similarity(self, study_column: str, taxon_column: str, ailment_columns: list[str],\n my_study: str) -> dict[tuple[str, str], float]:\n\n \"\"\"Calculates pairwise Jaccard similarity between 'My Study' and other studies based on ailments.\n\n Args:\n data (pd.DataFrame): The input DataFrame containing the dataset.\n study_column (str): Column name for the study identifier.\n taxon_column (str): Column name for the taxon information.\n ailment_columns (List[str]): List of ailment column names.\n my_study (str): Identifier for 'My Study'.\n\n Returns:\n Dict[Tuple[str, str], float]: Dictionary containing Jaccard similarities between 'My Study' and other studies.\n \"\"\"\n # Get unique studies\n studies = self.data[study_column].unique()\n\n # Create a dictionary to store Jaccard similarity between 'My Study' and other studies\n jaccard_similarities = []\n\n # Calculate Jaccard similarity for 'My Study' against other studies\n for other_study in studies:\n if other_study != my_study:\n subset1 = self.data[self.data[study_column] == my_study][ailment_columns]\n subset2 = self.data[self.data[study_column] == other_study][ailment_columns]\n\n # Flatten ailment columns for Jaccard similarity calculation\n subset1_flattened = subset1.values.flatten()\n subset2_flattened = subset2.values.flatten()\n\n # Calculate Jaccard similarity for ailment columns using sklearn's jaccard_score\n jaccard_sim = jaccard_score(subset1_flattened, subset2_flattened)\n\n jaccard_similarities.append({\"study\": other_study, \"similarity\": jaccard_sim})\n\n jaccard_similarities = sorted(jaccard_similarities, key=lambda x: x['similarity'], reverse=True)\n\n return pd.DataFrame(jaccard_similarities)\n\n def run_analysis(self, literature_column: str, taxon_column: str, use_column: str, my_study: str = \"My Study\"):\n self.data = self.data.dropna(subset=[literature_column])\n self.data = self.convert_data(literature_column, taxon_column, use_column)\n self.data['study'] = self.data.index.get_level_values(\"study\")\n self.data['taxon'] = self.data.index.get_level_values(taxon_column)\n self.data = self.fill_missing_taxa_dynamic()\n ailment_columns = self.data.columns[2:]\n return self.calculate_jaccard_similarity(study_column=\"study\", taxon_column=\"taxon\",\n ailment_columns=ailment_columns, my_study=my_study)"
},
{
"identifier": "ChordPlot",
"path": "pyethnobiology/visualization.py",
"snippet": "class ChordPlot:\n\n def __init__(\n\n self,\n data: pd.DataFrame,\n by: str = \"taxon\",\n informant_column: str = \"informant\",\n taxon_column: str = \"taxon\",\n use_column: str = \"ailments_treated\",\n colors: str = None,\n min_info_count: int = None,\n get_first: int = None\n ):\n\n \"\"\"\n Initialize a ChordPlot object for visualizing relationships between data elements.\n\n Args:\n data (pd.DataFrame): The data frame containing relevant information.\n by (str, optional): The column to group data by, defaults to \"informant\".\n informant_column (str, optional): The column name for informant data, defaults to \"informant\".\n taxon_column (str, optional): The column name for taxon data, defaults to \"taxon\".\n use_column (str, optional): The column name for additional data associated with each pair, defaults to \"ailments_treated\".\n colors (list, optional): A list of colors for the links in the plot.\n min_info_count (int, optional): The minimum information count to include in the plot.\n get_first (int, optional): The number of top entries to show in the plot.\n\n Returns:\n A ChordPlot object.\n\n \"\"\"\n\n self.data = data\n self.colors = colors\n self.by = by\n self.min_info_count = min_info_count\n self.get_first = get_first\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n\n def plot(self):\n\n \"\"\"\n Generate and display a circular chord plot using the prepared data.\n\n Returns:\n A Circos object containing the plot figure.\n\n Raises:\n Exception: If any error occurs during plot generation.\n \"\"\"\n\n # Prepare data for visualization\n matrix, order = self._prepare_data()\n\n # Create the Circos plot\n circos = self._create_plot(matrix, order)\n\n return circos.plotfig()\n\n def save_plot(self, filename: str, dpi: int = 300):\n\n \"\"\"\n Generate and save a circular chord plot using the prepared data.\n\n Args:\n filename (str): The name of the file to save the plot to.\n dpi (int, optional): The resolution of the plot, defaults to 300.\n\n Raises:\n Exception: If any error occurs during plot generation.\n \"\"\"\n\n # Prepare data for visualization\n matrix, order = self._prepare_data()\n\n # Create the Circos plot\n circos = self._create_plot(matrix, order)\n\n # Save the plot to a file\n circos.savefig(filename, dpi=dpi)\n\n\n def _prepare_data(self) -> pd.DataFrame:\n\n \"\"\"\n Prepare the data for generating the ChordPlot by counting occurrences and creating a matrix.\n\n Returns:\n A tuple containing:\n - matrix (pd.DataFrame): A data frame with informant counts for each pair.\n - order (list): A list of labels for the circular plot.\n \"\"\"\n\n if self.by == \"informant\":\n taxon_column = self.informant_column\n ailments_treated_column = self.use_column\n else:\n taxon_column = self.taxon_column\n ailments_treated_column = self.use_column\n\n informant_counts = (\n self.data.groupby([taxon_column, ailments_treated_column])\n .size()\n .reset_index(name=\"informant_count\")\n .sort_values(by=\"informant_count\", ascending=False)\n ) # Remove slicing for now\n\n # Apply filtering based on user preference\n if self.get_first is not None:\n informant_counts = informant_counts.head(self.get_first) # Limit by number of species\n elif self.min_info_count is not None:\n informant_counts = informant_counts[\n informant_counts[\"informant_count\"] >= self.min_info_count] # Limit by minimum count\n\n informant_counts = informant_counts.reset_index(drop=True)\n\n matrix_data = 
[[row[taxon_column], row[ailments_treated_column], row[\"informant_count\"]] for idx, row in\n informant_counts.iterrows()]\n matrix = Matrix.parse_fromto_table(pd.DataFrame(matrix_data))\n order = list(set(informant_counts[taxon_column].to_list())) + list(\n set(informant_counts[ailments_treated_column].to_list()))\n return matrix, order\n\n def _create_plot(self, matrix: pd.DataFrame, order: list) -> Circos:\n\n \"\"\"\n Create the Circos plot using the prepared data and configuration.\n\n Args:\n matrix (pd.DataFrame): The data frame with informant counts for each pair.\n order (list): The list of labels for the circular plot.\n\n Returns:\n A Circos object containing the plot figure.\n \"\"\"\n\n circos = Circos.initialize_from_matrix(\n matrix=matrix,\n space=3,\n r_lim=(97, 100),\n cmap=self.colors if self.colors else \"tab10\",\n label_kws=dict(size=9, orientation=\"vertical\"),\n link_kws=dict(ec=\"black\", lw=0.1),\n order=order,\n )\n return circos"
}
] | import pandas as pd
import rdata
from .indices import UR, CI, FC, NU, RFC, RI, UV, CV, FL, FIC
from .stats import Jaccard
from .visualization import ChordPlot | 9,529 |
class pyethnobiology:
"""
Encapsulates ethnobotanical data and analysis.
"""
def __init__(
self,
data: pd.DataFrame,
informant_column: str = "informant",
taxon_column: str = "taxon",
use_column: str = "ailments_treated",
literature_column: str = "literature",
convert_use_data: bool = False,
) -> None:
"""
Initializes the Ethnobotany class.
Args:
data: DataFrame containing ethnobotanical information.
informant_column: Name of the column containing informant IDs.
taxon_column: Name of the column containing species names.
            use_column: Name of the column containing plant uses.
            literature_column: Name of the column containing literature references.
            convert_use_data: Whether to convert use data format (optional).
"""
self.data = self.load_data(data, informant_column, taxon_column, use_column, convert_use_data)
self.informant_column = informant_column
self.taxon_column = taxon_column
self.use_column = use_column
self.literature_column = literature_column
def CI(self):
CI_class = CI(self.data, self.informant_column, self.taxon_column, self.use_column)
return CI_class
def FC(self):
FC_class = FC(self.data, self.informant_column, self.taxon_column, self.use_column)
return FC_class
|
class pyethnobiology:
"""
Encapsulates ethnobotanical data and analysis.
"""
def __init__(
self,
data: pd.DataFrame,
informant_column: str = "informant",
taxon_column: str = "taxon",
use_column: str = "ailments_treated",
literature_column: str = "literature",
convert_use_data: bool = False,
) -> None:
"""
Initializes the Ethnobotany class.
Args:
data: DataFrame containing ethnobotanical information.
informant_column: Name of the column containing informant IDs.
taxon_column: Name of the column containing species names.
            use_column: Name of the column containing plant uses.
            literature_column: Name of the column containing literature references.
            convert_use_data: Whether to convert use data format (optional).
"""
self.data = self.load_data(data, informant_column, taxon_column, use_column, convert_use_data)
self.informant_column = informant_column
self.taxon_column = taxon_column
self.use_column = use_column
self.literature_column = literature_column
def CI(self):
CI_class = CI(self.data, self.informant_column, self.taxon_column, self.use_column)
return CI_class
def FC(self):
FC_class = FC(self.data, self.informant_column, self.taxon_column, self.use_column)
return FC_class
| def NU(self): | 3 | 2023-12-25 01:06:51+00:00 | 12k |
JiePKU/MoLE | train_db.py | [
{
"identifier": "ConfigSanitizer",
"path": "library/config_util.py",
"snippet": "class ConfigSanitizer:\n # @curry\n @staticmethod\n def __validate_and_convert_twodim(klass, value: Sequence) -> Tuple:\n Schema(ExactSequence([klass, klass]))(value)\n return tuple(value)\n\n # @curry\n @staticmethod\n def __validate_and_convert_scalar_or_twodim(klass, value: Union[float, Sequence]) -> Tuple:\n Schema(Any(klass, ExactSequence([klass, klass])))(value)\n try:\n Schema(klass)(value)\n return (value, value)\n except:\n return ConfigSanitizer.__validate_and_convert_twodim(klass, value)\n\n # subset schema\n SUBSET_ASCENDABLE_SCHEMA = {\n \"color_aug\": bool,\n \"face_crop_aug_range\": functools.partial(__validate_and_convert_twodim.__func__, float),\n \"flip_aug\": bool,\n \"num_repeats\": int,\n \"random_crop\": bool,\n \"shuffle_caption\": bool,\n \"keep_tokens\": int,\n \"token_warmup_min\": int,\n \"token_warmup_step\": Any(float,int),\n }\n # DO means DropOut\n DO_SUBSET_ASCENDABLE_SCHEMA = {\n \"caption_dropout_every_n_epochs\": int,\n \"caption_dropout_rate\": Any(float, int),\n \"caption_tag_dropout_rate\": Any(float, int),\n }\n # DB means DreamBooth\n DB_SUBSET_ASCENDABLE_SCHEMA = {\n \"caption_extension\": str,\n \"class_tokens\": str,\n }\n DB_SUBSET_DISTINCT_SCHEMA = {\n Required(\"image_dir\"): str,\n \"is_reg\": bool,\n }\n # FT means FineTuning\n FT_SUBSET_DISTINCT_SCHEMA = {\n Required(\"metadata_file\"): str,\n \"image_dir\": str,\n }\n\n # datasets schema\n DATASET_ASCENDABLE_SCHEMA = {\n \"batch_size\": int,\n \"bucket_no_upscale\": bool,\n \"bucket_reso_steps\": int,\n \"enable_bucket\": bool,\n \"max_bucket_reso\": int,\n \"min_bucket_reso\": int,\n \"resolution\": functools.partial(__validate_and_convert_scalar_or_twodim.__func__, int),\n }\n\n # options handled by argparse but not handled by user config\n ARGPARSE_SPECIFIC_SCHEMA = {\n \"debug_dataset\": bool,\n \"max_token_length\": Any(None, int),\n \"prior_loss_weight\": Any(float, int),\n }\n # for handling default None value of argparse\n ARGPARSE_NULLABLE_OPTNAMES = [\n \"face_crop_aug_range\",\n \"resolution\",\n ]\n # prepare map because option name may differ among argparse and user config\n ARGPARSE_OPTNAME_TO_CONFIG_OPTNAME = {\n \"train_batch_size\": \"batch_size\",\n \"dataset_repeats\": \"num_repeats\",\n }\n\n def __init__(self, support_dreambooth: bool, support_finetuning: bool, support_dropout: bool) -> None:\n assert support_dreambooth or support_finetuning, \"Neither DreamBooth mode nor fine tuning mode specified. Please specify one mode or more. 
/ DreamBooth モードか fine tuning モードのどちらも指定されていません。1つ以上指定してください。\"\n\n self.db_subset_schema = self.__merge_dict(\n self.SUBSET_ASCENDABLE_SCHEMA,\n self.DB_SUBSET_DISTINCT_SCHEMA,\n self.DB_SUBSET_ASCENDABLE_SCHEMA,\n self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},\n )\n\n self.ft_subset_schema = self.__merge_dict(\n self.SUBSET_ASCENDABLE_SCHEMA,\n self.FT_SUBSET_DISTINCT_SCHEMA,\n self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},\n )\n\n self.db_dataset_schema = self.__merge_dict(\n self.DATASET_ASCENDABLE_SCHEMA,\n self.SUBSET_ASCENDABLE_SCHEMA,\n self.DB_SUBSET_ASCENDABLE_SCHEMA,\n self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},\n {\"subsets\": [self.db_subset_schema]},\n )\n\n self.ft_dataset_schema = self.__merge_dict(\n self.DATASET_ASCENDABLE_SCHEMA,\n self.SUBSET_ASCENDABLE_SCHEMA,\n self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},\n {\"subsets\": [self.ft_subset_schema]},\n )\n\n if support_dreambooth and support_finetuning:\n def validate_flex_dataset(dataset_config: dict):\n subsets_config = dataset_config.get(\"subsets\", [])\n\n # check dataset meets FT style\n # NOTE: all FT subsets should have \"metadata_file\"\n if all([\"metadata_file\" in subset for subset in subsets_config]):\n return Schema(self.ft_dataset_schema)(dataset_config)\n # check dataset meets DB style\n # NOTE: all DB subsets should have no \"metadata_file\"\n elif all([\"metadata_file\" not in subset for subset in subsets_config]):\n return Schema(self.db_dataset_schema)(dataset_config)\n else:\n raise voluptuous.Invalid(\"DreamBooth subset and fine tuning subset cannot be mixed in the same dataset. Please split them into separate datasets. / DreamBoothのサブセットとfine tuninのサブセットを同一のデータセットに混在させることはできません。別々のデータセットに分割してください。\")\n\n self.dataset_schema = validate_flex_dataset\n elif support_dreambooth:\n self.dataset_schema = self.db_dataset_schema\n else:\n self.dataset_schema = self.ft_dataset_schema\n\n self.general_schema = self.__merge_dict(\n self.DATASET_ASCENDABLE_SCHEMA,\n self.SUBSET_ASCENDABLE_SCHEMA,\n self.DB_SUBSET_ASCENDABLE_SCHEMA if support_dreambooth else {},\n self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},\n )\n\n self.user_config_validator = Schema({\n \"general\": self.general_schema,\n \"datasets\": [self.dataset_schema],\n })\n\n self.argparse_schema = self.__merge_dict(\n self.general_schema,\n self.ARGPARSE_SPECIFIC_SCHEMA,\n {optname: Any(None, self.general_schema[optname]) for optname in self.ARGPARSE_NULLABLE_OPTNAMES},\n {a_name: self.general_schema[c_name] for a_name, c_name in self.ARGPARSE_OPTNAME_TO_CONFIG_OPTNAME.items()},\n )\n\n self.argparse_config_validator = Schema(Object(self.argparse_schema), extra=voluptuous.ALLOW_EXTRA)\n\n def sanitize_user_config(self, user_config: dict) -> dict:\n try:\n return self.user_config_validator(user_config)\n except MultipleInvalid:\n # TODO: エラー発生時のメッセージをわかりやすくする\n print(\"Invalid user config / ユーザ設定の形式が正しくないようです\")\n raise\n\n # NOTE: In nature, argument parser result is not needed to be sanitize\n # However this will help us to detect program bug\n def sanitize_argparse_namespace(self, argparse_namespace: argparse.Namespace) -> argparse.Namespace:\n try:\n return self.argparse_config_validator(argparse_namespace)\n except MultipleInvalid:\n # XXX: this should be a bug\n print(\"Invalid cmdline parsed arguments. This should be a bug. 
/ コマンドラインのパース結果が正しくないようです。プログラムのバグの可能性が高いです。\")\n raise\n\n # NOTE: value would be overwritten by latter dict if there is already the same key\n @staticmethod\n def __merge_dict(*dict_list: dict) -> dict:\n merged = {}\n for schema in dict_list:\n # merged |= schema\n for k, v in schema.items():\n merged[k] = v\n return merged"
},
{
"identifier": "BlueprintGenerator",
"path": "library/config_util.py",
"snippet": "class BlueprintGenerator:\n BLUEPRINT_PARAM_NAME_TO_CONFIG_OPTNAME = {\n }\n\n def __init__(self, sanitizer: ConfigSanitizer):\n self.sanitizer = sanitizer\n\n # runtime_params is for parameters which is only configurable on runtime, such as tokenizer\n def generate(self, user_config: dict, argparse_namespace: argparse.Namespace, **runtime_params) -> Blueprint:\n sanitized_user_config = self.sanitizer.sanitize_user_config(user_config)\n sanitized_argparse_namespace = self.sanitizer.sanitize_argparse_namespace(argparse_namespace)\n\n # convert argparse namespace to dict like config\n # NOTE: it is ok to have extra entries in dict\n optname_map = self.sanitizer.ARGPARSE_OPTNAME_TO_CONFIG_OPTNAME\n argparse_config = {optname_map.get(optname, optname): value for optname, value in vars(sanitized_argparse_namespace).items()}\n\n general_config = sanitized_user_config.get(\"general\", {})\n\n dataset_blueprints = []\n for dataset_config in sanitized_user_config.get(\"datasets\", []):\n # NOTE: if subsets have no \"metadata_file\", these are DreamBooth datasets/subsets\n subsets = dataset_config.get(\"subsets\", [])\n is_dreambooth = all([\"metadata_file\" not in subset for subset in subsets])\n if is_dreambooth:\n subset_params_klass = DreamBoothSubsetParams\n dataset_params_klass = DreamBoothDatasetParams\n else:\n subset_params_klass = FineTuningSubsetParams\n dataset_params_klass = FineTuningDatasetParams\n\n subset_blueprints = []\n for subset_config in subsets:\n params = self.generate_params_by_fallbacks(subset_params_klass,\n [subset_config, dataset_config, general_config, argparse_config, runtime_params])\n subset_blueprints.append(SubsetBlueprint(params))\n\n params = self.generate_params_by_fallbacks(dataset_params_klass,\n [dataset_config, general_config, argparse_config, runtime_params])\n dataset_blueprints.append(DatasetBlueprint(is_dreambooth, params, subset_blueprints))\n\n dataset_group_blueprint = DatasetGroupBlueprint(dataset_blueprints)\n\n return Blueprint(dataset_group_blueprint)\n\n @staticmethod\n def generate_params_by_fallbacks(param_klass, fallbacks: Sequence[dict]):\n name_map = BlueprintGenerator.BLUEPRINT_PARAM_NAME_TO_CONFIG_OPTNAME\n search_value = BlueprintGenerator.search_value\n default_params = asdict(param_klass())\n param_names = default_params.keys()\n\n params = {name: search_value(name_map.get(name, name), fallbacks, default_params.get(name)) for name in param_names}\n\n return param_klass(**params)\n\n @staticmethod\n def search_value(key: str, fallbacks: Sequence[dict], default_value = None):\n for cand in fallbacks:\n value = cand.get(key)\n if value is not None:\n return value\n\n return default_value"
},
{
"identifier": "apply_snr_weight",
"path": "library/custom_train_functions.py",
"snippet": "def apply_snr_weight(loss, timesteps, noise_scheduler, gamma):\r\n snr = torch.stack([noise_scheduler.all_snr[t] for t in timesteps])\r\n gamma_over_snr = torch.div(torch.ones_like(snr) * gamma, snr)\r\n snr_weight = torch.minimum(gamma_over_snr, torch.ones_like(gamma_over_snr)).float() # from paper\r\n loss = loss * snr_weight\r\n return loss\r"
},
{
"identifier": "get_weighted_text_embeddings",
"path": "library/custom_train_functions.py",
"snippet": "def get_weighted_text_embeddings(\r\n tokenizer,\r\n text_encoder,\r\n prompt: Union[str, List[str]],\r\n device,\r\n max_embeddings_multiples: Optional[int] = 3,\r\n no_boseos_middle: Optional[bool] = False,\r\n clip_skip=None,\r\n):\r\n r\"\"\"\r\n Prompts can be assigned with local weights using brackets. For example,\r\n prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',\r\n and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.\r\n\r\n Also, to regularize of the embedding, the weighted embedding would be scaled to preserve the original mean.\r\n\r\n Args:\r\n prompt (`str` or `List[str]`):\r\n The prompt or prompts to guide the image generation.\r\n max_embeddings_multiples (`int`, *optional*, defaults to `3`):\r\n The max multiple length of prompt embeddings compared to the max output length of text encoder.\r\n no_boseos_middle (`bool`, *optional*, defaults to `False`):\r\n If the length of text token is multiples of the capacity of text encoder, whether reserve the starting and\r\n ending token in each of the chunk in the middle.\r\n skip_parsing (`bool`, *optional*, defaults to `False`):\r\n Skip the parsing of brackets.\r\n skip_weighting (`bool`, *optional*, defaults to `False`):\r\n Skip the weighting. When the parsing is skipped, it is forced True.\r\n \"\"\"\r\n max_length = (tokenizer.model_max_length - 2) * max_embeddings_multiples + 2\r\n if isinstance(prompt, str):\r\n prompt = [prompt]\r\n\r\n prompt_tokens, prompt_weights = get_prompts_with_weights(tokenizer, prompt, max_length - 2)\r\n\r\n # round up the longest length of tokens to a multiple of (model_max_length - 2)\r\n max_length = max([len(token) for token in prompt_tokens])\r\n\r\n max_embeddings_multiples = min(\r\n max_embeddings_multiples,\r\n (max_length - 1) // (tokenizer.model_max_length - 2) + 1,\r\n )\r\n max_embeddings_multiples = max(1, max_embeddings_multiples)\r\n max_length = (tokenizer.model_max_length - 2) * max_embeddings_multiples + 2\r\n\r\n # pad the length of tokens and weights\r\n bos = tokenizer.bos_token_id\r\n eos = tokenizer.eos_token_id\r\n pad = tokenizer.pad_token_id\r\n prompt_tokens, prompt_weights = pad_tokens_and_weights(\r\n prompt_tokens,\r\n prompt_weights,\r\n max_length,\r\n bos,\r\n eos,\r\n no_boseos_middle=no_boseos_middle,\r\n chunk_length=tokenizer.model_max_length,\r\n )\r\n prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=device)\r\n\r\n # get the embeddings\r\n text_embeddings = get_unweighted_text_embeddings(\r\n tokenizer,\r\n text_encoder,\r\n prompt_tokens,\r\n tokenizer.model_max_length,\r\n clip_skip,\r\n eos,\r\n pad,\r\n no_boseos_middle=no_boseos_middle,\r\n )\r\n prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=device)\r\n\r\n # assign weights to the prompts and normalize in the sense of mean\r\n previous_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)\r\n text_embeddings = text_embeddings * prompt_weights.unsqueeze(-1)\r\n current_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)\r\n text_embeddings = text_embeddings * (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)\r\n\r\n return text_embeddings\r"
},
{
"identifier": "prepare_scheduler_for_custom_training",
"path": "library/custom_train_functions.py",
"snippet": "def prepare_scheduler_for_custom_training(noise_scheduler, device):\r\n if hasattr(noise_scheduler, \"all_snr\"):\r\n return\r\n\r\n alphas_cumprod = noise_scheduler.alphas_cumprod\r\n sqrt_alphas_cumprod = torch.sqrt(alphas_cumprod)\r\n sqrt_one_minus_alphas_cumprod = torch.sqrt(1.0 - alphas_cumprod)\r\n alpha = sqrt_alphas_cumprod\r\n sigma = sqrt_one_minus_alphas_cumprod\r\n all_snr = (alpha / sigma) ** 2\r\n\r\n noise_scheduler.all_snr = all_snr.to(device)\r"
},
{
"identifier": "pyramid_noise_like",
"path": "library/custom_train_functions.py",
"snippet": "def pyramid_noise_like(noise, device, iterations=6, discount=0.4):\r\n b, c, w, h = noise.shape # EDIT: w and h get over-written, rename for a different variant!\r\n u = torch.nn.Upsample(size=(w, h), mode=\"bilinear\").to(device)\r\n for i in range(iterations):\r\n r = random.random() * 2 + 2 # Rather than always going 2x,\r\n wn, hn = max(1, int(w / (r**i))), max(1, int(h / (r**i)))\r\n noise += u(torch.randn(b, c, wn, hn).to(device)) * discount**i\r\n if wn == 1 or hn == 1:\r\n break # Lowest resolution is 1x1\r\n return noise / noise.std() # Scaled back to roughly unit variance\r"
},
{
"identifier": "apply_noise_offset",
"path": "library/custom_train_functions.py",
"snippet": "def apply_noise_offset(latents, noise, noise_offset, adaptive_noise_scale):\r\n if noise_offset is None:\r\n return noise\r\n if adaptive_noise_scale is not None:\r\n # latent shape: (batch_size, channels, height, width)\r\n # abs mean value for each channel\r\n latent_mean = torch.abs(latents.mean(dim=(2, 3), keepdim=True))\r\n\r\n # multiply adaptive noise scale to the mean value and add it to the noise offset\r\n noise_offset = noise_offset + adaptive_noise_scale * latent_mean\r\n noise_offset = torch.clamp(noise_offset, 0.0, None) # in case of adaptive noise scale is negative\r\n\r\n noise = noise + noise_offset * torch.randn((latents.shape[0], latents.shape[1], 1, 1), device=latents.device)\r\n return noise\r"
},
{
"identifier": "scale_v_prediction_loss_like_noise_prediction",
"path": "library/custom_train_functions.py",
"snippet": "def scale_v_prediction_loss_like_noise_prediction(loss, timesteps, noise_scheduler):\r\n snr_t = torch.stack([noise_scheduler.all_snr[t] for t in timesteps]) # batch_size\r\n snr_t = torch.minimum(snr_t, torch.ones_like(snr_t) * 1000) # if timestep is 0, snr_t is inf, so limit it to 1000\r\n scale = snr_t / (snr_t + 1)\r\n\r\n loss = loss * scale\r\n return loss\r"
}
] | import gc
import time
import argparse
import itertools
import math
import os
import toml
import torch
import diffusers
import library.train_util as train_util
import library.config_util as config_util
import library.custom_train_functions as custom_train_functions
from multiprocessing import Value
from tqdm import tqdm
from accelerate.utils import set_seed
from diffusers import DDPMScheduler
from library.config_util import (
ConfigSanitizer,
BlueprintGenerator,
)
from library.custom_train_functions import (
apply_snr_weight,
get_weighted_text_embeddings,
prepare_scheduler_for_custom_training,
pyramid_noise_like,
apply_noise_offset,
scale_v_prediction_loss_like_noise_prediction,
) | 7,307 | train_dataloader = torch.utils.data.DataLoader(
train_dataset_group,
batch_size=1,
shuffle=True,
collate_fn=collater,
num_workers=n_workers,
persistent_workers=args.persistent_data_loader_workers,
)
    # compute the number of training steps
if args.max_train_epochs is not None:
args.max_train_steps = args.max_train_epochs * math.ceil(
len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps
)
print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}")
    # also send the number of training steps to the dataset side
train_dataset_group.set_max_train_steps(args.max_train_steps)
if args.stop_text_encoder_training is None:
args.stop_text_encoder_training = args.max_train_steps + 1 # do not stop until end
    # prepare the lr scheduler. TODO: the handling of gradient_accumulation_steps may be off somewhere; check later
    lr_scheduler = train_util.get_scheduler_fix(args, optimizer, accelerator.num_processes)
    # experimental feature: perform fp16 training including gradients; cast the entire model to fp16
if args.full_fp16:
assert (
args.mixed_precision == "fp16"
), "full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。"
print("enable full fp16 training.")
unet.to(weight_dtype)
text_encoder.to(weight_dtype)
    # accelerator apparently takes care of this for us
if train_text_encoder:
unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
unet, text_encoder, optimizer, train_dataloader, lr_scheduler
)
else:
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)
# transform DDP after prepare
text_encoder, unet = train_util.transform_if_model_is_DDP(text_encoder, unet)
if not train_text_encoder:
text_encoder.to(accelerator.device, dtype=weight_dtype) # to avoid 'cpu' vs 'cuda' error
    # experimental feature: perform fp16 training including gradients; patch PyTorch to enable grad scaling in fp16
if args.full_fp16:
train_util.patch_accelerator_for_fp16_training(accelerator)
    # resume training if specified
    train_util.resume_from_local_or_hf_if_specified(accelerator, args)
    # compute the number of epochs
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
if (args.save_n_epoch_ratio is not None) and (args.save_n_epoch_ratio > 0):
args.save_every_n_epochs = math.floor(num_train_epochs / args.save_n_epoch_ratio) or 1
    # start training
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
print("running training / 学習開始")
print(f" num train images * repeats / 学習画像の数×繰り返し回数: {train_dataset_group.num_train_images}")
print(f" num reg images / 正則化画像の数: {train_dataset_group.num_reg_images}")
print(f" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}")
print(f" num epochs / epoch数: {num_train_epochs}")
print(f" batch size per device / バッチサイズ: {args.train_batch_size}")
print(f" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}")
print(f" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}")
print(f" total optimization steps / 学習ステップ数: {args.max_train_steps}")
progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc="steps")
global_step = 0
noise_scheduler = DDPMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False
)
prepare_scheduler_for_custom_training(noise_scheduler, accelerator.device)
if accelerator.is_main_process:
accelerator.init_trackers("dreambooth" if args.log_tracker_name is None else args.log_tracker_name)
loss_list = []
loss_total = 0.0
for epoch in range(num_train_epochs):
print(f"\nepoch {epoch+1}/{num_train_epochs}")
current_epoch.value = epoch + 1
        # train the Text Encoder up to the specified number of steps: state at the start of the epoch
unet.train()
# train==True is required to enable gradient_checkpointing
if args.gradient_checkpointing or global_step < args.stop_text_encoder_training:
text_encoder.train()
for step, batch in enumerate(train_dataloader):
current_step.value = global_step
            # stop training the Text Encoder at the specified number of steps
if global_step == args.stop_text_encoder_training:
print(f"stop text encoder training at step {global_step}")
if not args.gradient_checkpointing:
text_encoder.train(False)
text_encoder.requires_grad_(False)
with accelerator.accumulate(unet):
with torch.no_grad():
                    # convert images to latents
if cache_latents:
latents = batch["latents"].to(accelerator.device)
else:
latents = vae.encode(batch["images"].to(dtype=weight_dtype)).latent_dist.sample()
latents = latents * 0.18215
b_size = latents.shape[0]
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents, device=latents.device)
if args.noise_offset:
noise = apply_noise_offset(latents, noise, args.noise_offset, args.adaptive_noise_scale)
elif args.multires_noise_iterations:
| # DreamBooth training
# XXX dropped option: fine_tune
# perlin_noise,
def train(args):
train_util.verify_training_args(args)
train_util.prepare_dataset_args(args, False)
cache_latents = args.cache_latents
if args.seed is not None:
        set_seed(args.seed)  # initialize the random number sequence
    tokenizer = train_util.load_tokenizer(args)
    # prepare the dataset
if args.dataset_class is None:
blueprint_generator = BlueprintGenerator(ConfigSanitizer(True, False, True))
if args.dataset_config is not None:
print(f"Load dataset config from {args.dataset_config}")
user_config = config_util.load_user_config(args.dataset_config)
ignored = ["train_data_dir", "reg_data_dir"]
if any(getattr(args, attr) is not None for attr in ignored):
print(
"ignore following options because config file is found: {0} / 設定ファイルが利用されるため以下のオプションは無視されます: {0}".format(
", ".join(ignored)
)
)
else:
user_config = {
"datasets": [
{"subsets": config_util.generate_dreambooth_subsets_config_by_subdirs(args.train_data_dir, args.reg_data_dir)}
]
}
blueprint = blueprint_generator.generate(user_config, args, tokenizer=tokenizer)
train_dataset_group = config_util.generate_dataset_group_by_blueprint(blueprint.dataset_group)
else:
train_dataset_group = train_util.load_arbitrary_dataset(args, tokenizer)
current_epoch = Value("i", 0)
current_step = Value("i", 0)
ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
if args.no_token_padding:
train_dataset_group.disable_token_padding()
if args.debug_dataset:
train_util.debug_dataset(train_dataset_group)
return
if cache_latents:
assert (
train_dataset_group.is_latent_cacheable()
), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません"
    # prepare the accelerator
print("prepare accelerator")
if args.gradient_accumulation_steps > 1:
print(
f"gradient_accumulation_steps is {args.gradient_accumulation_steps}. accelerate does not support gradient_accumulation_steps when training multiple models (U-Net and Text Encoder), so something might be wrong"
)
print(
f"gradient_accumulation_stepsが{args.gradient_accumulation_steps}に設定されています。accelerateは複数モデル(U-NetおよびText Encoder)の学習時にgradient_accumulation_stepsをサポートしていないため結果は未知数です"
)
accelerator, unwrap_model = train_util.prepare_accelerator(args)
    # prepare dtypes compatible with mixed precision and cast as appropriate
    weight_dtype, save_dtype = train_util.prepare_dtype(args)
    # load the model
text_encoder, vae, unet, load_stable_diffusion_format = train_util.load_target_model(args, weight_dtype, accelerator)
# verify load/save model formats
if load_stable_diffusion_format:
src_stable_diffusion_ckpt = args.pretrained_model_name_or_path
src_diffusers_model_path = None
else:
src_stable_diffusion_ckpt = None
src_diffusers_model_path = args.pretrained_model_name_or_path
if args.save_model_as is None:
save_stable_diffusion_format = load_stable_diffusion_format
use_safetensors = args.use_safetensors
else:
save_stable_diffusion_format = args.save_model_as.lower() == "ckpt" or args.save_model_as.lower() == "safetensors"
use_safetensors = args.use_safetensors or ("safetensors" in args.save_model_as.lower())
    # incorporate xformers or memory-efficient attention into the model
    train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers)
    # prepare for training
if cache_latents:
vae.to(accelerator.device, dtype=weight_dtype)
vae.requires_grad_(False)
vae.eval()
with torch.no_grad():
train_dataset_group.cache_latents(vae, args.vae_batch_size, args.cache_latents_to_disk, accelerator.is_main_process)
vae.to("cpu")
if torch.cuda.is_available():
torch.cuda.empty_cache()
gc.collect()
accelerator.wait_for_everyone()
    # prepare for training: put the models into the proper state
    train_text_encoder = args.stop_text_encoder_training is None or args.stop_text_encoder_training >= 0
    unet.requires_grad_(True)  # added just in case
text_encoder.requires_grad_(train_text_encoder)
if not train_text_encoder:
print("Text Encoder is not trained.")
if args.gradient_checkpointing:
unet.enable_gradient_checkpointing()
text_encoder.gradient_checkpointing_enable()
if not cache_latents:
vae.requires_grad_(False)
vae.eval()
vae.to(accelerator.device, dtype=weight_dtype)
    # prepare the classes needed for training
print("prepare optimizer, data loader etc.")
if train_text_encoder:
trainable_params = itertools.chain(unet.parameters(), text_encoder.parameters())
else:
trainable_params = unet.parameters()
_, _, optimizer = train_util.get_optimizer(args, trainable_params)
    # prepare the dataloader
    # number of DataLoader worker processes: 0 means the main process is used
    n_workers = min(args.max_data_loader_n_workers, os.cpu_count() - 1)  # cpu_count-1, but at most the specified number
train_dataloader = torch.utils.data.DataLoader(
train_dataset_group,
batch_size=1,
shuffle=True,
collate_fn=collater,
num_workers=n_workers,
persistent_workers=args.persistent_data_loader_workers,
)
    # compute the number of training steps
if args.max_train_epochs is not None:
args.max_train_steps = args.max_train_epochs * math.ceil(
len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps
)
print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}")
    # also send the number of training steps to the dataset side
train_dataset_group.set_max_train_steps(args.max_train_steps)
if args.stop_text_encoder_training is None:
args.stop_text_encoder_training = args.max_train_steps + 1 # do not stop until end
    # prepare the lr scheduler. TODO: the handling of gradient_accumulation_steps may be off somewhere; check later
    lr_scheduler = train_util.get_scheduler_fix(args, optimizer, accelerator.num_processes)
    # experimental feature: perform fp16 training including gradients; cast the entire model to fp16
if args.full_fp16:
assert (
args.mixed_precision == "fp16"
), "full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。"
print("enable full fp16 training.")
unet.to(weight_dtype)
text_encoder.to(weight_dtype)
    # accelerator apparently takes care of this for us
if train_text_encoder:
unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
unet, text_encoder, optimizer, train_dataloader, lr_scheduler
)
else:
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)
# transform DDP after prepare
text_encoder, unet = train_util.transform_if_model_is_DDP(text_encoder, unet)
if not train_text_encoder:
text_encoder.to(accelerator.device, dtype=weight_dtype) # to avoid 'cpu' vs 'cuda' error
    # experimental feature: perform fp16 training including gradients; patch PyTorch to enable grad scaling in fp16
if args.full_fp16:
train_util.patch_accelerator_for_fp16_training(accelerator)
    # resume training if specified
    train_util.resume_from_local_or_hf_if_specified(accelerator, args)
    # compute the number of epochs
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
if (args.save_n_epoch_ratio is not None) and (args.save_n_epoch_ratio > 0):
args.save_every_n_epochs = math.floor(num_train_epochs / args.save_n_epoch_ratio) or 1
    # start training
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
print("running training / 学習開始")
print(f" num train images * repeats / 学習画像の数×繰り返し回数: {train_dataset_group.num_train_images}")
print(f" num reg images / 正則化画像の数: {train_dataset_group.num_reg_images}")
print(f" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}")
print(f" num epochs / epoch数: {num_train_epochs}")
print(f" batch size per device / バッチサイズ: {args.train_batch_size}")
print(f" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}")
print(f" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}")
print(f" total optimization steps / 学習ステップ数: {args.max_train_steps}")
progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc="steps")
global_step = 0
noise_scheduler = DDPMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False
)
prepare_scheduler_for_custom_training(noise_scheduler, accelerator.device)
if accelerator.is_main_process:
accelerator.init_trackers("dreambooth" if args.log_tracker_name is None else args.log_tracker_name)
loss_list = []
loss_total = 0.0
for epoch in range(num_train_epochs):
print(f"\nepoch {epoch+1}/{num_train_epochs}")
current_epoch.value = epoch + 1
        # train the Text Encoder up to the specified number of steps: state at the start of the epoch
unet.train()
# train==True is required to enable gradient_checkpointing
if args.gradient_checkpointing or global_step < args.stop_text_encoder_training:
text_encoder.train()
for step, batch in enumerate(train_dataloader):
current_step.value = global_step
            # stop training the Text Encoder at the specified number of steps
if global_step == args.stop_text_encoder_training:
print(f"stop text encoder training at step {global_step}")
if not args.gradient_checkpointing:
text_encoder.train(False)
text_encoder.requires_grad_(False)
with accelerator.accumulate(unet):
with torch.no_grad():
                    # convert images to latents
if cache_latents:
latents = batch["latents"].to(accelerator.device)
else:
latents = vae.encode(batch["images"].to(dtype=weight_dtype)).latent_dist.sample()
latents = latents * 0.18215
b_size = latents.shape[0]
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents, device=latents.device)
if args.noise_offset:
noise = apply_noise_offset(latents, noise, args.noise_offset, args.adaptive_noise_scale)
elif args.multires_noise_iterations: | noise = pyramid_noise_like(noise, latents.device, args.multires_noise_iterations, args.multires_noise_discount) | 5 | 2023-12-30 07:46:35+00:00 | 12k |
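The next_line recorded above continues into the multi-resolution noise branch. As a self-contained illustration, the sketch below reproduces the apply_noise_offset and pyramid_noise_like helpers exactly as they appear in this record's context snippets and applies them to a dummy latent batch; the tensor shape and the chosen offset/iteration values are illustrative assumptions, not values from the source.

import random
import torch

def apply_noise_offset(latents, noise, noise_offset, adaptive_noise_scale):
    # copied from the library/custom_train_functions.py snippet shown above
    if noise_offset is None:
        return noise
    if adaptive_noise_scale is not None:
        # abs mean value per channel of the latents (batch_size, channels, height, width)
        latent_mean = torch.abs(latents.mean(dim=(2, 3), keepdim=True))
        noise_offset = noise_offset + adaptive_noise_scale * latent_mean
        noise_offset = torch.clamp(noise_offset, 0.0, None)
    noise = noise + noise_offset * torch.randn((latents.shape[0], latents.shape[1], 1, 1), device=latents.device)
    return noise

def pyramid_noise_like(noise, device, iterations=6, discount=0.4):
    # copied from the library/custom_train_functions.py snippet shown above
    b, c, w, h = noise.shape
    u = torch.nn.Upsample(size=(w, h), mode="bilinear").to(device)
    for i in range(iterations):
        r = random.random() * 2 + 2
        wn, hn = max(1, int(w / (r**i))), max(1, int(h / (r**i)))
        noise += u(torch.randn(b, c, wn, hn).to(device)) * discount**i
        if wn == 1 or hn == 1:
            break  # lowest resolution is 1x1
    return noise / noise.std()  # scaled back to roughly unit variance

latents = torch.randn(2, 4, 32, 32)  # dummy VAE latents (B, C, H, W); shape is an assumption
noise = torch.randn_like(latents)
noise_with_offset = apply_noise_offset(latents, noise.clone(), noise_offset=0.05, adaptive_noise_scale=None)
noise_multires = pyramid_noise_like(noise.clone(), latents.device, iterations=6, discount=0.4)
print(noise_with_offset.shape, noise_multires.shape)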
Hatins/DEOE | modules/detection.py | [
{
"identifier": "ObjectLabels",
"path": "data/genx_utils/labels.py",
"snippet": "class ObjectLabels(ObjectLabelBase):\n def __init__(self,\n object_labels: th.Tensor,\n input_size_hw: Tuple[int, int]):\n super().__init__(object_labels=object_labels, input_size_hw=input_size_hw)\n\n def __len__(self) -> int:\n return self.object_labels.shape[0]\n\n def rotate_(self, angle_deg: float):\n if len(self) == 0:\n return\n # (x0,y0)---(x1,y0) p00---p10\n # | | | |\n # | | | |\n # (x0,y1)---(x1,y1) p01---p11\n p00 = th.stack((self.x, self.y), dim=1)\n p10 = th.stack((self.x + self.w, self.y), dim=1)\n p01 = th.stack((self.x, self.y + self.h), dim=1)\n p11 = th.stack((self.x + self.w, self.y + self.h), dim=1)\n # points: 4 x N x 2\n points = th.stack((p00, p10, p01, p11), dim=0)\n\n cx = self._input_size_hw[1] // 2\n cy = self._input_size_hw[0] // 2\n center = th.tensor([cx, cy], device=self.device)\n\n angle_rad = angle_deg / 180 * math.pi\n # counter-clockwise rotation\n rot_matrix = th.tensor([[math.cos(angle_rad), math.sin(angle_rad)],\n [-math.sin(angle_rad), math.cos(angle_rad)]], device=self.device)\n\n points = points - center\n points = th.einsum('ij,pnj->pni', rot_matrix, points)\n points = points + center\n\n height, width = self.input_size_hw\n x0 = th.clamp(th.min(points[..., 0], dim=0)[0], min=0, max=width - 1)\n y0 = th.clamp(th.min(points[..., 1], dim=0)[0], min=0, max=height - 1)\n x1 = th.clamp(th.max(points[..., 0], dim=0)[0], min=0, max=width - 1)\n y1 = th.clamp(th.max(points[..., 1], dim=0)[0], min=0, max=height - 1)\n\n self.x = x0\n self.y = y0\n self.w = x1 - x0\n self.h = y1 - y0\n\n self.remove_flat_labels_()\n\n assert th.all(self.x >= 0)\n assert th.all(self.y >= 0)\n assert th.all(self.x + self.w <= self.input_size_hw[1] - 1)\n assert th.all(self.y + self.h <= self.input_size_hw[0] - 1)\n\n def zoom_in_and_rescale_(self, zoom_coordinates_x0y0: Tuple[int, int], zoom_in_factor: float):\n \"\"\"\n 1) Computes a new smaller canvas size: original canvas scaled by a factor of 1/zoom_in_factor (downscaling)\n 2) Places the smaller canvas inside the original canvas at the top-left coordinates zoom_coordinates_x0y0\n 3) Extract the smaller canvas and rescale it back to the original resolution\n \"\"\"\n if len(self) == 0:\n return\n assert len(zoom_coordinates_x0y0) == 2\n assert zoom_in_factor >= 1\n if zoom_in_factor == 1:\n return\n z_x0, z_y0 = zoom_coordinates_x0y0\n h_orig, w_orig = self.input_size_hw\n assert 0 <= z_x0 <= w_orig - 1\n assert 0 <= z_y0 <= h_orig - 1\n zoom_window_h, zoom_window_w = tuple(x / zoom_in_factor for x in self.input_size_hw)\n z_x1 = min(z_x0 + zoom_window_w, w_orig - 1)\n assert z_x1 <= w_orig - 1, f'{z_x1=} is larger than {w_orig-1=}'\n z_y1 = min(z_y0 + zoom_window_h, h_orig - 1)\n assert z_y1 <= h_orig - 1, f'{z_y1=} is larger than {h_orig-1=}'\n\n x0 = th.clamp(self.x, min=z_x0, max=z_x1 - 1)\n y0 = th.clamp(self.y, min=z_y0, max=z_y1 - 1)\n\n x1 = th.clamp(self.x + self.w, min=z_x0, max=z_x1 - 1)\n y1 = th.clamp(self.y + self.h, min=z_y0, max=z_y1 - 1)\n\n self.x = x0 - z_x0\n self.y = y0 - z_y0\n self.w = x1 - x0\n self.h = y1 - y0\n self.input_size_hw = (zoom_window_h, zoom_window_w)\n\n self.remove_flat_labels_()\n\n self.scale_(scaling_multiplier=zoom_in_factor)\n\n def zoom_out_and_rescale_(self, zoom_coordinates_x0y0: Tuple[int, int], zoom_out_factor: float):\n \"\"\"\n 1) Scales the input by a factor of 1/zoom_out_factor (i.e. 
reduces the canvas size)\n 2) Places the downscaled canvas into the original canvas at the top-left coordinates zoom_coordinates_x0y0\n \"\"\"\n if len(self) == 0:\n return\n assert len(zoom_coordinates_x0y0) == 2\n assert zoom_out_factor >= 1\n if zoom_out_factor == 1:\n return\n\n h_orig, w_orig = self.input_size_hw\n self.scale_(scaling_multiplier=1 / zoom_out_factor)\n\n self.input_size_hw = (h_orig, w_orig)\n z_x0, z_y0 = zoom_coordinates_x0y0\n assert 0 <= z_x0 <= w_orig - 1\n assert 0 <= z_y0 <= h_orig - 1\n\n self.x = self.x + z_x0\n self.y = self.y + z_y0\n\n def scale_(self, scaling_multiplier: float):\n if len(self) == 0:\n return\n assert scaling_multiplier > 0\n if scaling_multiplier == 1:\n return\n img_ht, img_wd = self.input_size_hw\n new_img_ht = scaling_multiplier * img_ht\n new_img_wd = scaling_multiplier * img_wd\n self.input_size_hw = (new_img_ht, new_img_wd)\n x1 = th.clamp((self.x + self.w) * scaling_multiplier, max=new_img_wd - 1)\n y1 = th.clamp((self.y + self.h) * scaling_multiplier, max=new_img_ht - 1)\n self.x = self.x * scaling_multiplier\n self.y = self.y * scaling_multiplier\n\n self.w = x1 - self.x\n self.h = y1 - self.y\n\n self.remove_flat_labels_()\n\n def flip_lr_(self) -> None:\n if len(self) == 0:\n return\n self.x = self.input_size_hw[1] - 1 - self.x - self.w\n\n def get_labels_as_tensors(self, keep_classes: list = [], format_: str = 'yolox') -> th.Tensor:\n self._assert_not_numpy()\n if format_ == 'yolox':\n out = th.zeros((len(self), 6), dtype=th.float32, device=self.device)\n if len(self) == 0:\n return out\n out[:, 0] = self.class_id\n out[:, 1] = self.x + 0.5 * self.w\n out[:, 2] = self.y + 0.5 * self.h\n out[:, 3] = self.w\n out[:, 4] = self.h\n out[:, 5] = th.tensor([int(i) in keep_classes for i in self.class_id]) + 0\n\n return out\n else:\n raise NotImplementedError\n\n @staticmethod\n def get_labels_as_batched_tensor(obj_label_list: List[ObjectLabels], training_classes: list = [], format_: str = 'yolox') -> th.Tensor:\n num_object_frames = len(obj_label_list)\n assert num_object_frames > 0\n max_num_labels_per_object_frame = max([len(x) for x in obj_label_list])\n assert max_num_labels_per_object_frame > 0\n if format_ == 'yolox':\n tensor_labels = []\n for labels in obj_label_list:\n obj_labels_tensor = labels.get_labels_as_tensors(keep_classes = training_classes, format_=format_)\n num_to_pad = max_num_labels_per_object_frame - len(labels)\n padded_labels = pad(obj_labels_tensor, (0, 0, 0, num_to_pad), mode='constant', value=0)\n tensor_labels.append(padded_labels)\n tensor_labels = th.stack(tensors=tensor_labels, dim=0)\n return tensor_labels\n else:\n raise NotImplementedError\n \n @staticmethod\n def labels_mapping(device, labels_: th.Tensor):\n mask = th.where(labels_[:, :, -1] == 1, th.tensor(1, device = device), th.tensor(0, device = device))\n label = labels_ * mask.unsqueeze(-1)\n label[:, :, [0, -1]] = label[:, :, [-1, 0]]\n label = label[:, :, 0:-1]\n label[:, :, 0] = 0\n return label"
},
{
"identifier": "DataType",
"path": "data/utils/types.py",
"snippet": "class DataType(Enum):\nclass DatasetType(Enum):\nclass DatasetMode(Enum):\nclass DatasetSamplingMode(StrEnum):\nclass ObjDetOutput(Enum):\n EV_REPR = auto()\n FLOW = auto()\n IMAGE = auto()\n OBJLABELS = auto()\n OBJLABELS_SEQ = auto()\n IS_PADDED_MASK = auto()\n IS_FIRST_SAMPLE = auto()\n TOKEN_MASK = auto()\n GEN1 = auto()\n GEN4 = auto()\n TRAIN = auto()\n VALIDATION = auto()\n TESTING = auto()\n RANDOM = 'random'\n STREAM = 'stream'\n MIXED = 'mixed'\n LABELS_PROPH = auto()\n PRED_PROPH = auto()\n EV_REPR = auto()\n SKIP_VIZ = auto()"
},
{
"identifier": "postprocess",
"path": "models/detection/yolox/utils/boxes.py",
"snippet": "def postprocess(prediction, conf_thre=0.7, nms_thre=0.45, mode= 'train'):\n #modified: this place use the class_conf for mask, which is need to be fixed.\n box_corner = prediction.new(prediction.shape)\n box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2\n box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2\n box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2\n box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2\n prediction[:, :, :4] = box_corner[:, :, :4]\n\n output = [None for _ in range(len(prediction))]\n for i, image_pred in enumerate(prediction):\n\n # If none are remaining => process next image\n if not image_pred.size(0):\n continue\n # Get score and class with highest confidence\n class_pred = torch.zeros(image_pred.shape[0],1).to(image_pred.device)\n if mode == 'val':\n conf_mask = keep_top_k_scores(image_pred, 1500)\n else:\n conf_mask = (image_pred[:, 4] >= conf_thre).squeeze()\n # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)\n detections = torch.cat((image_pred[:, :5], class_pred.float()), 1)\n detections = detections[conf_mask]\n if not detections.size(0):\n continue\n\n if mode == 'val':\n nms_out_index = nms_with_fixed_output(detections)\n else:\n nms_out_index = torchvision.ops.nms(\n detections[:, :4],\n detections[:, 4],\n nms_thre,\n )\n\n detections = detections[nms_out_index]\n if output[i] is None:\n output[i] = detections\n else:\n output[i] = torch.cat((output[i], detections))\n\n return output"
},
{
"identifier": "YoloXDetector",
"path": "models/detection/yolox_extension/models/detector.py",
"snippet": "class YoloXDetector(th.nn.Module):\n def __init__(self,\n model_cfg: DictConfig):\n super().__init__()\n backbone_cfg = model_cfg.backbone\n fpn_cfg = model_cfg.fpn\n head_cfg = model_cfg.head\n\n self.backbone = build_recurrent_backbone(backbone_cfg)\n\n in_channels = self.backbone.get_stage_dims(fpn_cfg.in_stages)\n self.fpn = build_yolox_fpn(fpn_cfg, in_channels=in_channels)\n\n strides = self.backbone.get_strides(fpn_cfg.in_stages)\n self.yolox_head = build_yolox_head(head_cfg, in_channels=in_channels, strides=strides)\n\n def forward_backbone(self,\n x: th.Tensor,\n previous_states: Optional[LstmStates] = None,\n token_mask: Optional[th.Tensor] = None) -> \\\n Tuple[BackboneFeatures, LstmStates]:\n with CudaTimer(device=x.device, timer_name=\"Backbone\"):\n backbone_features, states = self.backbone(x, previous_states, token_mask)\n return backbone_features, states\n\n def forward_detect(self,\n backbone_features: BackboneFeatures,\n targets: Optional[th.Tensor] = None,\n prev_reg: th.Tensor = None) -> \\\n Tuple[th.Tensor, Union[Dict[str, th.Tensor], None]]:\n device = next(iter(backbone_features.values())).device\n with CudaTimer(device=device, timer_name=\"FPN\"):\n fpn_features = self.fpn(backbone_features)\n if self.training:\n assert targets is not None\n with CudaTimer(device=device, timer_name=\"HEAD + Loss\"):\n outputs, losses = self.yolox_head(fpn_features, targets, prev_reg)\n return outputs, losses\n with CudaTimer(device=device, timer_name=\"HEAD\"):\n outputs, losses = self.yolox_head(fpn_features)\n assert losses is None\n return outputs, losses\n\n def forward(self,\n x: th.Tensor,\n previous_states: Optional[LstmStates] = None,\n retrieve_detections: bool = True,\n targets: Optional[th.Tensor] = None) -> \\\n Tuple[Union[th.Tensor, None], Union[Dict[str, th.Tensor], None], LstmStates]:\n backbone_features, states = self.forward_backbone(x, previous_states)\n outputs, losses = None, None\n if not retrieve_detections:\n assert targets is None\n return outputs, losses, states\n outputs, losses = self.forward_detect(backbone_features=backbone_features, targets=targets)\n return outputs, losses, states"
},
{
"identifier": "PropheseeEvaluator",
"path": "utils/evaluation/prophesee/evaluator.py",
"snippet": "class PropheseeEvaluator:\n LABELS = 'lables'\n PREDICTIONS = 'predictions'\n\n def __init__(self, dataset: str, downsample_by_2: bool):\n super().__init__()\n assert dataset in {'gen1', 'gen4'}\n self.dataset = dataset\n self.downsample_by_2 = downsample_by_2\n\n self._buffer = None\n self._buffer_empty = True\n self._reset_buffer()\n self.ignored = True\n\n def _reset_buffer(self):\n self._buffer_empty = True\n self._buffer = {\n self.LABELS: list(),\n self.PREDICTIONS: list(),\n }\n\n def set_ignored_to_False(self):\n self.ignored = False\n\n def _add_to_buffer(self, key: str, value: List[np.ndarray]):\n assert isinstance(value, list)\n for entry in value:\n assert isinstance(entry, np.ndarray)\n self._buffer_empty = False\n assert self._buffer is not None\n self._buffer[key].extend(value)\n\n def _get_from_buffer(self, key: str) -> List[np.ndarray]:\n assert not self._buffer_empty\n assert self._buffer is not None\n return self._buffer[key]\n\n def add_predictions(self, predictions: List[np.ndarray]):\n self._add_to_buffer(self.PREDICTIONS, predictions)\n\n def add_labels(self, labels: List[np.ndarray]):\n self._add_to_buffer(self.LABELS, labels)\n\n def reset_buffer(self) -> None:\n # E.g. call in on_validation_epoch_start\n self._reset_buffer()\n\n def has_data(self):\n return not self._buffer_empty\n\n def evaluate_buffer(self, img_height: int, img_width: int) -> Optional[Dict[str, Any]]:\n # e.g call in on_validation_epoch_end\n if self._buffer_empty:\n warn(\"Attempt to use prophesee evaluation buffer, but it is empty\", UserWarning, stacklevel=2)\n return\n\n labels = self._get_from_buffer(self.LABELS)\n predictions = self._get_from_buffer(self.PREDICTIONS)\n assert len(labels) == len(predictions)\n metrics = evaluate_list(result_boxes_list=predictions,\n gt_boxes_list=labels,\n height=img_height,\n width=img_width,\n apply_bbox_filters=True,\n downsampled_by_2=self.downsample_by_2,\n camera=self.dataset,\n ignored=self.ignored)\n return metrics"
},
{
"identifier": "to_prophesee",
"path": "utils/evaluation/prophesee/io/box_loading.py",
"snippet": "def to_prophesee(loaded_label_list: LOADED_LABELS, yolox_pred_list: YOLOX_PRED_PROCESSED, keep_classes: List = []) -> \\\n Tuple[List[np.ndarray], List[np.ndarray]]:\n \n assert len(loaded_label_list) == len(yolox_pred_list)\n loaded_label_list_proph = []\n yolox_pred_list_proph = []\n\n for loaded_labels, yolox_preds in zip(loaded_label_list, yolox_pred_list):\n # TODO: use loaded_label_to_prophesee func here\n time = None\n # --- LOADED LABELS ---\n\n loaded_labels.numpy_()\n loaded_label_proph = np.zeros((len(loaded_labels),), dtype=BBOX_DTYPE)\n for name in BBOX_DTYPE.names:\n if name == 'ignored_split':\n label = np.asarray(loaded_labels.get('class_id'), dtype=BBOX_DTYPE['class_id'])\n loaded_label_proph[name] = np.isin(label, np.array(keep_classes)).astype(dtype=BBOX_DTYPE[name])\n loaded_label_proph[name] = np.where(loaded_label_proph[name] == 0, 1, 0)\n continue\n # if name == 'class_Id':\n # loaded_label_proph[name] = np.zeros_like(np.asarray(loaded_labels.get(name), dtype=BBOX_DTYPE[name]))\n loaded_label_proph[name] = np.asarray(loaded_labels.get(name), dtype=BBOX_DTYPE[name])\n # if name =='class_id':\n # loaded_label_proph[name] = np.asarray(np.zeros_like(loaded_labels.get(name)), dtype=BBOX_DTYPE[name])\n if name == 't':\n time = np.unique(loaded_labels.get(name))\n assert time.size == 1\n time = time.item()\n\n #modified: we assign the class in keep_classes to 0\n # loaded_label_proph = np.array([(item[0], item[1], item[2], item[3], item[4], 0, item[6], item[7]) \n # for item in loaded_label_proph if int(item[5]) in keep_classes],dtype=BBOX_DTYPE)\n \n loaded_label_list_proph.append(loaded_label_proph)\n\n # --- YOLOX PREDICTIONS ---\n # Assumes batch of post-processed predictions from YoloX Head.\n # See postprocessing: https://github.com/Megvii-BaseDetection/YOLOX/blob/a5bb5ab12a61b8a25a5c3c11ae6f06397eb9b296/yolox/utils/boxes.py#L32\n # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)\n num_pred = 0 if yolox_preds is None else yolox_preds.shape[0]\n yolox_pred_proph = np.zeros((num_pred,), dtype=BBOX_DTYPE)\n if num_pred > 0:\n if isinstance(loaded_labels, np.ndarray):\n yolox_preds = yolox_preds\n else:\n yolox_preds = yolox_preds.detach().cpu().numpy()\n assert yolox_preds.shape == (num_pred, 6)\n yolox_pred_proph['t'] = np.ones((num_pred,), dtype=BBOX_DTYPE['t']) * time\n yolox_pred_proph['x'] = np.asarray(yolox_preds[:, 0], dtype=BBOX_DTYPE['x'])\n yolox_pred_proph['y'] = np.asarray(yolox_preds[:, 1], dtype=BBOX_DTYPE['y'])\n yolox_pred_proph['w'] = np.asarray(yolox_preds[:, 2] - yolox_preds[:, 0], dtype=BBOX_DTYPE['w'])\n yolox_pred_proph['h'] = np.asarray(yolox_preds[:, 3] - yolox_preds[:, 1], dtype=BBOX_DTYPE['h'])\n yolox_pred_proph['class_id'] = np.asarray(yolox_preds[:, 5], dtype=BBOX_DTYPE['class_id'])\n yolox_pred_proph['class_confidence'] = np.asarray(yolox_preds[:, 4], dtype=BBOX_DTYPE['class_confidence'])\n yolox_pred_list_proph.append(yolox_pred_proph)\n\n return loaded_label_list_proph, yolox_pred_list_proph"
},
{
"identifier": "InputPadderFromShape",
"path": "utils/padding.py",
"snippet": "class InputPadderFromShape:\n def __init__(self, desired_hw: Tuple[int, int], mode: str = 'constant', value: int = 0, type: str = 'corner'):\n \"\"\"\n :param desired_hw: Desired height and width\n :param mode: See torch.nn.functional.pad\n :param value: See torch.nn.functional.pad\n :param type: \"corner\": add zero to bottom and right\n \"\"\"\n assert isinstance(desired_hw, tuple)\n assert len(desired_hw) == 2\n assert desired_hw[0] % 4 == 0, 'Required for token mask padding'\n assert desired_hw[1] % 4 == 0, 'Required for token mask padding'\n assert type in {'corner'}\n\n self.desired_hw = desired_hw\n self.mode = mode\n self.value = value\n self.type = type\n self._pad_ev_repr = None\n self._pad_token_mask = None\n\n @staticmethod\n def _pad_tensor_impl(input_tensor: th.Tensor, desired_hw: Tuple[int, int], mode: str, value: Any) \\\n -> Tuple[th.Tensor, List[int]]:\n assert isinstance(input_tensor, th.Tensor)\n\n ht, wd = input_tensor.shape[-2:]\n ht_des, wd_des = desired_hw\n assert ht <= ht_des\n assert wd <= wd_des\n\n pad_left = 0\n pad_right = wd_des - wd\n pad_top = 0\n pad_bottom = ht_des - ht\n\n pad = [pad_left, pad_right, pad_top, pad_bottom]\n return F.pad(input_tensor, pad=pad, mode=mode, value=value if mode == 'constant' else None), pad\n\n def pad_tensor_ev_repr(self, ev_repr: th.Tensor) -> th.Tensor:\n padded_ev_repr, pad = self._pad_tensor_impl(input_tensor=ev_repr, desired_hw=self.desired_hw,\n mode=self.mode, value=self.value)\n if self._pad_ev_repr is None:\n self._pad_ev_repr = pad\n else:\n assert self._pad_ev_repr == pad\n return padded_ev_repr\n\n def pad_token_mask(self, token_mask: th.Tensor):\n assert isinstance(token_mask, th.Tensor)\n\n desired_hw = tuple(x // 4 for x in self.desired_hw)\n padded_token_mask, pad = self._pad_tensor_impl(input_tensor=token_mask, desired_hw=desired_hw,\n mode='constant', value=0)\n if self._pad_token_mask is None:\n self._pad_token_mask = pad\n else:\n assert self._pad_token_mask == pad\n return padded_token_mask"
},
{
"identifier": "BackboneFeatureSelector",
"path": "modules/utils/detection.py",
"snippet": "class Mode(Enum):\nclass BackboneFeatureSelector:\nclass EventReprSelector:\nclass REGStates:\nclass RNNStates:\n TRAIN = auto()\n VAL = auto()\n TEST = auto()\n def __init__(self):\n def reset(self):\n def add_backbone_features(self,\n backbone_features: BackboneFeatures,\n selected_indices: Optional[List[int]] = None) -> None:\n def get_batched_backbone_features(self) -> Optional[BackboneFeatures]:\n def __init__(self):\n def reset(self):\n def __len__(self):\n def add_event_representations(\n self, event_representations: th.Tensor, selected_indices: Optional[List[int]] = None) -> None:\n def get_event_representations_as_list(\n self, start_idx: int = 0, end_idx: Optional[int] = None) -> Optional[List[th.Tensor]]:\n def __init__(self):\n def _has_states(self):\n def recursive_detach(cls, inp: th.Tensor):\n def recursive_reset(cls,\n inp:th.Tensor,\n indices_or_bool_tensor: Optional[Union[List[int], torch.Tensor]] = None):\n def save_states_and_detach(self, worker_id: int, prev_reg: th.tensor) -> None:\n def get_states(self, worker_id: int):\n def reset(self, worker_id: int, indices_or_bool_tensor: Optional[Union[List[int], torch.Tensor]] = None):\n def __init__(self):\n def _has_states(self):\n def recursive_detach(cls, inp: Union[th.Tensor, List, Tuple, Dict]):\n def recursive_reset(cls,\n inp: Union[th.Tensor, List, Tuple, Dict],\n indices_or_bool_tensor: Optional[Union[List[int], torch.Tensor]] = None):\n def save_states_and_detach(self, worker_id: int, states: LstmStates) -> None:\n def get_states(self, worker_id: int) -> Optional[LstmStates]:\n def reset(self, worker_id: int, indices_or_bool_tensor: Optional[Union[List[int], torch.Tensor]] = None):\ndef mixed_collate_fn(x1: Union[th.Tensor, List[th.Tensor]], x2: Union[th.Tensor, List[th.Tensor]]):\ndef merge_mixed_batches(batch: Dict[str, Any]):"
}
] | from typing import Any, Optional, Tuple, Union, Dict
from warnings import warn
from omegaconf import DictConfig
from pytorch_lightning.utilities.types import STEP_OUTPUT
from data.genx_utils.labels import ObjectLabels
from data.utils.types import DataType, LstmStates, ObjDetOutput, DatasetSamplingMode
from models.detection.yolox.utils.boxes import postprocess
from models.detection.yolox_extension.models.detector import YoloXDetector
from utils.evaluation.prophesee.evaluator import PropheseeEvaluator
from utils.evaluation.prophesee.io.box_loading import to_prophesee
from utils.padding import InputPadderFromShape
from .utils.detection import BackboneFeatureSelector, EventReprSelector, RNNStates, REGStates, Mode, mode_2_string, \
merge_mixed_batches
import numpy as np
import pytorch_lightning as pl
import torch
import torch as th
import torch.distributed as dist
import os
import cv2
import ipdb | 9,370 | center = ((left + right) // 2, (top + bottom) // 2)
if class_id in unseen_classes:
color = (255, 165, 0)
cv2.putText(frame_copy, str(class_id), (center[0], bottom - 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
else:
color = (0, 255, 0)
cv2.rectangle(frame_copy, (left, top), (right, bottom), color, 1)
stacked_image = cv2.hconcat([frame, frame_copy])
save_path = save_dir + '{}.png'.format(index)
cv2.imwrite(save_path, stacked_image)
def concatenate_tensors(self, tensor1, tensor2, order1, order2):
D1 = tensor1.shape[0]
D2 = tensor2.shape[0]
D = D1 + D2
result_shape = (D,) + tensor1.shape[1:]
result = torch.zeros(result_shape, dtype=tensor1.dtype).to(tensor1.device)
for i, idx in enumerate(order1):
result[idx] = tensor1[i]
for i, idx in enumerate(order2):
result[idx] = tensor2[i]
return result
def subtract_lists(self, listA: list, listB: list) -> list:
return [x for x in listA if x not in listB]
def merge_dicts_and_average(self, dicts_list: list):
result_dict = {}
num_dicts = len(dicts_list)
for d in dicts_list:
for key, value in d.items():
if key in result_dict:
result_dict[key] += value
else:
result_dict[key] = value
for key in result_dict:
result_dict[key] /= num_dicts
return result_dict
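    # training_step: unrolls one event-tensor sequence through the recurrent backbone,
    # selecting backbone features and labels only at timesteps that carry valid boxes.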
def training_step(self, batch: Any, batch_idx: int) -> STEP_OUTPUT:
batch = merge_mixed_batches(batch)
data = self.get_data_from_batch(batch)
worker_id = self.get_worker_id_from_batch(batch)
mode = Mode.TRAIN
self.started_training = True
step = self.trainer.global_step
ev_tensor_sequence = data[DataType.EV_REPR]
sparse_obj_labels = data[DataType.OBJLABELS_SEQ]
is_first_sample = data[DataType.IS_FIRST_SAMPLE]
token_mask_sequence = data.get(DataType.TOKEN_MASK, None)
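        # Reset the recurrent and regression states of sequences that start fresh in this batch.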
self.mode_2_rnn_states[mode].reset(worker_id=worker_id, indices_or_bool_tensor=is_first_sample)
self.reg_states.reset(worker_id=worker_id, indices_or_bool_tensor=is_first_sample)
sequence_len = len(ev_tensor_sequence)
assert sequence_len > 0
batch_size = len(sparse_obj_labels[0])
if self.mode_2_batch_size[mode] is None:
self.mode_2_batch_size[mode] = batch_size
else:
assert self.mode_2_batch_size[mode] == batch_size
prev_states = self.mode_2_rnn_states[mode].get_states(worker_id=worker_id)
prev_reg = self.reg_states.get_states(worker_id=worker_id)
ev_repr_selector = EventReprSelector()
obj_labels = list()
predictions_list = list()
losses_list = list()
        if type(self.training_classes) != list:
            self.training_classes = list(self.training_classes.keys())
first_valid_flag = True
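        # Unroll the sequence one timestep at a time, carrying the recurrent states forward.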
for tidx in range(sequence_len):
ev_tensors = ev_tensor_sequence[tidx]
ev_tensors = ev_tensors.to(dtype=self.dtype)
ev_tensors = self.input_padder.pad_tensor_ev_repr(ev_tensors)
if token_mask_sequence is not None:
token_masks = self.input_padder.pad_token_mask(token_mask=token_mask_sequence[tidx])
else:
token_masks = None
if self.mode_2_hw[mode] is None:
self.mode_2_hw[mode] = tuple(ev_tensors.shape[-2:])
else:
assert self.mode_2_hw[mode] == ev_tensors.shape[-2:]
backbone_features, states = self.mdl.forward_backbone(x=ev_tensors,
previous_states=prev_states,
token_mask=token_masks)
prev_states = states
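            # Split the batch into elements that have labels at this timestep (valid_batch_indices)
            # and those that do not (inference_valid).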
current_labels, valid_batch_indices = sparse_obj_labels[tidx].get_valid_labels_and_batch_indices()
inference_valid = self.subtract_lists(list(range(batch_size)), valid_batch_indices)
            # gather backbone features only for the batch elements that have labels at this timestep
if len(current_labels) > 0:
backbone_feature_selector = BackboneFeatureSelector()
backbone_feature_selector.add_backbone_features(backbone_features=backbone_features,
selected_indices=valid_batch_indices)
selected_backbone_features = backbone_feature_selector.get_batched_backbone_features()
#get the label
|
def remove_elements(ori_items, moving_items):
return [elem for elem in ori_items if elem not in moving_items]
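# Lightning module wrapping the recurrent YOLOX detector together with its input padder,
# per-mode RNN/regression states and the Prophesee evaluators.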
class Module(pl.LightningModule):
def __init__(self, full_config: DictConfig):
super().__init__()
self.full_config = full_config
self.mdl_config = full_config.model
in_res_hw = tuple(self.mdl_config.backbone.in_res_hw)
self.input_padder = InputPadderFromShape(desired_hw=in_res_hw)
self.mdl = YoloXDetector(self.mdl_config)
self.mode_2_rnn_states: Dict[Mode, RNNStates] = {
Mode.TRAIN: RNNStates(),
Mode.VAL: RNNStates(),
Mode.TEST: RNNStates(),
}
self.reg_states = REGStates()
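    # setup(): resolves class splits, sampling modes and Prophesee evaluators for the
    # requested stage ('fit', 'validate' or 'test').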
def setup(self, stage: Optional[str] = None) -> None:
dataset_name = self.full_config.dataset.name
self.mode_2_hw: Dict[Mode, Optional[Tuple[int, int]]] = {}
self.mode_2_batch_size: Dict[Mode, Optional[int]] = {}
self.mode_2_psee_evaluator: Dict[Mode, Optional[PropheseeEvaluator]] = {}
self.mode_2_sampling_mode: Dict[Mode, DatasetSamplingMode] = {}
self.started_training = True
dataset_train_sampling = self.full_config.dataset.train.sampling
dataset_eval_sampling = self.full_config.dataset.eval.sampling
assert dataset_train_sampling in iter(DatasetSamplingMode)
assert dataset_eval_sampling in (DatasetSamplingMode.STREAM, DatasetSamplingMode.RANDOM)
if stage == 'fit': # train + val
self.training_classes = self.full_config.dataset.training_classes
self.unseen_classes = self.full_config.dataset.unseen_classes
self.testing_classes = self.full_config.dataset.testing_classes
self.train_config = self.full_config.training
self.train_metrics_config = self.full_config.logging.train.metrics
if self.train_metrics_config.compute:
self.mode_2_psee_evaluator[Mode.TRAIN] = PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2)
            # We set up two evaluators: index 0 for unseen classes and index 1 for all classes
self.mode_2_psee_evaluator[Mode.VAL] = [PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2),
PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2)
]
self.mode_2_sampling_mode[Mode.TRAIN] = dataset_train_sampling
self.mode_2_sampling_mode[Mode.VAL] = dataset_eval_sampling
for mode in (Mode.TRAIN, Mode.VAL):
self.mode_2_hw[mode] = None
self.mode_2_batch_size[mode] = None
self.started_training = False
elif stage == 'validate':
self.unseen_classes = self.full_config.dataset.unseen_classes
self.testing_classes = self.full_config.dataset.testing_classes
mode = Mode.VAL
self.mode_2_psee_evaluator[mode] = [PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2),
PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2)
]
self.mode_2_sampling_mode[Mode.VAL] = dataset_eval_sampling
self.mode_2_hw[mode] = None
self.mode_2_batch_size[mode] = None
elif stage == 'test':
mode = Mode.TEST
self.unseen_classes = self.full_config.dataset.unseen_classes
self.testing_classes = self.full_config.dataset.testing_classes
self.mode_2_psee_evaluator[Mode.TEST] = [PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2),
PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2)
]
self.mode_2_sampling_mode[Mode.TEST] = dataset_eval_sampling
self.mode_2_hw[mode] = None
self.mode_2_batch_size[mode] = None
else:
raise NotImplementedError
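    # forward() simply delegates to the wrapped detector.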
def forward(self,
event_tensor: th.Tensor,
previous_states: Optional[LstmStates] = None,
retrieve_detections: bool = True,
targets=None) \
-> Tuple[Union[th.Tensor, None], Union[Dict[str, th.Tensor], None], LstmStates]:
return self.mdl(x=event_tensor,
previous_states=previous_states,
retrieve_detections=retrieve_detections,
targets=targets)
def get_worker_id_from_batch(self, batch: Any) -> int:
return batch['worker_id']
def get_data_from_batch(self, batch: Any):
return batch['data']
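    # Renders the event representation, draws predictions on the left copy and ground-truth
    # boxes (unseen classes annotated with their class id) on the right copy, then saves the
    # two images side by side as a PNG in save_dir.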
def vis_and_save_image(self, ev_pr, label, pred, unseen_classes,
save_dir = '/home/zht/python_project/RVT_CAOD_v9/save_img/', threshold = 0.3, topn = 10):
files = os.listdir(save_dir)
index = len(files)
ev_pr = ev_pr.to('cpu')
assert ev_pr.shape[0] % 2 == 0
num_bins = int(ev_pr.shape[0] / 2)
height = int(ev_pr.shape[1])
width = int(ev_pr.shape[2])
ev_pr = ev_pr.permute(1, 2, 0)
ev_pr = ev_pr.numpy()
frame = np.zeros((height, width, 3), dtype=np.uint8)
for i in range(num_bins):
pos_image = (ev_pr[:, :, i + num_bins]).astype(np.uint8)
neg_image = (ev_pr[:, :, i]).astype(np.uint8)
pos_image = cv2.equalizeHist(pos_image)
neg_image = cv2.equalizeHist(neg_image)
image = np.concatenate((neg_image[..., None], np.zeros((height, width, 1), dtype=np.uint8), pos_image[..., None]), axis=-1)
frame = np.add(frame, image)
frame = frame * 255.0
frame_copy = frame.copy()
# topn = label.shape[0]
fix_num_threshold = np.partition(pred['class_confidence'], -topn)[-topn]
        threshold = min(threshold, fix_num_threshold)
mask = pred['class_confidence'] > threshold
pred = pred[mask]
for item in pred:
x, y, w, h = item['x'], item['y'], item['w'], item['h']
left = int(x)
top = int(y)
right = int(x + w)
bottom = int(y + h)
cv2.rectangle(frame, (left, top), (right, bottom), (255, 250, 250), 1)
for item in label:
x, y, w, h = item['x'], item['y'], item['w'], item['h']
class_id = item['class_id']
left = int(x)
top = int(y)
right = int(x + w)
bottom = int(y + h)
center = ((left + right) // 2, (top + bottom) // 2)
if class_id in unseen_classes:
color = (255, 165, 0)
cv2.putText(frame_copy, str(class_id), (center[0], bottom - 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
else:
color = (0, 255, 0)
cv2.rectangle(frame_copy, (left, top), (right, bottom), color, 1)
stacked_image = cv2.hconcat([frame, frame_copy])
save_path = save_dir + '{}.png'.format(index)
cv2.imwrite(save_path, stacked_image)
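    # Scatters the rows of two partial tensors back into a single tensor of size D1 + D2,
    # using order1/order2 as the target batch indices.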
def concatenate_tensors(self, tensor1, tensor2, order1, order2):
D1 = tensor1.shape[0]
D2 = tensor2.shape[0]
D = D1 + D2
result_shape = (D,) + tensor1.shape[1:]
result = torch.zeros(result_shape, dtype=tensor1.dtype).to(tensor1.device)
for i, idx in enumerate(order1):
result[idx] = tensor1[i]
for i, idx in enumerate(order2):
result[idx] = tensor2[i]
return result
def subtract_lists(self, listA: list, listB: list) -> list:
return [x for x in listA if x not in listB]
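    # Sums the values of a list of dicts key by key and divides by the number of dicts
    # (an element-wise average over shared keys).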
def merge_dicts_and_average(self, dicts_list: list):
result_dict = {}
num_dicts = len(dicts_list)
for d in dicts_list:
for key, value in d.items():
if key in result_dict:
result_dict[key] += value
else:
result_dict[key] = value
for key in result_dict:
result_dict[key] /= num_dicts
return result_dict
def training_step(self, batch: Any, batch_idx: int) -> STEP_OUTPUT:
batch = merge_mixed_batches(batch)
data = self.get_data_from_batch(batch)
worker_id = self.get_worker_id_from_batch(batch)
mode = Mode.TRAIN
self.started_training = True
step = self.trainer.global_step
ev_tensor_sequence = data[DataType.EV_REPR]
sparse_obj_labels = data[DataType.OBJLABELS_SEQ]
is_first_sample = data[DataType.IS_FIRST_SAMPLE]
token_mask_sequence = data.get(DataType.TOKEN_MASK, None)
self.mode_2_rnn_states[mode].reset(worker_id=worker_id, indices_or_bool_tensor=is_first_sample)
self.reg_states.reset(worker_id=worker_id, indices_or_bool_tensor=is_first_sample)
sequence_len = len(ev_tensor_sequence)
assert sequence_len > 0
batch_size = len(sparse_obj_labels[0])
if self.mode_2_batch_size[mode] is None:
self.mode_2_batch_size[mode] = batch_size
else:
assert self.mode_2_batch_size[mode] == batch_size
prev_states = self.mode_2_rnn_states[mode].get_states(worker_id=worker_id)
prev_reg = self.reg_states.get_states(worker_id=worker_id)
ev_repr_selector = EventReprSelector()
obj_labels = list()
predictions_list = list()
losses_list = list()
        if type(self.training_classes) != list:
            self.training_classes = list(self.training_classes.keys())
first_valid_flag = True
for tidx in range(sequence_len):
ev_tensors = ev_tensor_sequence[tidx]
ev_tensors = ev_tensors.to(dtype=self.dtype)
ev_tensors = self.input_padder.pad_tensor_ev_repr(ev_tensors)
if token_mask_sequence is not None:
token_masks = self.input_padder.pad_token_mask(token_mask=token_mask_sequence[tidx])
else:
token_masks = None
if self.mode_2_hw[mode] is None:
self.mode_2_hw[mode] = tuple(ev_tensors.shape[-2:])
else:
assert self.mode_2_hw[mode] == ev_tensors.shape[-2:]
backbone_features, states = self.mdl.forward_backbone(x=ev_tensors,
previous_states=prev_states,
token_mask=token_masks)
prev_states = states
current_labels, valid_batch_indices = sparse_obj_labels[tidx].get_valid_labels_and_batch_indices()
inference_valid = self.subtract_lists(list(range(batch_size)), valid_batch_indices)
            # gather backbone features only for the batch elements that have labels at this timestep
if len(current_labels) > 0:
backbone_feature_selector = BackboneFeatureSelector()
backbone_feature_selector.add_backbone_features(backbone_features=backbone_features,
selected_indices=valid_batch_indices)
selected_backbone_features = backbone_feature_selector.get_batched_backbone_features()
#get the label | labels_yolox = ObjectLabels.get_labels_as_batched_tensor(obj_label_list=current_labels, training_classes = self.training_classes,format_='yolox') | 0 | 2023-12-29 04:04:34+00:00 | 12k |
Wangyuhao06/2022-adhoc | main.py | [
{
"identifier": "Environment",
"path": "src/env.py",
"snippet": "class Environment():\n #初始化环境\n def __init__(self):\n #初始数据-最大节点数\n self.node_max=NODE_MAX\n self.node_space_size=NODE_MAX\n self.node_moving_area=MOV_AREA\n #初始化二维平面\n self.geo_area = random_waypoint(self.node_max, dimensions=(MOV_AREA, MOV_AREA), velocity=(10, 15), wt_max=1.0)\n self.position=0\n #初始化随机相邻矩阵\n self.topology = np.zeros((self.node_space_size,self.node_space_size))\n self.topology[0:self.node_max,0:self.node_max] = np.random.randint(0,2,(self.node_max,self.node_max))\n for i in range(self.node_max):\n self.topology[i,i] = 1\n for j in range(self.node_max):\n #构建双向图\n if self.topology[i,j] == 1:\n self.topology[j,i] = 1\n #初始化节点动作空间\n self.topology_actSpace=[]\n #初始化频谱块元组-----(0,[])表示(占用与否,[占用transtaskID列表]) \n self.freqB_list=([],[],[],[],[],[],[],[],[],[]) #((0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]))\n self.freqB_use_history=([],[],[],[],[],[],[],[],[],[])\n #初始化传输事件列表\n self.trans_task_ID_inTR=[]\n self.trans_task_list=[]\n self.trans_task_cnt=0 # id计数器\n #初始化包列表\n self.amount_poisson_list = np.random.poisson(lam=LAMDA,size=MAX_TIME)#包数量初始化\n self.size_normal_list = ((np.random.normal(0,1,MAX_TIME*2)*16+16)//8)*8#包大小初始化\n self.pack_use_cnt=0#包序号计数器\n self.packets_list=[]#包列表\n self.packets_live_id=[]\n #初始化节点列表\n self.node_list=[]\n self.live_node_ID_list=[]\n for i in range(self.node_max):\n locals()['node_'+str(i)] = Node(i)\n self.node_list.append(locals()['node_'+str(i)])\n self.live_node_ID_list.append(i)\n #噪声系数\n self.noise_list = np.random.rayleigh(1,MAX_TIME*2)#*NOISE_CONST/2\n #统计参数\n self.envTr_time=0\n self.allNode_pw=0\n self.allNode_delay=0\n self.time_avg=0\n self.arrive_time=1\n self.end=0\n self.terminate=0\n \n self.packet_arrive_success=[]\n self.agent_arrive=[]\n for i in range(NODE_MAX):\n self.packet_arrive_success.append(0)#节点作为 |源节点| 发的包成功到达数\n self.agent_arrive.append(0)#节点作为 |最后一个中间节点| 发的包成功到达数\n # self.sum_packet_done_rate=0\n #四元组\n self.all_ob=np.array([[0]*OBS_LEN]*NODE_MAX)\n self.reward=np.array([1]*self.node_max)\n self.para_reward=np.array([1]*self.node_max) \n \n def generate_packet(self,cur_time):\n packetsList_temp=[]\n packets_cnt=self.amount_poisson_list[cur_time]\n for i in range(packets_cnt):\n nodes_temp = random.sample(self.live_node_ID_list,2)\n locals()['packet_'+str(self.pack_use_cnt)]=Packet(self.pack_use_cnt,abs(self.size_normal_list[self.pack_use_cnt])+8,nodes_temp[0],nodes_temp[1],cur_time)\n self.packets_list.append(locals()['packet_'+str(self.pack_use_cnt)])\n self.packets_live_id.append(self.pack_use_cnt)\n packetsList_temp.append(locals()['packet_'+str(self.pack_use_cnt)])\n self.node_list[nodes_temp[0]].packets_ToSend_id.append(self.pack_use_cnt)\n self.node_list[nodes_temp[0]].packets_id_list.append(self.pack_use_cnt)\n self.pack_use_cnt+=1\n return packetsList_temp\n \n #传输任务更新\n def trans_task_update(self,cur_time):\n \n if len(self.trans_task_ID_inTR)>0 and len(self.trans_task_list)>0:\n #所有在传传输任务\n for trans_temp_id in self.trans_task_ID_inTR:\n task_finish=self.trans_task_list[trans_temp_id].Trans_task_update()\n node_send_id,node_rec_id,packet_id=self.trans_task_list[trans_temp_id].show_info()\n #包传输更新\n self.packets_list[packet_id].time_use+=1\n #节点更新\n # self.node_list[node_send_id].next_hop_id=node_rec_id\n if node_send_id!=node_rec_id: \n self.node_list[node_send_id].power_list.append(self.trans_task_list[trans_temp_id].power_consume[0])\n self.node_list[node_send_id].current_power_send=self.trans_task_list[trans_temp_id].power_consume[0]\n 
self.node_list[node_send_id].energy_consumption+=self.trans_task_list[trans_temp_id].power_consume[0]\n\n self.node_list[node_rec_id].power_list.append(self.trans_task_list[trans_temp_id].power_consume[1])\n self.node_list[node_rec_id].current_power_receive=self.trans_task_list[trans_temp_id].power_consume[1]\n self.node_list[node_rec_id].energy_consumption+=self.trans_task_list[trans_temp_id].power_consume[1]\n #统计参数更新\n self.envTr_time+=1\n \n #trans任务完成更新\n if task_finish and self.topology[node_send_id,node_rec_id]==1 :\n #更新包与节点\n # T-T清除\n self.trans_task_ID_inTR.remove(trans_temp_id)\n # 包属性清除\n self.packets_list[packet_id].in_TR=0\n self.packets_list[packet_id].cur_trans_task_id=0\n self.packets_list[packet_id].cur_node_id=node_rec_id\n # 发送节点属性清除\n self.node_list[node_send_id].packets_ToSend_id.remove(packet_id)\n self.node_list[node_send_id].trans_task_send.get()\n self.node_list[node_send_id].sending_flag=0\n self.node_list[node_send_id].current_amp_send=0\n self.node_list[node_send_id].current_power_send=0\n # 接收节点属性清除\n self.node_list[node_rec_id].trans_taskID_rec.remove(trans_temp_id)\n if len(self.node_list[node_rec_id].trans_taskID_rec)==0:\n self.node_list[node_rec_id].rec_flag=0\n # self.node_list[node_rec_id].current_amp_receive=0\n self.node_list[node_rec_id].current_power_receive=0\n # 频谱环境更新(频谱块release)\n freqB_ID_now=0\n for freqB_ocp_now in self.trans_task_list[trans_temp_id].FreqB_occup:\n if freqB_ocp_now and node_send_id!=node_rec_id:\n self.freqB_list[freqB_ID_now].remove(node_send_id)\n freqB_ID_now+=1\n\n #判断是否到达目的地 \n if self.packets_list[packet_id].cur_node_id==self.packets_list[packet_id].dst_node_id and self.topology[node_send_id,node_rec_id]==1:\n # 可通信到达\n self.packets_list[packet_id].arrive_flag=1\n self.packets_live_id.remove(packet_id)\n ### 记录接受节点和发出节点的奖励 ###\n self.packet_arrive_success[self.packets_list[packet_id].ori_node_id]+=1\n self.agent_arrive[node_send_id]+=1 \n # self.arrive_time += self.trans_task_list[trans_temp_id].time_use # datacheck3\n self.arrive_success += 1\n elif self.topology[node_send_id,node_rec_id]==1 :\n #可通信没到达\n self.node_list[node_rec_id].packets_ToSend_id.append(packet_id)\n # self.arrive_time += (cur_time - self.packets_list[packet_id].time_start) # datacheck3\n else:\n #不可通信\n self.trans_task_list[trans_temp_id].time_cnt=0\n self.trans_task_list[trans_temp_id].finish_flag=0\n # for packet_id in self.packets_live_id:\n # #判断是否到达目的地 \n # if self.packets_list[packet_id].cur_node_id==self.packets_list[packet_id].dst_node_id or self.packets_list[packet_id].arrive_flag==1:\n # #到达\n # continue\n # # self.arrive_time += self.trans_task_list[trans_temp_id].time_use\n # else:#没到达\n # self.arrive_time += 1\n self.arrive_time += len(self.packets_live_id)\n \n \n \n def all_agent_observe(self): \n all_ob=[]\n # fBlst=[0,0,0,0,0,0,0,0,0,0]\n degree=0\n pack_storage=0\n pw_avg_all=0\n dst_node=-1\n\n # for node_id in range(self.node_max):\n # if len (self.node_list[node_id].packets_ToSend_id):\n # packet_toSend_id=self.node_list[node_id].packets_ToSend_id[0]\n # dst_node=self.packets_list[packet_toSend_id].dst_node_id\n \n # else:\n # dst_node=-1\n \n # for node_id in self.live_node_ID_list:\n # for node_id in range(self.node_max):\n # fb_tp_id=0\n # for fb_tp in self.node_list[node_id].current_freqB:\n # fBlst[fb_tp_id]=fb_tp\n # fb_tp_id+=1\n \n # for node_id in self.live_node_ID_list:\n #neibor_idlist=self.node_list[node_id].neibor_idlist[:]#深复制\n #receive ob?\n #neibor_idlist.append(node_id)\n #neibor_vector=[]\n #for i in 
neibor_idlist:\n \n # for node_id in range(self.node_max):\n # pwl=self.node_list[node_id].power_list\n # if len(pwl)>=BACKTIME:\n # pwlst=pwl[len(pwl)-BACKTIME:len(pwl)]\n # else:\n # pwlst=pwl \n # if len(pwlst)>0:\n # pw_avg=sum(pwlst)/len(pwlst)\n # else:\n # pw_avg=0\n # pw_avg_all+=pw_avg\n \n for node_id in range(self.node_max):\n pwl=self.node_list[node_id].power_list\n if len(pwl)>=BACKTIME:\n pwlst=pwl[len(pwl)-BACKTIME:len(pwl)]\n else:\n pwlst=pwl \n if len(pwlst)>0:\n pw_avg=sum(pwlst)/len(pwlst)\n else:\n pw_avg=0\n \n if len (self.node_list[node_id].packets_ToSend_id)>0:\n packet_toSend_id=self.node_list[node_id].packets_ToSend_id[0]\n dst_node=self.packets_list[packet_toSend_id].dst_node_id \n else:\n dst_node=-1\n \n pw=[]\n pw.append(pw_avg)\n \n dgr=[]\n degree=len(self.topology_actSpace[node_id][0])-1\n dgr.append(degree)\n \n pcs=[]\n pack_storage=len(self.node_list[node_id].packets_ToSend_id)\n pcs.append(pack_storage)\n \n dn=[]\n dn.append(dst_node)\n \n all_ob.append(pw+dgr+pcs+dn)\n #self.node_list[node_id].ob_send=neibor_vector\n \n return np.array(all_ob)\n \n \n # def generate_trans_task(self,trans_id,send_node,rec_node,packet):\n # trans_task_temp=Trans_task(trans_id,send_node,rec_node,packet)\n # return trans_task_temp\n \n def env_check_right(self):\n for node_id in self.live_node_ID_list:\n if self.node_list[node_id].trans_task_send.empty():\n assert self.node_list[node_id].sending_flag == 0\n elif not self.node_list[node_id].trans_task_send.empty():\n assert self.node_list[node_id].sending_flag == 1\n st_temp=self.node_list[node_id].trans_task_send.get()\n self.node_list[node_id].trans_task_send.put(st_temp)#无损使用队列内容\n s_node_send_id,s_node_rec_id,s_packet_id=st_temp.show_info()\n assert node_id==s_node_send_id\n # assert self.node_list[node_id].next_hop_id==s_node_rec_id\n assert self.node_list[node_id].packets_ToSend_id[0]==s_packet_id\n \n elif self.node_list[node_id].trans_task_rec.empty():\n assert self.rec_flag == 0\n elif not self.node_list[node_id].trans_task_rec.empty():\n assert self.node_list[node_id].rec_flag == 1\n rt_temp=self.node_list[node_id].trans_task_rec.get()\n self.node_list[node_id].trans_task_rec.put(rt_temp)#无损使用队列内容\n r_node_send_id,r_node_rec_id,r_packet_id=rt_temp.show_info()\n assert node_id==r_node_rec_id\n # assert self.node_list[node_id].next_hop_id==s_node_rec_id\n assert self.node_list[node_id].packets_ToSend_id[0] != r_packet_id\n \n return 0 \n \n\n def topology_update(self,cur_time,rand_change):\n self.topology = np.zeros((NODE_MAX,NODE_MAX))\n ################--------随机更改拓扑结构--------################\n if rand_change:\n positions=next(self.geo_area)\n self.position = positions\n for a in range(NODE_MAX):\n for b in range(NODE_MAX):\n if np.linalg.norm(positions[a]-positions[b]) <= COM_RANGE:\n self.topology[a,b]=1\n self.topology[b,a]=1\n else:\n self.topology[a,b]=0\n self.topology[b,a]=0\n # if np.random.rand()<DELTA and cur_time%30==0:\n # for i in np.random.randint(0,self.node_max,np.random.randint(3)+1):\n # self.topology[i,:]=np.random.randint(0,2,self.node_max)\n # self.topology[i,i] = 1\n # for j in range(self.node_max):\n # #构建双向图\n # if self.topology[i,j] == 1:\n # self.topology[j,i] = 1\n # print(positions)\n # print(\"****************\")\n # print(self.topology)\n # print(\"------------------------------------\")\n ################--------更新邻域--------################\n self.live_node_ID_list=[]\n self.topology_actSpace=[]\n for i in range(self.topology.shape[0]):\n if any(self.topology[i,:]):\n TPtemp = 
np.nonzero(self.topology[i,:])\n # self.node_list[i].neibor_idlist=TPtemp\n self.topology_actSpace.append(TPtemp)\n self.live_node_ID_list.append(i)\n else:\n TPtemp = -1\n self.topology_actSpace.append(TPtemp)\n return self.topology\n \n \n def get_state_reward(self):\n \n return self.topology,self.all_ob,self.reward \n \n def time_step(self,cur_time,action):\n \n self.packet_arrive_success=[]\n self.agent_arrive=[]\n for i in range(NODE_MAX):\n self.packet_arrive_success.append(0)\n self.agent_arrive.append(0)\n self.arrive_success=0\n \n # self.env_check_right()\n topology_now=self.topology_update(cur_time,1)\n self.generate_packet(cur_time)\n self.all_ob=self.all_agent_observe()\n self.trans_task_update(cur_time)\n for node_index in self.live_node_ID_list :\n if len(self.node_list[node_index].packets_ToSend_id)>0 and self.node_list[node_index].sending_flag!=1:\n packet_toSend_id=self.node_list[node_index].packets_ToSend_id[0]\n #包未到达且非在传----->生成trans_task\n if self.packets_list[packet_toSend_id].arrive_flag==0 and self.packets_list[packet_toSend_id].in_TR==0:\n #传输和接收节点决策\n send_node=self.node_list[node_index]\n Action=action[node_index]#######################################################\n next_hop_id,current_freqB,current_amp_send=Action[0],Action[1:N_ACTION_C],Action[N_ACTION_C]\n send_node.next_hop_id=next_hop_id \n rec_node=self.node_list[next_hop_id]\n current_amp_rec=RECAMP\n \n self.node_list[node_index].current_freqB=current_freqB\n self.node_list[node_index].next_hop_id=next_hop_id\n self.node_list[node_index].current_amp_send=current_amp_send\n #频谱环境更新\n freqB_ID_now=0\n for fB_ocp in current_freqB:\n if node_index!=next_hop_id and fB_ocp:\n self.freqB_list[freqB_ID_now].append(node_index)\n self.freqB_use_history[freqB_ID_now].append(node_index)\n freqB_ID_now+=1\n #T-T生成与T-T环境更新\n trans_task_now=Trans_task(self.trans_task_cnt,send_node,rec_node,self.packets_list[packet_toSend_id])\n trans_task_now.SNR_C=self.SNR_cac_update(cur_time,trans_task_now,current_amp_send,current_freqB,current_amp_rec)\n trans_task_now.time_use=int(trans_task_now.packsize/(trans_task_now.SNR_C[1]))+1\n \n if node_index==next_hop_id:\n trans_task_now.time_use=1#节点内部等待\n \n #节点与包写入\n #发送节点任务、标志更新\n self.node_list[node_index].trans_task_send.put_nowait(trans_task_now)\n self.node_list[node_index].sending_flag=1\n #接收节点任务、标志更新\n self.node_list[next_hop_id].trans_taskID_rec.append(trans_task_now.id)\n self.node_list[next_hop_id].rec_flag=1\n #包任务、标志更新\n self.packets_list[packet_toSend_id].cur_trans_task_id=self.trans_task_cnt\n self.packets_list[packet_toSend_id].in_TR=1\n #T-T环境写入\n self.trans_task_ID_inTR.append(trans_task_now.id)\n self.trans_task_list.append(trans_task_now)\n self.trans_task_cnt+=1\n #reward清算\n #总传输时间为self.envTr_time,总时间为cur_time\n packet_done_rate=1-round((len(self.packets_live_id)+0.1)/(len(self.packets_list)+0.1),4)#包传输完成率为packet_done_rate\n # self.avg_packet_done_rate += packet_done_rate\n # self.time_avg+=self.envTr_time/(1+len(self.packets_list)-len(self.packets_live_id))\n # self.time_avg+=packet_done_rate\n # self.time_avg+=self.arrive_time/((1+len(self.packets_list)-len(self.packets_live_id))*(packet_done_rate))\n \n # print(\"pdr: \"+str(packet_done_rate))\n if packet_done_rate<=0.03:\n packet_done_rate=0.03\n \n self.time_avg+=self.arrive_time/(1+len(self.packets_list)-len(self.packets_live_id))#*(packet_done_rate)\n \n # if len(self.packets_live_id) == 0:\n # self.terminate=1\n if len(self.packets_live_id) == 0:\n self.end=1\n \n for i in range(self.node_max):\n # 
pw_sum=sum(self.node_list[i].power_list)\n if len(self.node_list[i].power_list)>0:\n pw_now=self.node_list[i].power_list[-1]+1\n else:\n pw_now=1\n if not self.node_list[i].trans_task_send.empty():\n st_temp=self.node_list[i].trans_task_send.get()\n self.node_list[i].trans_task_send.put_nowait(st_temp)#无损使用队列内容\n trans_delay=st_temp.time_use+1\n else:\n trans_delay=1\n \n self.para_reward[i]= -trans_delay*pw_now\n # if packet_done_rate<=0.05:\n # packet_done_rate=0.05\n # self.reward[i]=round((packet_done_rate*cur_time*cur_time*self.node_max*DEVICE_ENERGY+0.0001)/(pw_sum*self.envTr_time+0.0001),6)\n # self.reward[i]=round((packet_done_rate*cur_time*self.node_max*DEVICE_ENERGY+1000)/(pw_sum+1000),6)\n # self.reward[i]=round(-(pw_sum*self.envTr_time)/((packet_done_rate+0.1)*(cur_time+1)*(cur_time+1)*self.node_max),6)#RWD2\n # self.reward[i]=round(-(10*pw_sum*self.envTr_time)/((packet_done_rate)*(cur_time+1)*(cur_time+1)*self.node_max),6)#RWD3\n # self.reward[i]=round(-(pw_sum*self.envTr_time)/((packet_done_rate+0.001)*(cur_time+1)*(cur_time+1)*self.node_max),6)\n # self.reward[i]=round(-(10*pw_sum*self.envTr_time)/((packet_done_rate)*(cur_time+1)*(cur_time+1)*self.node_max),6)#RWD3\n #self.reward[i]=round(-(10*log10(10+pw_sum)*self.arrive_time)/((1+len(self.packets_list)-len(self.packets_live_id))*(packet_done_rate)*(cur_time+1)*self.node_max),6)#RWD4\n # self.reward[i]=round(-(10*pw_sum*self.arrive_time)/((1+len(self.packets_list)-len(self.packets_live_id))*(packet_done_rate)*(cur_time+1)*self.node_max),6)#RWD5\n # self.reward[i]=-(10*log10(10+pw_sum)*(self.arrive_time+1))/((1+len(self.packets_list))*(packet_done_rate)*(cur_time+1)*self.node_max)#RWD6\n # self.reward[i]=self.arrive_success*1000-(self.arrive_time/(1+len(self.packets_list))*log10(10+pw_sum/(cur_time+1)))#RWD7\n # self.reward[i]=self.terminate*10000+self.arrive_success*300-100*self.arrive_time/(1+len(self.packets_live_id))/len(self.packets_list)#RWD8\n # self.reward[i]=(self.terminate*10000+self.arrive_success*1000-len(self.packets_live_id))/self.node_max#RWD9\n \n # ###RWD10###\n # if self.agent_arrive[i]==0 and self.packet_arrive_success[i]==0 and self.terminate!=1 :\n # self.reward[i] = - len(self.node_list[i].packets_ToSend_id) + 10000*self.end#*(1-self.terminate) #等待时延\n # elif self.agent_arrive[i]>0 or self.packet_arrive_success[i]>0 and self.terminate!=1:\n # # self.reward[i]=1000*self.agent_arrive[i]+1000*self.packet_arrive_success[i]+10000*self.terminate\n # self.reward[i] = 1000*self.agent_arrive[i] + 10000*self.end#*(1-self.terminate) \n # elif self.terminate:\n # self.reward[i] = 2000\n # # self.reward[i] = 1000*self.agent_arrive[i] + 1000*self.packet_arrive_success[i] -len(self.node_list[i].packets_ToSend_id)\n # ###########\n \n ###RWD11###\n if self.agent_arrive[i]==0 and self.packet_arrive_success[i]==0 :\n self.reward[i] = (- len(self.node_list[i].packets_ToSend_id) - 100*trans_delay) #等待时延 + 传输时延\n elif self.agent_arrive[i]>0 or self.packet_arrive_success[i]>0 :\n self.reward[i] = 1000*self.agent_arrive[i] #+ self.para_reward[i] #本节点作为 |最后一个中间节点| 发的包成功到达数 \n ###########\n \n self.allNode_delay+=trans_delay\n self.allNode_pw+=round(pw_now,6)\n \n if len(self.packets_live_id) == 0:\n self.terminate=1\n \n \n # self.time_avg+=self.envTr_time/(1+len(self.packets_list)-len(self.packets_live_id))*(cur_time+1)\n print(\"pdr: \"+str(packet_done_rate)+\" rwd: \"+str(sum(self.reward)))\n return topology_now,self.all_ob,self.reward,self.para_reward,self.terminate\n\n \n \n def 
SNR_cac_update(self,cur_time,trans_task,current_amp_send,current_freqB,current_amp_rec):\n trans_task_temp=trans_task\n node_send_id,node_rec_id,packet_id=trans_task_temp.show_info()\n trans_energy=round(current_amp_send*current_amp_rec*self.packets_list[packet_id].size*PACKENERGY,6)\n noise=NOISE_CONST\n SINR_fB=[]\n Capacity=1\n node_range = np.linalg.norm(self.position[node_send_id]-self.position[node_rec_id])\n for fB_id in range(len(current_freqB)):\n inference_temp=noise\n if current_freqB[fB_id]:\n node_list_temp=self.freqB_list[fB_id]\n for i in node_list_temp:\n if i==node_send_id:\n continue\n elif i in self.topology_actSpace[node_rec_id][0]:\n if self.node_list[i].sending_flag==1:\n ts_ttemp=self.node_list[i].trans_task_send.get_nowait()\n oth_node_send_id,oth_node_rec_id,oth_packet_id=ts_ttemp.show_info()\n inference_temp+=round(self.node_list[oth_node_send_id].current_amp_send*RECAMP*self.packets_list[oth_packet_id].size*PACKENERGY,6)\n self.node_list[i].trans_task_send.put_nowait(ts_ttemp)#无损使用队列内容\n Sinr=round(trans_energy*(10**(-abs(self.noise_list[cur_time])))*10**(-node_range/100/COM_RANGE)/inference_temp,6)\n Capacity+=round(8*4*log2(1+Sinr),6)\n SINR_fB.append(Sinr)\n return (SINR_fB,Capacity)\n \n \n def reset(self):\n #初始化二维平面\n self.geo_area = random_waypoint(self.node_max, dimensions=(MOV_AREA, MOV_AREA), velocity=(10, 15), wt_max=1.0)\n #初始化随机相邻矩阵\n self.topology = np.zeros((self.node_space_size,self.node_space_size))\n self.topology[0:self.node_max,0:self.node_max] = np.random.randint(0,2,(self.node_max,self.node_max))\n for i in range(self.node_max):\n self.topology[i,i] = 1\n for j in range(self.node_max):\n #构建双向图\n if self.topology[i,j] == 1:\n self.topology[j,i] = 1\n #初始化节点动作空间\n self.topology_actSpace=[]\n #初始化频谱块元组-----(0,[])表示(占用与否,[占用transtaskID列表]) \n self.freqB_list=([],[],[],[],[],[],[],[],[],[]) #((0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]))\n self.freqB_use_history=([],[],[],[],[],[],[],[],[],[])\n #初始化传输事件列表\n self.trans_task_ID_inTR=[]\n self.trans_task_list=[]\n self.trans_task_cnt=0 # id计数器\n #初始化包列表\n self.amount_poisson_list = np.random.poisson(lam=LAMDA,size=MAX_TIME)#包数量初始化\n self.size_normal_list = ((np.random.normal(0,1,MAX_TIME*2)*16+16)//8)*8#包大小初始化\n self.pack_use_cnt=0#包序号计数器\n self.packets_list=[]#包列表\n self.packets_live_id=[]\n #初始化节点列表\n self.node_list=[]\n self.live_node_ID_list=[]\n for i in range(self.node_max):\n locals()['node_'+str(i)] = Node(i)\n self.node_list.append(locals()['node_'+str(i)])\n self.live_node_ID_list.append(i)\n #统计参数\n self.envTr_time=0\n self.allNode_pw=0\n self.allNode_delay=0\n self.time_avg=0\n self.arrive_time=1\n # self.arrive_success=0\n self.terminate=0\n self.end=0\n self.packet_arrive_success=[]\n self.agent_arrive=[]\n for i in range(NODE_MAX):\n self.packet_arrive_success.append(0)\n self.agent_arrive.append(0)\n #四元组\n self.all_ob=np.array([[0]*OBS_LEN]*NODE_MAX)\n self.reward=np.array([1]*self.node_max) "
},
{
"identifier": "Node",
"path": "src/node.py",
"snippet": "class Node(object):\n def __init__(self,id_node):\n super(Node, self).__init__()\n #multi-agent sys setting\n self.node_max=36\n self.act_range=self.node_max-1 #最大邻居范围\n # current agent-property setting\n self.id=id_node#该节点id\n # 1 - packets\n self.packets_ToSend_id=[]#该节点当前待传的包\n self.packets_id_list=[]#该节点至今为止保存过的包id\n \n self.sending_flag=0\n self.rec_flag=0\n \n self.trans_task_send=Queue(maxsize=1)#该节点当前传输的任务\n self.trans_taskID_rec=[]#该节点当前接收的任务\n # 2 - energy\n self.current_amp_send=0#节点当前发送增益--------动作\n #self.current_amp_receive=0#节点当前接收增益--------动作\n \n self.current_power_send=0#节点当前发送功率\n self.current_power_receive=0#节点当前接收功率\n self.power_list=[]#节点使用能量记录\n \n self.energy_consumption=0#截至现在能量消耗\n # 3 - freq\n self.current_freqB=[1]#当前选用频谱块--------动作\n self.freqB_list=[1]#频谱块历史\n # 4 - topology\n self.neibor_idlist=[]\n self.next_hop_id=-1#下一条节点id--------动作\n # 5 - observation\n #self.ob_send=[]\n \n # def observation_rec(self,send_node):\n # if len(self.ob_send)==0 or len(send_node.ob_send)==0 :\n # raise ValueError(\"send observation unfinished\")\n # self.ob_rec.append(self.ob_send[-1])\n # self.ob_rec.append(send_node.ob_send[-1])\n # return self.ob_rec\n \n \n def get_send_action(self,ob,action_space):\n \n ###缺省决策###\n \n #改变属性\n return self.current_amp_send,self.current_freqB,self.next_hop_id\n \n def get_rec_action(self,ob):\n \n ###缺省决策###\n \n #改变属性\n return self.current_amp_receive "
},
{
"identifier": "Packet",
"path": "src/packet.py",
"snippet": "class Packet(object):\n def __init__(self,id_packet,packet_size,ori_node_id,dst_node_id,time_start_0):\n super(Packet, self).__init__()\n self.id=id_packet\n self.size=packet_size\n #节点属性\n self.ori_node_id=ori_node_id\n self.cur_node_id=ori_node_id\n self.dst_node_id=dst_node_id\n self.node_list=[ori_node_id]\n #T-T属性\n self.cur_trans_task_id=-100\n self.in_TR=0\n self.trans_task_IDlist=[]\n #路由属性\n self.time_start=time_start_0\n self.time_use=0\n self.arrive_flag=0\n \n def packet_trans_update(self,trans_task):\n if trans_task.trans_property[2]!=self.id:\n raise ValueError('trans_task not matched')\n self.cur_trans_task_id=trans_task.id"
},
{
"identifier": "Trans_task",
"path": "src/transtask.py",
"snippet": "class Trans_task(object):\n def __init__(self,trans_id,node_send,node_rec,packet):\n self.id=trans_id\n self.trans_property=(node_send.id,node_rec.id,packet.id)#基本属性\n self.packsize=packet.size\n ####frequency block info####\n self.FreqB_occup=node_send.current_freqB #占用频谱块id\n ####SINR and Capacity####\n self.SNR_C=([],1)#Y(SNR,Capacity)-----------------[X(timeslot1:SNR,Capacity),(timeslot2:SNR,Capacity),...]\n ####time of trans####\n self.time_use=1#int(self.packsize/self.SNR_C[1])+1\n self.time_cnt=0\n self.finish_flag=0\n ####energy setting####\n self.energy_property = (node_send.current_amp_send,RECAMP)\n self.energy_consume=(node_send.current_amp_send*packet.size*PACKENERGY,RECAMP*packet.size*PACKENERGY)\n self.power_consume=(round(node_send.current_amp_send*packet.size*PACKENERGY/self.time_use,6),round(RECAMP*packet.size*PACKENERGY/self.time_use,6))\n \n def show_info(self):\n return self.trans_property[0],self.trans_property[1],self.trans_property[2]\n \n def Trans_task_update(self):\n if self.finish_flag:\n return 1\n if self.time_cnt>=self.time_use:\n self.finish_flag=1\n return 1\n elif self.time_cnt<self.time_use:\n self.time_cnt+=1\n return 0\n \n \n #trans_task=tuple([],{},(node_send_id,node_send_amp,node_rec_id,node_rec_amp,packet_id),0)\n #tuple:([占用频谱块id],{(timeslot1:SNR,Capacity),(timeslot2:SNR,Capacity),...},(基本属性:发送节点id,发送增益,接收节点id,接收增益,包id),完成标志位)"
},
{
"identifier": "DGN",
"path": "src/DGN.py",
"snippet": "class DGN(nn.Module):\n\tdef __init__(self,n_agent,num_inputs,hidden_dim,num_actions):\n\t\tsuper(DGN, self).__init__()\n\t\t\n\t\tself.encoder = Encoder(num_inputs,hidden_dim)\n\t\tself.att_1 = AttModel(n_agent,hidden_dim,hidden_dim,hidden_dim)\n\t\t# self.att_2 = AttModel(n_agent,hidden_dim,hidden_dim,hidden_dim)\n\t\tself.q_net = Q_Net(hidden_dim,num_actions)\n\t\t\n\tdef forward(self, x, mask):\n\t\th1 = self.encoder(x)\n\t\th2 = self.att_1(h1, mask)\n\t\t# h3 = self.att_2(h2, mask)\n\t\tq = self.q_net(h2)\n\t\treturn q "
},
{
"identifier": "DPG",
"path": "src/DGN.py",
"snippet": "class DPG(nn.Module):\n def __init__(self,n_agent,din=11,hidden_dim=128,dout=11,init_w = 3e-3):\n super(DPG, self).__init__()\n self.fc1 = nn.Linear(din, hidden_dim)\n self.fc2 = nn.Linear(hidden_dim,dout) \n # uniform_将tensor用从均匀分布中抽样得到的值填充。参数初始化\n self.fc2.weight.data.uniform_(-init_w, init_w)\n # nn.init.uniform_(self.linear3.weight,(-init_w,init_w))\n #也用normal_(0, 0.1) 来初始化的,高斯分布中抽样填充,这两种都是比较有效的初始化方式\n self.fc2.bias.data.uniform_(-init_w, init_w)\n #其意义在于我们尽可能保持 每个神经元的输入和输出的方差一致。\n def forward(self, x):\n h = F.relu(self.fc1(x))\n h2 = F.relu(self.fc2(h))\n return torch.tanh(h2)"
},
{
"identifier": "ReplayBuffer",
"path": "src/buffereplay.py",
"snippet": "class ReplayBuffer(object):\n\n\tdef __init__(self, buffer_size):\n\t\tself.buffer_size = buffer_size\n\t\tself.num_experiences = 0\n\t\tself.buffer = deque()\n\n\tdef getBatch(self, batch_size):\n\t\tif self.num_experiences < batch_size:\n\t\t\treturn random.sample(self.buffer, self.num_experiences)\n\t\telse:\n\t\t\treturn random.sample(self.buffer, batch_size)\n\n\tdef add(self, obs, action, reward, new_obs, matrix, next_matrix, para_reward, terminate):\n\t\texperience = (obs, action, reward, new_obs, matrix, next_matrix, para_reward, terminate)\n\t\tif self.num_experiences < self.buffer_size:\n\t\t\tself.buffer.append(experience)\n\t\t\tself.num_experiences += 1\n\t\telse:\n\t\t\tself.buffer.popleft()\n\t\t\tself.buffer.append(experience)"
}
] | from src.env import Environment
from src.node import Node
from src.packet import Packet
from src.transtask import Trans_task
from src.DGN import DGN,DPG
from src.parameter import *
from src.buffereplay import ReplayBuffer
from queue import Queue
import math
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.nn.functional as F | 9,346 | os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# import matplotlib.pyplot as plt
USE_CUDA = torch.cuda.is_available()
print(USE_CUDA)
env=Environment()
| os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# import matplotlib.pyplot as plt
USE_CUDA = torch.cuda.is_available()
print(USE_CUDA)
env=Environment() | buff=ReplayBuffer(BUFFERSIZE) | 6 | 2023-12-30 09:35:30+00:00 | 12k |
alshubati99/BeamEye | uiElements/uiHandler 3.py | [
{
"identifier": "TkinterVideo",
"path": "uiElements/tkVideoPlayer.py",
"snippet": "class TkinterVideo(tk.Label):\n\n\tdef __init__(self, master, scaled: bool = True, consistant_frame_rate: bool = True, keep_aspect: bool = False,\n\t\t\t\t *args, **kwargs):\n\t\tsuper(TkinterVideo, self).__init__(master, *args, **kwargs)\n\n\t\tself.path = \"\"\n\t\tself._load_thread = None\n\n\t\tself._paused = True\n\t\tself._stop = True\n\n\t\tself.consistant_frame_rate = consistant_frame_rate # tries to keep the frame rate consistant by skipping over a few frames\n\n\t\tself._container = None\n\n\t\tself._current_img = None\n\t\tself._current_frame_Tk = None\n\t\tself._frame_number = 0\n\t\tself._time_stamp = 0\n\n\t\tself._current_frame_size = (0, 0)\n\n\t\tself._seek = False\n\t\tself._seek_sec = 0\n\n\t\tself._video_info = {\n\t\t\t\"duration\": 0, # duration of the video\n\t\t\t\"framerate\": 0, # frame rate of the video\n\t\t\t\"framesize\": (0, 0) # tuple containing frame height and width of the video\n\n\t\t}\n\n\t\tself.set_scaled(scaled)\n\t\tself._keep_aspect_ratio = keep_aspect\n\t\tself._resampling_method: int = Image.NEAREST\n\n\t\tself.bind(\"<<Destroy>>\", self.stop)\n\t\tself.bind(\"<<FrameGenerated>>\", self._display_frame)\n\n\tdef keep_aspect(self, keep_aspect: bool):\n\t\t\"\"\" keeps the aspect ratio when resizing the image \"\"\"\n\t\tself._keep_aspect_ratio = keep_aspect\n\n\tdef set_resampling_method(self, method: int):\n\t\t\"\"\" sets the resampling method when resizing \"\"\"\n\t\tself._resampling_method = method\n\n\tdef set_size(self, size: Tuple[int, int], keep_aspect: bool = False):\n\t\t\"\"\" sets the size of the video \"\"\"\n\t\tself.set_scaled(False, self._keep_aspect_ratio)\n\t\tself._current_frame_size = size\n\t\tself._keep_aspect_ratio = keep_aspect\n\n\tdef _resize_event(self, event):\n\n\t\tself._current_frame_size = event.width, event.height\n\n\t\tif self._paused and self._current_img and self.scaled:\n\t\t\tif self._keep_aspect_ratio:\n\t\t\t\tproxy_img = ImageOps.contain(self._current_img.copy(), self._current_frame_size)\n\n\t\t\telse:\n\t\t\t\tproxy_img = self._current_img.copy().resize(self._current_frame_size)\n\n\t\t\tself._current_imgtk = ImageTk.PhotoImage(proxy_img)\n\t\t\tself.config(image=self._current_imgtk)\n\n\tdef set_scaled(self, scaled: bool, keep_aspect: bool = False):\n\t\tself.scaled = scaled\n\t\tself._keep_aspect_ratio = keep_aspect\n\n\t\tif scaled:\n\t\t\tself.bind(\"<Configure>\", self._resize_event)\n\n\t\telse:\n\t\t\tself.unbind(\"<Configure>\")\n\t\t\tself._current_frame_size = self.video_info()[\"framesize\"]\n\n\tdef _set_frame_size(self, event=None):\n\t\t\"\"\" sets frame size to avoid unexpected resizing \"\"\"\n\n\t\tself._video_info[\"framesize\"] = (\n\t\tself._container.streams.video[0].width, self._container.streams.video[0].height)\n\n\t\tself.current_imgtk = ImageTk.PhotoImage(Image.new(\"RGBA\", self._video_info[\"framesize\"], (255, 0, 0, 0)))\n\t\tself.config(width=150, height=100, image=self.current_imgtk)\n\n\tdef _load(self, path):\n\t\t\"\"\" load's file from a thread \"\"\"\n\n\t\tcurrent_thread = threading.current_thread()\n\n\t\ttry:\n\t\t\twith av.open(path) as self._container:\n\n\t\t\t\tself._container.streams.video[0].thread_type = \"AUTO\"\n\n\t\t\t\tself._container.fast_seek = True\n\t\t\t\tself._container.discard_corrupt = True\n\n\t\t\t\tstream = self._container.streams.video[0]\n\n\t\t\t\ttry:\n\t\t\t\t\tself._video_info[\"framerate\"] = int(stream.average_rate)\n\t\t\t\t\tprint(self._video_info[\"framerate\"] , int(stream.average_rate))\n\n\t\t\t\texcept 
TypeError:\n\t\t\t\t\traise TypeError(\"Not a video file\")\n\n\t\t\t\ttry:\n\n\t\t\t\t\tself._video_info[\"duration\"] = float(stream.duration * stream.time_base)\n\t\t\t\t\tself.event_generate(\"<<Duration>>\") # duration has been found\n\n\t\t\t\texcept (TypeError, tk.TclError): # the video duration cannot be found, this can happen for mkv files\n\t\t\t\t\tpass\n\n\t\t\t\tself._frame_number = 0\n\n\t\t\t\tself._set_frame_size()\n\n\t\t\t\tself.stream_base = stream.time_base\n\n\t\t\t\ttry:\n\t\t\t\t\tself.event_generate(\"<<Loaded>>\") # generated when the video file is opened\n\n\t\t\t\texcept tk.TclError:\n\t\t\t\t\tpass\n\n\t\t\t\tnow = time.time_ns() // 1_000_000 # time in milliseconds\n\t\t\t\tthen = now\n\n\t\t\t\t#time_in_frame = (1 / self._video_info[\"framerate\"]) * 1000 # second it should play each frame\n\t\t\t\ttime_in_frame = 1000/self._video_info[\"framerate\"]\n\n\t\t\t\twhile self._load_thread == current_thread and not self._stop:\n\t\t\t\t\tif self._seek: # seek to specific second\n\t\t\t\t\t\tself._container.seek(self._seek_sec * 1000000, whence='time', backward=True,\n\t\t\t\t\t\t\t\t\t\t\t any_frame=False) # the seek time is given in av.time_base, the multiplication is to correct the frame\n\t\t\t\t\t\tself._seek = False\n\t\t\t\t\t\tself._frame_number = self._video_info[\"framerate\"] * self._seek_sec\n\n\t\t\t\t\t\tself._seek_sec = 0\n\n\t\t\t\t\tif self._paused:\n\t\t\t\t\t\ttime.sleep(0.0001) # to allow other threads to function better when its paused\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tnow = time.time()*1000 # time in milliseconds\n\t\t\t\t\tdelta = now - then # time difference between current frame and previous frame\n\t\t\t\t\tthen = now\n\n\t\t\t\t\t# print(\"Frame: \", frame.time, frame.index, self._video_info[\"framerate\"])\n\t\t\t\t\ttry:\n\t\t\t\t\t\tframe = next(self._container.decode(video=0))\n\n\t\t\t\t\t\tself._time_stamp = float(frame.pts * stream.time_base)\n\n\t\t\t\t\t\twidth = self._current_frame_size[0]\n\t\t\t\t\t\theight = self._current_frame_size[1]\n\t\t\t\t\t\tif self._keep_aspect_ratio:\n\t\t\t\t\t\t\tim_ratio = frame.width / frame.height\n\t\t\t\t\t\t\tdest_ratio = width / height\n\t\t\t\t\t\t\tif im_ratio != dest_ratio:\n\t\t\t\t\t\t\t\tif im_ratio > dest_ratio:\n\t\t\t\t\t\t\t\t\tnew_height = round(frame.height / frame.width * width)\n\t\t\t\t\t\t\t\t\theight = new_height\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tnew_width = round(frame.width / frame.height * height)\n\t\t\t\t\t\t\t\t\twidth = new_width\n\n\t\t\t\t\t\tself._current_img = frame.to_image(width=width, height=height, interpolation=\"FAST_BILINEAR\")\n\n\t\t\t\t\t\tself._frame_number += 1\n\n\t\t\t\t\t\tself.event_generate(\"<<FrameGenerated>>\")\n\n\t\t\t\t\t\tif self._frame_number % self._video_info[\"framerate\"] == 0:\n\t\t\t\t\t\t\tself.event_generate(\"<<SecondChanged>>\")\n\n\t\t\t\t\t\tif self.consistant_frame_rate:\n\t\t\t\t\t\t\t#time.sleep(max((time_in_frame - delta) / 1000, 0))\n\t\t\t\t\t\t\ttime.sleep(max((time_in_frame-delta)/520, 0))\n\t\t\t\t\t\t\t# was playing at x1.? 
the speed\n\t\t\t # compared the time it took to display 1 second of the video\n\t\t\t\t\t\t\t# with 1 real life second, found it was off by 0.519\n\t\t\t\t\t\t\t# ==> the video was playing twice as fast\n\t\t\t\t\texcept (StopIteration, av.error.EOFError, tk.TclError):\n\t\t\t\t\t\tbreak\n\n\t\t\t\ttry:\n\t\t\t\t\tself._container.close()\n\t\t\t\t# added\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tpass\n\n\t\t\t# print(\"Container: \", self._container.c)\n\t\t\tif self._container:\n\t\t\t\tself._container.close()\n\t\t\t\tself._container = None\n\n\t\tfinally:\n\t\t\tself._cleanup()\n\t\t\tgc.collect()\n\n\tdef _cleanup(self):\n\t\tself._frame_number = 0\n\t\tself._paused = True\n\t\tself._stop = True\n\t\tif self._load_thread:\n\t\t\tself._load_thread = None\n\t\tif self._container:\n\t\t\tself._container.close()\n\t\t\tself._container = None\n\t\ttry:\n\t\t\tself.event_generate(\"<<Ended>>\")\n\t\texcept tk.TclError:\n\t\t\tpass\n\n\tdef load(self, path: str):\n\t\t\"\"\" loads the file from the given path \"\"\"\n\t\tself.stop()\n\t\tself.path = path\n\n\tdef stop(self):\n\t\t\"\"\" stops reading the file \"\"\"\n\t\tself._paused = True\n\t\tself._stop = True\n\t\tself._cleanup()\n\n\tdef pause(self):\n\t\t\"\"\" pauses the video file \"\"\"\n\t\tself._paused = True\n\n\tdef play(self):\n\t\t\"\"\" plays the video file \"\"\"\n\t\tself._paused = False\n\t\tself._stop = False\n\n\t\tif not self._load_thread:\n\t\t\t# print(\"loading new thread...\")\n\t\t\tself._load_thread = threading.Thread(target=self._load, args=(self.path,), daemon=True)\n\t\t\tself._load_thread.start()\n\n\tdef is_paused(self):\n\t\t\"\"\" returns if the video is paused \"\"\"\n\t\treturn self._paused\n\n\tdef video_info(self) -> Dict:\n\t\t\"\"\" returns dict containing duration, frame_rate, file\"\"\"\n\t\treturn self._video_info\n\n\tdef metadata(self) -> Dict:\n\t\t\"\"\" returns metadata if available \"\"\"\n\t\tif self._container:\n\t\t\treturn self._container.metadata\n\n\t\treturn {}\n\n\tdef current_frame_number(self) -> int:\n\t\t\"\"\" return current frame number \"\"\"\n\t\treturn self._frame_number\n\n\tdef current_duration(self) -> float:\n\t\t\"\"\" returns current playing duration in sec \"\"\"\n\t\treturn self._time_stamp\n\n\tdef current_img(self) -> Image:\n\t\t\"\"\" returns current frame image \"\"\"\n\t\treturn self._current_img\n\n\tdef _display_frame(self, event):\n\t\t\"\"\" displays the frame on the label \"\"\"\n\n\t\tif self.current_imgtk.width() == self._current_img.width and self.current_imgtk.height() == self._current_img.height:\n\t\t\tself.current_imgtk.paste(self._current_img)\n\t\telse:\n\t\t\tself.current_imgtk = ImageTk.PhotoImage(self._current_img)\n\t\tself.config(image=self.current_imgtk)\n\n\tdef seek(self, sec: int):\n\t\t\"\"\" seeks to specific time\"\"\"\n\n\t\tself._seek = True\n\t\tself._seek_sec = sec"
},
{
"identifier": "open_settings_window",
"path": "uiElements/SettingsWindow.py",
"snippet": "def open_settings_window(root=window):\n\t# settings will only be used in and by this function\n\twith open(uiElements + \"/userSettings.txt\", \"r\") as f:\n\t\tsettings = f.read()\n\t\tsettings = [line.split(\" \")[-1] for line in settings.split(\"\\n\")]\n\t\tinclude_labels, include_crowd, include_accuracy, pedestrian_color, crowd_color, output_path = settings\n\t\toutput_path = output_path.replace(\"_SPACE_\", \" \")\n\t\tinclude_labels, include_crowd, include_accuracy, pedestrian_color, crowd_color = int(include_labels), int(\n\t\t\tinclude_crowd), int(include_accuracy), int(pedestrian_color), int(crowd_color)\n\n\tcolor_dict = {1: \"#0094FF\", 2: \"#FF00F6\", 3: \"red\", 4: \"#FF6A00\", 5: \"yellow\", 6: \"#26FF5C\"}\n\tcolor_dict_key = 1\n\n\tdef pick_color():\n\t\tnonlocal color_dict_key\n\t\tcolor_dict_key += 1\n\t\treturn color_dict[color_dict_key - 1]\n\n\tglobal settings_opened\n\n\tsettings_opened = True\n\tsettings_window = tk.Toplevel(root, bg=\"#071F46\")\n\tsettings_window.geometry('450x600')\n\tsettings_window.title('Settings')\n\tsettings_window.iconbitmap(uiAssets + \"logo.ico\")\n\tsettings_window.configure()\n\tsettings_bg_color = \"#071F46\"\n\n\ttopy = -20\n\n\tdef increment_topy():\n\t\tnonlocal topy\n\t\ttopy += 60\n\t\treturn topy\n\n\tleft_x = 0\n\n\tdef increment_leftx():\n\t\tnonlocal left_x\n\t\tleft_x += 30\n\t\treturn left_x\n\n\ttop_font = (Lato, 18)\n\n\t# include labels\n\tdef include_labels_update():\n\t\tundo_saved()\n\t\tnonlocal include_labels\n\t\tinclude_labels += 1\n\t\tinclude_labels %= 2\n\t\tprint(include_labels, include_labels_box.get())\n\n\ttk.Label(settings_window, text='Include Labels', font=top_font, foreground=\"white\", background=\"#071F46\").place(\n\t\tx=20, y=increment_topy())\n\n\n\tinclude_labels_box = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t fg_color=settings_bg_color, border_color=\"white\", hover_color=\"white\",\n\t\t\t\t\t\t\t\t\t\t command=include_labels_update)\n\tif include_labels:\n\t\tinclude_labels_box.select()\n\telse:\n\t\tinclude_labels_box.deselect()\n\n\tinclude_labels_box.place(x=400, y=topy + 5)\n\n\t# include accuracy\n\tdef include_accuracy_update():\n\t\tundo_saved()\n\t\tnonlocal include_accuracy\n\t\tinclude_accuracy += 1\n\t\tinclude_accuracy %= 2\n\n\ttk.Label(settings_window, text='Include Accuracy', font=top_font, foreground=\"white\", background=\"#071F46\").place(\n\t\tx=20, y=increment_topy())\n\tinclude_accuracy_box = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t fg_color=settings_bg_color, border_color=\"white\", hover_color=\"white\",\n\t\t\t\t\t\t\t\t\t\t command=include_accuracy_update)\n\tif include_accuracy:\n\t\tinclude_accuracy_box.select()\n\telse:\n\t\tinclude_accuracy_box.deselect()\n\tinclude_accuracy_box.place(x=400, y=topy + 5)\n\n\t# crowd detect\n\tdef include_crowd_update():\n\t\tundo_saved()\n\t\tnonlocal include_crowd\n\t\tinclude_crowd += 1\n\t\tinclude_crowd %= 2\n\n\t# print(include_crowd)\n\n\ttk.Label(settings_window, text='Include Crowd Detection', font=top_font, foreground=\"white\",\n\t\t\t background=\"#071F46\").place(x=20, y=increment_topy())\n\tinclude_crowd_box = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\tfg_color=settings_bg_color, border_color=\"white\", hover_color=\"white\",\n\t\t\t\t\t\t\t\t\t\tcommand=include_crowd_update)\n\tif 
include_crowd:\n\t\tinclude_crowd_box.select()\n\telse:\n\t\tinclude_crowd_box.deselect()\n\n\tinclude_crowd_box.place(x=400, y=topy + 5)\n\ttopy += 10\n\t# pedestrian box color\n\tsame_color_error = tk.Label(settings_window, text='Pedestrian and Crowd colors can\\'t be similar', font=(Lato, 10),\n\t\t\t\t\t\t\t\tforeground=\"red\",\n\t\t\t\t\t\t\t\tbackground=\"#071F46\")\n\n\t# choosing colors\n\t# pedestrian box colors\n\tdef reset_pedestrian_checkboxes():\n\t\tundo_saved()\n\t\tnonlocal pd_color_checkBox_list, pedestrian_color, crowd_color, same_color_error\n\t\tsame_color_error.place_forget()\n\t\tpd_color_checkBox_list[pedestrian_color - 1].deselect()\n\t\tif all(not box.get() for box in pd_color_checkBox_list): # for no empty checkbox\n\t\t\tpd_color_checkBox_list[pedestrian_color - 1].select()\n\n\t\telse:\n\t\t\tfor box in pd_color_checkBox_list:\n\t\t\t\tprint(box.get(), end=\", \")\n\t\t\t\tif box.get():\n\t\t\t\t\ttmp = pd_color_checkBox_list.index(box) + 1\n\t\t\t\t\tif tmp == crowd_color:\n\t\t\t\t\t\tsame_color_error.place(x=20, y=560)\n\t\t\t\t\t\tpd_color_checkBox_list[tmp - 1].deselect()\n\t\t\t\t\t\tpd_color_checkBox_list[pedestrian_color - 1].select()\n\t\t\t\t\telse:\n\t\t\t\t\t\tpedestrian_color = tmp\n\n\ttk.Label(settings_window, text='Pedestrian Box Color', font=top_font, foreground=\"white\",\n\t\t\t background=\"#071F46\").place(x=20, y=increment_topy())\n\tincrement_topy()\n\n\tpd_color_checkBox_list = []\n\tpd_color_checkBox_one = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\tfg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\thover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\tcommand=reset_pedestrian_checkboxes, )\n\tpd_color_checkBox_one.place(x=increment_leftx(), y=topy)\n\tpd_color_checkBox_list.append(pd_color_checkBox_one)\n\n\tpd_color_checkBox_two = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\tfg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\thover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\tcommand=reset_pedestrian_checkboxes, )\n\tpd_color_checkBox_two.place(x=increment_leftx(), y=topy)\n\tpd_color_checkBox_list.append(pd_color_checkBox_two)\n\n\tpd_color_checkBox_three = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\t fg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t hover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t command=reset_pedestrian_checkboxes, )\n\tpd_color_checkBox_three.place(x=increment_leftx(), y=topy)\n\tpd_color_checkBox_list.append(pd_color_checkBox_three)\n\n\tpd_color_checkBox_four = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\t fg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t hover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t command=reset_pedestrian_checkboxes, )\n\tpd_color_checkBox_four.place(x=increment_leftx(), y=topy)\n\tpd_color_checkBox_list.append(pd_color_checkBox_four)\n\n\tpd_color_checkBox_five = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\t fg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t hover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t 
command=reset_pedestrian_checkboxes, )\n\tpd_color_checkBox_five.place(x=increment_leftx(), y=topy)\n\tpd_color_checkBox_list.append(pd_color_checkBox_five)\n\n\tpd_color_checkBox_six = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\tfg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\thover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\tcommand=reset_pedestrian_checkboxes, )\n\tpd_color_checkBox_six.place(x=increment_leftx(), y=topy)\n\tpd_color_checkBox_list.append(pd_color_checkBox_six)\n\n\tpd_color_checkBox_list[pedestrian_color - 1].select()\n\tdefault_p = False\n\n\ttopy -= 20\n\tleft_x = 0\n\tcolor_dict_key = 1\n\n\t# crowd box color\n\tdef reset_crowd_checkboxes():\n\t\tundo_saved()\n\t\tnonlocal crowd_color_checkBox_list, crowd_color, default_c, pedestrian_color, same_color_error\n\t\tsame_color_error.place_forget()\n\t\tcrowd_color_checkBox_list[crowd_color - 1].deselect()\n\t\tif all(not box.get() for box in crowd_color_checkBox_list): # for no empty checkbox\n\t\t\tcrowd_color_checkBox_list[crowd_color - 1].select()\n\n\t\telse:\n\t\t\tfor box in crowd_color_checkBox_list:\n\t\t\t\tprint(box.get(), end=\", \")\n\t\t\t\tif box.get():\n\t\t\t\t\ttmp = crowd_color_checkBox_list.index(box) + 1\n\t\t\t\t\tif tmp == pedestrian_color:\n\t\t\t\t\t\tsame_color_error.place(x=20, y=560)\n\t\t\t\t\t\tcrowd_color_checkBox_list[tmp - 1].deselect()\n\t\t\t\t\t\tcrowd_color_checkBox_list[crowd_color - 1].select()\n\t\t\t\t\telse:\n\t\t\t\t\t\tcrowd_color = tmp\n\n\ttk.Label(settings_window, text='Crowd Box Color', font=top_font, foreground=\"white\", background=\"#071F46\").place(\n\t\tx=20, y=increment_topy())\n\tincrement_topy()\n\tcrowd_color_checkBox_list = []\n\tcrowd_color_checkBox_one = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\t fg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t hover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t command=reset_crowd_checkboxes)\n\tcrowd_color_checkBox_one.place(x=increment_leftx(), y=topy)\n\tcrowd_color_checkBox_list.append(crowd_color_checkBox_one)\n\tcrowd_color_checkBox_two = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\t fg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t hover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t command=reset_crowd_checkboxes)\n\tcrowd_color_checkBox_two.place(x=increment_leftx(), y=topy)\n\tcrowd_color_checkBox_list.append(crowd_color_checkBox_two)\n\tcrowd_color_checkBox_three = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\t\t fg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t\t hover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t\t command=reset_crowd_checkboxes)\n\tcrowd_color_checkBox_three.place(x=increment_leftx(), y=topy)\n\tcrowd_color_checkBox_list.append(crowd_color_checkBox_three)\n\tcrowd_color_checkBox_four = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\t\tfg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t\thover_color=color_dict[color_dict_key - 
1],\n\t\t\t\t\t\t\t\t\t\t\t\tcommand=reset_crowd_checkboxes)\n\tcrowd_color_checkBox_four.place(x=increment_leftx(), y=topy)\n\tcrowd_color_checkBox_list.append(crowd_color_checkBox_four)\n\tcrowd_color_checkBox_five = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\t\tfg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t\thover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t\tcommand=reset_crowd_checkboxes)\n\tcrowd_color_checkBox_five.place(x=increment_leftx(), y=topy)\n\tcrowd_color_checkBox_list.append(crowd_color_checkBox_five)\n\tcrowd_color_checkBox_six = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\t fg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t hover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t command=reset_crowd_checkboxes)\n\tcrowd_color_checkBox_six.place(x=increment_leftx(), y=topy)\n\tcrowd_color_checkBox_list.append(crowd_color_checkBox_six)\n\n\tcrowd_color_checkBox_list[crowd_color - 1].select()\n\tdefault_c = False\n\n\t# output folder\n\tdef change_output_folder():\n\t\tundo_saved()\n\t\tnonlocal output_path, current_output_text\n\t\tsettings_window.withdraw()\n\t\toutput_path = filedialog.askdirectory()\n\t\tprint(\"got:\", output_path)\n\t\tdisp_output = output_path\n\t\tcurrent_output_text.configure(text=f'Current: {disp_output}')\n\t\tsettings_window.deiconify()\n\n\ttopy -= 20\n\tleft_x = 0\n\ttk.Label(settings_window, text='Output folder', font=top_font, foreground=\"white\", background=\"#071F46\").place(x=20,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y=increment_topy() + 20)\n\tdisplay_output = \"\\\\\" + output_path.replace(\"//\", \"\\\\\").replace('\"', \"\")\n\tcurrent_output_text = tk.Label(settings_window, text=f'Current: {display_output}', font=(Lato, 10),\n\t\t\t\t\t\t\t\t foreground=\"white\", background=\"#071F46\")\n\tcurrent_output_text.place(x=20, y=increment_topy())\n\toutput_folder_change_button = CTk.CTkButton(master=settings_window,\n\t\t\t\t\t\t\t\t\t\t\t\twidth=120,\n\t\t\t\t\t\t\t\t\t\t\t\theight=40,\n\t\t\t\t\t\t\t\t\t\t\t\tborder_width=2,\n\t\t\t\t\t\t\t\t\t\t\t\tborder_color=\"white\",\n\t\t\t\t\t\t\t\t\t\t\t\tbg_color=settings_bg_color,\n\t\t\t\t\t\t\t\t\t\t\t\tfg_color=settings_bg_color,\n\t\t\t\t\t\t\t\t\t\t\t\tcorner_radius=8,\n\t\t\t\t\t\t\t\t\t\t\t\ttext=\"Change\",\n\t\t\t\t\t\t\t\t\t\t\t\tfont=(\"Lato\", 20),\n\t\t\t\t\t\t\t\t\t\t\t\tcommand=change_output_folder\n\t\t\t\t\t\t\t\t\t\t\t\t)\n\toutput_folder_change_button.place(x=300, y=topy - 50)\n\n\t# output_folder_change_button.place(x = )\n\n\t# save settings\n\tdef undo_saved():\n\t\tnonlocal settings_save_button\n\t\tsettings_save_button.configure(text_color=settings_bg_color,\n\t\t\t\t\t\t\t\t\t text='Save',\n\t\t\t\t\t\t\t\t\t bg_color=settings_bg_color,\n\t\t\t\t\t\t\t\t\t fg_color=\"white\",\n\t\t\t\t\t\t\t\t\t hover_color=\"#24EA3F\", )\n\n\tdef save_settings():\n\t\tnonlocal include_labels, include_crowd, include_accuracy, pedestrian_color, crowd_color, output_path, settings_save_button\n\t\toutput_path = output_path.replace(\" \", \"_SPACE_\")\n\t\tsettings = f\"labels {include_labels}\\ncrowd {include_crowd}\\naccuracy {include_accuracy}\\npedestrian_color {pedestrian_color}\\ncrowd_color {crowd_color}\\nout_dir {output_path}\"\n\t\tprint(settings)\n\t\twith open(uiElements + \"/userSettings.txt\", \"w\") as 
f:\n\t\t\tf.write(settings)\n\t\tsettings_save_button.configure(text='Saved!', fg_color=\"#24EA3F\")\n\n\tsettings_save_button = CTk.CTkButton(settings_window,\n\t\t\t\t\t\t\t\t\t\t height=40,\n\t\t\t\t\t\t\t\t\t\t width=120,\n\t\t\t\t\t\t\t\t\t\t border_width=2,\n\t\t\t\t\t\t\t\t\t\t corner_radius=8,\n\t\t\t\t\t\t\t\t\t\t border_color=\"white\",\n\t\t\t\t\t\t\t\t\t\t font=(\"Lato\", 20),\n\t\t\t\t\t\t\t\t\t\t command=save_settings,\n\t\t\t\t\t\t\t\t\t\t text_color=settings_bg_color,\n\t\t\t\t\t\t\t\t\t\t text='Save',\n\t\t\t\t\t\t\t\t\t\t bg_color=settings_bg_color,\n\t\t\t\t\t\t\t\t\t\t fg_color=\"white\",\n\t\t\t\t\t\t\t\t\t\t hover_color=\"#24EA3F\",\n\n\t\t\t\t\t\t\t\t\t\t )\n\tsettings_save_button.place(x=300, y=520)\n\ttk.Label(settings_window, text='Close without saving \\nto cancel the changes', font=(Lato, 8), foreground=\"white\",\n\t\t\t background=\"#071F46\").place(x=305, y=560)\n\n\tsettings_window.wait_window()"
},
{
"identifier": "settings_inherit_root",
"path": "uiElements/SettingsWindow.py",
"snippet": "def settings_inherit_root(root):\n\tglobal window\n\twindow = root"
}
] | import os.path
import shutil
import tkinter as tk
import customtkinter as ctk
import threading
import cv2
import uiElements.sharedVariables as User
from tkinter import PhotoImage, filedialog, messagebox
from uiElements.tkVideoPlayer import TkinterVideo
from uiElements.SettingsWindow import open_settings_window, settings_inherit_root
from time import sleep
from pathlib import Path
from shutil import move
from PIL import Image, ImageTk | 8,115 | # getting user screen size, change values to test different screen sizes
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
# define window size percentage, max is 1 == screen size
resize_ratio = .75
# setting window size 75% screen size
width, height = int(screen_width * resize_ratio), int((screen_width * resize_ratio) * 9 / 16)
# keeping 16:9 aspect ratio to match videos' placeholders
root.geometry(f"{int(screen_width * resize_ratio)}x{int((screen_width * resize_ratio) * 9 / 16)}")
# disable resizing
root.resizable(False, False)
root.configure(bg="black")
# root.geometry(f"{width}x{height}")
pc = "#30A8E6"
ended = False
crowd_is_included = None
progressbar, progressbar_progress, progressbar_placeholder_label = ctk.CTkProgressBar, 0, tk.Label
current_loading_canvas, current_video_canvas = tk.Canvas, tk.Canvas
# background_image_hello = PhotoImage(file=uiAssets + 'home2.png')
pedestrian_count_second, crowd_count_second = [], []
new_video = False
def set_aspect_ratio():
s_width = root.winfo_screenwidth()
s_height = root.winfo_screenheight()
# Initial aspect ratio adjustment
new_width = root.winfo_width()
new_height = int(new_width * 9 / 16)
# If height exceeds screen, adjust width based on screen height
if new_height > s_height:
new_height = s_height
new_width = int(new_height * 16 / 9)
# If width now exceeds screen, reduce both to fit within screen
if new_width > s_width:
new_width = s_width
new_height = int(new_width * 9 / 16)
# Apply the new dimensions
root.geometry(f"{new_width}x{new_height}")
def new_coordinates(old_x, old_y, old_width=None, old_height=None):
window_width, window_height = root.winfo_width(), root.winfo_height()
new_x = old_x * window_width / 1300
new_y = old_y * window_height / 750
if old_width is not None:
new_width = old_width * window_width / 1300
new_height = old_height * window_width / 750
return new_x, new_y, new_width, new_height
return new_x, new_y
def open_hello_window():
global current_canvas, main_root, w, h
# upload canvas
img_ = Image.open(uiAssets + 'home2.png')
resized_image_ = img_.resize((root.winfo_width(), root.winfo_height()))
tk_image_ = ImageTk.PhotoImage(resized_image_)
background_image_hello = tk_image_
hello_canvas = tk.Canvas(root, width=root.winfo_width() - 4, height=root.winfo_width() - 10)
current_canvas = hello_canvas
hello_canvas.place(x=0, y=0)
hello_canvas.create_image(root.winfo_width() / 2, root.winfo_height() / 2, image=background_image_hello, anchor="c")
# settings in upload window
progressbar_placeholder = ctk.CTkProgressBar(master=hello_canvas, height=20,
width=400, bg_color="#041632", fg_color="#041632",
progress_color="#30A8E6", border_color="#30A8E6",
border_width=2, indeterminate_speed=0.01, mode='determinate'
)
progressbar_placeholder.place(x=root.winfo_width() / 2 - 200, y=root.winfo_height() / 2 + 60)
progressbar_placeholder.set(0)
# settings canvas
def wait_for_tenserflow_import():
sleep(1)
for _ in range(7): # takes around 7 seconds to import tensorflow
for __ in range(7): # each .step() increases the bar by 2%, 7x7x2 = 98% of the bar after 7 seconds
progressbar_placeholder.step()
sleep(1 / 7)
progressbar_placeholder.set(1) # set the bar to 100%
sleep(1)
hello_canvas.destroy()
return
threading.Thread(target=wait_for_tenserflow_import).start()
def seconds_to_hhmmss(seconds):
hours, remainder = divmod(seconds, 3600)
minutes, seconds = divmod(remainder, 60)
return "{:02d}:{:02d}:{:02d}".format(int(hours), int(minutes), int(seconds))
def update_current_timestamp(stamp: ctk.CTkLabel, timestamp: str): # ,
stamp.configure(text=timestamp)
pass
video_end = False
current_canvas = None
main_root, w, h = None, 0, 0
def open_video_window():
global root, current_video_canvas
# threading.Thread(target=open_hello_window).start()
video_canvas = tk.Canvas(root, bg="#051735")
|
input_video_path = ""
thread_crowd, thread_people, threads_started = threading.Thread, threading.Thread, False
current_pd_number_color, current_crowd_number_color = None, None
parent = Path(__file__).resolve().parent
# if called from uiHandler will return uiElements
# if called from BeamEye.py will return GP
# we need GP//uiAssets path for ui assets
# following block is to get path to folder of the app (GP), whatever its (new) name is
# and add \\uiAssets\\ to it
# if the parent folder isn't GP ==> a sub-folder of GP
while not os.path.isdir(str(parent) + '\\uiAssets\\'):
# go back to its parent
parent = parent.parent
GP_path = parent
uiAssets = str(GP_path) + '\\uiAssets\\'
root = tk.Tk()
root.title("BeamEye")
root.iconbitmap(uiAssets + "logo.ico")
# UI has too many elements to control during resizing, especially during video
# playback, we get screen size and base the app window on a smaller area
# before resizing is disabled.
# getting user screen size, change values to test different screen sizes
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
# define window size percentage, max is 1 == screen size
resize_ratio = .75
# setting window size 75% screen size
width, height = int(screen_width * resize_ratio), int((screen_width * resize_ratio) * 9 / 16)
# keeping 16:9 aspect ratio to match videos' placeholders
root.geometry(f"{int(screen_width * resize_ratio)}x{int((screen_width * resize_ratio) * 9 / 16)}")
# disable resizing
root.resizable(False, False)
root.configure(bg="black")
# root.geometry(f"{width}x{height}")
pc = "#30A8E6"
ended = False
crowd_is_included = None
progressbar, progressbar_progress, progressbar_placeholder_label = ctk.CTkProgressBar, 0, tk.Label
current_loading_canvas, current_video_canvas = tk.Canvas, tk.Canvas
# background_image_hello = PhotoImage(file=uiAssets + 'home2.png')
pedestrian_count_second, crowd_count_second = [], []
new_video = False
def set_aspect_ratio():
s_width = root.winfo_screenwidth()
s_height = root.winfo_screenheight()
# Initial aspect ratio adjustment
new_width = root.winfo_width()
new_height = int(new_width * 9 / 16)
# If height exceeds screen, adjust width based on screen height
if new_height > s_height:
new_height = s_height
new_width = int(new_height * 16 / 9)
# If width now exceeds screen, reduce both to fit within screen
if new_width > s_width:
new_width = s_width
new_height = int(new_width * 9 / 16)
# Apply the new dimensions
root.geometry(f"{new_width}x{new_height}")
def new_coordinates(old_x, old_y, old_width=None, old_height=None):
window_width, window_height = root.winfo_width(), root.winfo_height()
new_x = old_x * window_width / 1300
new_y = old_y * window_height / 750
if old_width is not None:
new_width = old_width * window_width / 1300
new_height = old_height * window_width / 750
return new_x, new_y, new_width, new_height
return new_x, new_y
def open_hello_window():
global current_canvas, main_root, w, h
# upload canvas
img_ = Image.open(uiAssets + 'home2.png')
resized_image_ = img_.resize((root.winfo_width(), root.winfo_height()))
tk_image_ = ImageTk.PhotoImage(resized_image_)
background_image_hello = tk_image_
hello_canvas = tk.Canvas(root, width=root.winfo_width() - 4, height=root.winfo_width() - 10)
current_canvas = hello_canvas
hello_canvas.place(x=0, y=0)
hello_canvas.create_image(root.winfo_width() / 2, root.winfo_height() / 2, image=background_image_hello, anchor="c")
# settings in upload window
progressbar_placeholder = ctk.CTkProgressBar(master=hello_canvas, height=20,
width=400, bg_color="#041632", fg_color="#041632",
progress_color="#30A8E6", border_color="#30A8E6",
border_width=2, indeterminate_speed=0.01, mode='determinate'
)
progressbar_placeholder.place(x=root.winfo_width() / 2 - 200, y=root.winfo_height() / 2 + 60)
progressbar_placeholder.set(0)
# settings canvas
def wait_for_tenserflow_import():
sleep(1)
for _ in range(7): # takes around 7 seconds to import tensorflow
for __ in range(7): # each .step() increases the bar by 2%, 7x7x2 = 98% of the bar after 7 seconds
progressbar_placeholder.step()
sleep(1 / 7)
progressbar_placeholder.set(1) # set the bar to 100%
sleep(1)
hello_canvas.destroy()
return
threading.Thread(target=wait_for_tenserflow_import).start()
def seconds_to_hhmmss(seconds):
hours, remainder = divmod(seconds, 3600)
minutes, seconds = divmod(remainder, 60)
return "{:02d}:{:02d}:{:02d}".format(int(hours), int(minutes), int(seconds))
def update_current_timestamp(stamp: ctk.CTkLabel, timestamp: str): # ,
stamp.configure(text=timestamp)
pass
video_end = False
current_canvas = None
main_root, w, h = None, 0, 0
def open_video_window():
global root, current_video_canvas
# threading.Thread(target=open_hello_window).start()
video_canvas = tk.Canvas(root, bg="#051735") | settings_inherit_root(root) | 2 | 2023-12-26 18:39:25+00:00 | 12k |
camenduru/MotionCtrl-hf | app.py | [
{
"identifier": "CAMERA_MOTION_MODE",
"path": "gradio_utils/camera_utils.py",
"snippet": "CAMERA_MOTION_MODE = [\"Basic Camera Poses\", \"Provided Complex Camera Poses\", \"Custom Camera Poses\"]"
},
{
"identifier": "process_camera",
"path": "gradio_utils/camera_utils.py",
"snippet": "def process_camera(camera_dict):\n # \"First A then B\", \"Both A and B\", \"Custom\"\n if camera_dict['complex'] is not None:\n with open(COMPLEX_CAMERA[camera_dict['complex']]) as f:\n RT = json.load(f) # [16, 12]\n RT = np.array(RT).reshape(-1, 3, 4)\n print(RT.shape)\n return RT\n\n\n motion_list = camera_dict['motion']\n mode = camera_dict['mode']\n speed = camera_dict['speed']\n print(len(motion_list))\n if len(motion_list) == 0:\n angle = np.array([0,0,0])\n T = np.array([0,0,0])\n RT = get_camera_motion(angle, T, speed, 16)\n\n\n elif len(motion_list) == 1:\n angle = np.array(CAMERA[motion_list[0]][\"angle\"])\n T = np.array(CAMERA[motion_list[0]][\"T\"])\n print(angle, T)\n RT = get_camera_motion(angle, T, speed, 16)\n \n \n \n elif len(motion_list) == 2:\n if mode == \"Customized Mode 1: First A then B\":\n angle = np.array(CAMERA[motion_list[0]][\"angle\"]) \n T = np.array(CAMERA[motion_list[0]][\"T\"]) \n RT_0 = get_camera_motion(angle, T, speed, 8)\n\n angle = np.array(CAMERA[motion_list[1]][\"angle\"]) \n T = np.array(CAMERA[motion_list[1]][\"T\"]) \n RT_1 = get_camera_motion(angle, T, speed, 8)\n\n RT = combine_camera_motion(RT_0, RT_1)\n\n elif mode == \"Customized Mode 2: Both A and B\":\n angle = np.array(CAMERA[motion_list[0]][\"angle\"]) + np.array(CAMERA[motion_list[1]][\"angle\"])\n T = np.array(CAMERA[motion_list[0]][\"T\"]) + np.array(CAMERA[motion_list[1]][\"T\"])\n RT = get_camera_motion(angle, T, speed, 16)\n\n\n # return RT.reshape(-1, 12)\n return RT"
},
{
"identifier": "OBJECT_MOTION_MODE",
"path": "gradio_utils/traj_utils.py",
"snippet": "OBJECT_MOTION_MODE = [\"Provided Trajectory\", \"Custom Trajectory\"]"
},
{
"identifier": "get_provided_traj",
"path": "gradio_utils/traj_utils.py",
"snippet": "def get_provided_traj(traj_name):\n traj = read_points(PROVIDED_TRAJS[traj_name])\n # xrange from 256 to 1024\n traj = [[int(1024*x/256), int(1024*y/256)] for x,y in traj]\n return traj"
},
{
"identifier": "process_points",
"path": "gradio_utils/traj_utils.py",
"snippet": "def process_points(points):\n frames = 16\n defualt_points = [[512,512]]*16\n\n if len(points) < 2:\n return defualt_points\n elif len(points) >= frames:\n skip = len(points)//frames\n return points[::skip][:15] + points[-1:]\n else:\n insert_num = frames - len(points)\n insert_num_dict = {}\n interval = len(points) - 1\n n = insert_num // interval\n m = insert_num % interval\n for i in range(interval):\n insert_num_dict[i] = n\n for i in range(m):\n insert_num_dict[i] += 1\n\n res = []\n for i in range(interval):\n insert_points = []\n x0,y0 = points[i]\n x1,y1 = points[i+1]\n\n delta_x = x1 - x0\n delta_y = y1 - y0\n for j in range(insert_num_dict[i]):\n x = x0 + (j+1)/(insert_num_dict[i]+1)*delta_x\n y = y0 + (j+1)/(insert_num_dict[i]+1)*delta_y\n insert_points.append([int(x), int(y)])\n\n res += points[i:i+1] + insert_points\n res += points[-1:]\n return res"
},
{
"identifier": "process_traj",
"path": "gradio_utils/traj_utils.py",
"snippet": "def process_traj(points, device='cpu'):\n xy_range = 1024\n points = process_points(points)\n points = [[int(256*x/xy_range), int(256*y/xy_range)] for x,y in points]\n \n optical_flow = get_flow(points)\n # optical_flow = torch.tensor(optical_flow).to(device)\n\n return optical_flow"
},
{
"identifier": "vis_camera",
"path": "gradio_utils/utils.py",
"snippet": "def vis_camera(RT_list, rescale_T=1):\n fig = go.Figure()\n showticklabels = True\n visible = True\n scene_bounds = 2\n base_radius = 2.5\n zoom_scale = 1.5\n fov_deg = 50.0\n \n edges = [(0, 1), (0, 2), (0, 3), (1, 2), (2, 3), (3, 1), (3, 4)] \n \n colors = px.colors.qualitative.Plotly\n \n cone_list = []\n n = len(RT_list)\n for i, RT in enumerate(RT_list):\n R = RT[:,:3]\n T = RT[:,-1]/rescale_T\n cone = calc_cam_cone_pts_3d(R, T, fov_deg)\n cone_list.append((cone, (i*1/n, \"green\"), f\"view_{i}\"))\n\n \n for (cone, clr, legend) in cone_list:\n for (i, edge) in enumerate(edges):\n (x1, x2) = (cone[edge[0], 0], cone[edge[1], 0])\n (y1, y2) = (cone[edge[0], 1], cone[edge[1], 1])\n (z1, z2) = (cone[edge[0], 2], cone[edge[1], 2])\n fig.add_trace(go.Scatter3d(\n x=[x1, x2], y=[y1, y2], z=[z1, z2], mode='lines',\n line=dict(color=clr, width=3),\n name=legend, showlegend=(i == 0))) \n fig.update_layout(\n height=500,\n autosize=True,\n # hovermode=False,\n margin=go.layout.Margin(l=0, r=0, b=0, t=0),\n \n showlegend=True,\n legend=dict(\n yanchor='bottom',\n y=0.01,\n xanchor='right',\n x=0.99,\n ),\n scene=dict(\n aspectmode='manual',\n aspectratio=dict(x=1, y=1, z=1.0),\n camera=dict(\n center=dict(x=0.0, y=0.0, z=0.0),\n up=dict(x=0.0, y=-1.0, z=0.0),\n eye=dict(x=scene_bounds/2, y=-scene_bounds/2, z=-scene_bounds/2),\n ),\n\n xaxis=dict(\n range=[-scene_bounds, scene_bounds],\n showticklabels=showticklabels,\n visible=visible,\n ),\n \n \n yaxis=dict(\n range=[-scene_bounds, scene_bounds],\n showticklabels=showticklabels,\n visible=visible,\n ),\n \n \n zaxis=dict(\n range=[-scene_bounds, scene_bounds],\n showticklabels=showticklabels,\n visible=visible,\n )\n ))\n return fig"
},
{
"identifier": "DDIMSampler",
"path": "lvdm/models/samplers/ddim.py",
"snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n self.counter = 0\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n schedule_verbose=False,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n \n # check condition bs\n if conditioning is not None:\n if isinstance(conditioning, dict):\n try:\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n except:\n cbs = conditioning[list(conditioning.keys())[0]][0].shape[0]\n\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=schedule_verbose)\n \n # make shape\n if len(shape) == 3:\n C, H, W = shape\n size = (batch_size, C, H, W)\n elif len(shape) == 4:\n C, T, H, W = shape\n size = (batch_size, C, T, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n \n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n verbose=verbose,\n **kwargs)\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, verbose=True,\n **kwargs):\n device = self.model.betas.device \n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n \n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n \n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n if verbose:\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n else:\n iterator = time_range\n\n clean_cond = kwargs.pop(\"clean_cond\", False)\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n # use mask to blend noised original latent (img_orig) & new sampled latent (img)\n if mask is not None:\n assert x0 is not None\n if clean_cond:\n img_orig = x0\n else:\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? <ddim inversion>\n img = img_orig * mask + (1. 
- mask) * img # keep original & modify use img\n \n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs)\n \n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n uc_type=None, conditional_guidance_scale_temporal=None, **kwargs):\n b, *_, device = *x.shape, x.device\n if x.dim() == 5:\n is_video = True\n else:\n is_video = False\n # f=open('/apdcephfs_cq2/share_1290939/yingqinghe/code/LVDM-private/cfg_range_s5noclamp.txt','a')\n # print(f't={t}, model input, min={torch.min(x)}, max={torch.max(x)}',file=f)\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c, **kwargs) # unet denoiser\n else:\n # with unconditional condition\n if isinstance(c, torch.Tensor):\n un_kwargs = kwargs.copy()\n if isinstance(unconditional_conditioning, dict):\n for uk, uv in unconditional_conditioning.items():\n if uk in un_kwargs:\n un_kwargs[uk] = uv\n unconditional_conditioning = unconditional_conditioning['uc']\n if 'cond_T' in kwargs and t < kwargs['cond_T']:\n if 'features_adapter' in kwargs:\n kwargs.pop('features_adapter')\n un_kwargs.pop('features_adapter')\n # kwargs['features_adapter'] = None\n # un_kwargs['features_adapter'] = None\n # if 'pose_emb' in kwargs:\n # kwargs.pop('pose_emb')\n # un_kwargs.pop('pose_emb')\n # kwargs['pose_emb'] = None\n # un_kwargs['pose_emb'] = None\n e_t = self.model.apply_model(x, t, c, **kwargs)\n # e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)\n e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **un_kwargs)\n elif isinstance(c, dict):\n e_t = self.model.apply_model(x, t, c, **kwargs)\n e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)\n else:\n raise NotImplementedError\n # text cfg\n if uc_type is None:\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n else:\n if uc_type == 'cfg_original':\n e_t = e_t + unconditional_guidance_scale * (e_t - e_t_uncond)\n elif uc_type == 'cfg_ours':\n e_t = e_t + unconditional_guidance_scale * (e_t_uncond - e_t)\n else:\n raise NotImplementedError\n # temporal guidance\n if conditional_guidance_scale_temporal is not None:\n e_t_temporal = self.model.apply_model(x, t, c, **kwargs)\n e_t_image = self.model.apply_model(x, t, c, no_temporal_attn=True, **kwargs)\n e_t = e_t + conditional_guidance_scale_temporal * (e_t_temporal - e_t_image)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps 
else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n \n if is_video:\n size = (b, 1, 1, 1, 1)\n else:\n size = (b, 1, 1, 1)\n a_t = torch.full(size, alphas[index], device=device)\n a_prev = torch.full(size, alphas_prev[index], device=device)\n sigma_t = torch.full(size, sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(size, sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n # print(f't={t}, pred_x0, min={torch.min(pred_x0)}, max={torch.max(pred_x0)}',file=f)\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n # # norm pred_x0\n # p=2\n # s=()\n # pred_x0 = pred_x0 - torch.max(torch.abs(pred_x0))\n\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n \n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n\n return x_prev, pred_x0"
},
{
"identifier": "DEFAULT_NEGATIVE_PROMPT",
"path": "main/evaluation/motionctrl_inference.py",
"snippet": "DEFAULT_NEGATIVE_PROMPT = 'blur, haze, deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, '\\\n 'sketch, cartoon, drawing, anime, mutated hands and fingers, deformed, distorted, '\\\n 'disfigured, poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, '\\\n 'floating limbs, disconnected limbs, mutation, mutated, ugly, disgusting, amputation'\n RT = camera_poses[..., None]\n RT = None\ndef load_model_checkpoint(model, ckpt, adapter_ckpt=None):\ndef load_trajs(cond_dir, trajs):\ndef load_camera_pose(cond_dir, camera_poses):\ndef save_results(samples, filename, savedir, fps=10):\ndef motionctrl_sample(\n model, \n prompts, \n noise_shape,\n camera_poses=None, \n trajs=None,\n n_samples=1,\n unconditional_guidance_scale=1.0,\n unconditional_guidance_scale_temporal=None,\n ddim_steps=50,\n ddim_eta=1.,\n **kwargs):\ndef run_inference(args, gpu_num, gpu_no):\ndef save_images(samples, savedir):\ndef get_parser():"
},
{
"identifier": "instantiate_from_config",
"path": "utils/utils.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))"
}
] | import argparse
import os
import tempfile
import cv2
import gradio as gr
import imageio
import numpy as np
import torch
import torchvision
from functools import partial
from omegaconf import OmegaConf
from PIL import Image
from pytorch_lightning import seed_everything
from gradio_utils.camera_utils import CAMERA_MOTION_MODE, process_camera
from gradio_utils.traj_utils import (OBJECT_MOTION_MODE, get_provided_traj,
process_points, process_traj)
from gradio_utils.utils import vis_camera
from lvdm.models.samplers.ddim import DDIMSampler
from main.evaluation.motionctrl_inference import (DEFAULT_NEGATIVE_PROMPT,
load_model_checkpoint,
post_prompt)
from utils.utils import instantiate_from_config | 8,205 | camera_dict['speed'] = camera_speed
return display_camera_info(camera_dict)
def reset_camera():
global camera_dict
camera_dict = {
"motion":[],
"mode": "Customized Mode 1: First A then B",
"speed": 1.0,
"complex": None
}
return display_camera_info(camera_dict)
def fn_traj_droplast():
global traj_list
if traj_list:
traj_list.pop()
if traj_list:
traj_str = [f"{traj}" for traj in traj_list]
return ", ".join(traj_str)
else:
return "Click to specify trajectory"
def fn_traj_reset():
global traj_list
traj_list = []
return "Click to specify trajectory"
###########################################
model_path='./motionctrl.pth?download=true'
config_path='./configs/inference/config_both.yaml'
if not os.path.exists(model_path):
os.system(f'wget https://huggingface.co/TencentARC/MotionCtrl/resolve/main/motionctrl.pth?download=true -P .')
config = OmegaConf.load(config_path)
model_config = config.pop("model", OmegaConf.create())
model = instantiate_from_config(model_config)
if torch.cuda.is_available():
model = model.cuda()
model = load_model_checkpoint(model, model_path)
model.eval()
def model_run(prompts, infer_mode, seed, n_samples):
global traj_list
global camera_dict
RT = process_camera(camera_dict).reshape(-1,12)
traj_flow = process_traj(traj_list).transpose(3,0,1,2)
print(prompts)
print(RT.shape)
print(traj_flow.shape)
noise_shape = [1, 4, 16, 32, 32]
unconditional_guidance_scale = 7.5
unconditional_guidance_scale_temporal = None
# n_samples = 1
ddim_steps= 50
ddim_eta=1.0
cond_T=800
if n_samples < 1:
n_samples = 1
if n_samples > 4:
n_samples = 4
seed_everything(seed)
if infer_mode == MODE[0]:
camera_poses = RT
camera_poses = torch.tensor(camera_poses).float()
camera_poses = camera_poses.unsqueeze(0)
trajs = None
if torch.cuda.is_available():
camera_poses = camera_poses.cuda()
elif infer_mode == MODE[1]:
trajs = traj_flow
trajs = torch.tensor(trajs).float()
trajs = trajs.unsqueeze(0)
camera_poses = None
if torch.cuda.is_available():
trajs = trajs.cuda()
else:
camera_poses = RT
trajs = traj_flow
camera_poses = torch.tensor(camera_poses).float()
trajs = torch.tensor(trajs).float()
camera_poses = camera_poses.unsqueeze(0)
trajs = trajs.unsqueeze(0)
if torch.cuda.is_available():
camera_poses = camera_poses.cuda()
trajs = trajs.cuda()
ddim_sampler = DDIMSampler(model)
batch_size = noise_shape[0]
## get condition embeddings (support single prompt only)
if isinstance(prompts, str):
prompts = [prompts]
for i in range(len(prompts)):
prompts[i] = f'{prompts[i]}, {post_prompt}'
cond = model.get_learned_conditioning(prompts)
if camera_poses is not None:
RT = camera_poses[..., None]
else:
RT = None
if trajs is not None:
traj_features = model.get_traj_features(trajs)
else:
traj_features = None
if unconditional_guidance_scale != 1.0:
# prompts = batch_size * [""]
|
os.environ['KMP_DUPLICATE_LIB_OK']='True'
#### Description ####
title = r"""<h1 align="center">MotionCtrl: A Unified and Flexible Motion Controller for Video Generation</h1>"""
description = r"""
<b>Official Gradio demo</b> for <a href='https://github.com/TencentARC/MotionCtrl' target='_blank'><b>MotionCtrl: A Unified and Flexible Motion Controller for Video Generation</b></a>.<br>
🔥 MotionCtrl is capable of independently and flexibly controling the camera motion and object motion of a generated video, with only a unified model.<br>
🤗 Try to control the motion of the generated videos yourself!<br>
❗❗❗ Please note that current version of **MotionCtrl** is deployed on **LVDM/VideoCrafter**. The versions that depolyed on **AnimateDiff** and **SVD** will be released soon.<br>
"""
article = r"""
If MotionCtrl is helpful, please help to ⭐ the <a href='https://github.com/TencentARC/MotionCtrl' target='_blank'>Github Repo</a>. Thanks!
[](https://github.com/TencentARC/MotionCtrl)
---
📝 **Citation**
<br>
If our work is useful for your research, please consider citing:
```bibtex
@inproceedings{wang2023motionctrl,
title={MotionCtrl: A Unified and Flexible Motion Controller for Video Generation},
author={Wang, Zhouxia and Yuan, Ziyang and Wang, Xintao and Chen, Tianshui and Xia, Menghan and Luo, Ping and Shan, Yin},
booktitle={arXiv preprint arXiv:2312.03641},
year={2023}
}
```
📧 **Contact**
<br>
If you have any questions, please feel free to reach me out at <b>[email protected]</b>.
"""
css = """
.gradio-container {width: 85% !important}
.gr-monochrome-group {border-radius: 5px !important; border: revert-layer !important; border-width: 2px !important; color: black !important;}
span.svelte-s1r2yt {font-size: 17px !important; font-weight: bold !important; color: #d30f2f !important;}
button {border-radius: 8px !important;}
.add_button {background-color: #4CAF50 !important;}
.remove_button {background-color: #f44336 !important;}
.clear_button {background-color: gray !important;}
.mask_button_group {gap: 10px !important;}
.video {height: 300px !important;}
.image {height: 300px !important;}
.video .wrap.svelte-lcpz3o {display: flex !important; align-items: center !important; justify-content: center !important;}
.video .wrap.svelte-lcpz3o > :first-child {height: 100% !important;}
.margin_center {width: 50% !important; margin: auto !important;}
.jc_center {justify-content: center !important;}
"""
T_base = [
[1.,0.,0.], ## positive x in W2C: camera moves left
[-1.,0.,0.], ## negative x in W2C: camera moves right
[0., 1., 0.], ## positive y in W2C: camera moves up
[0.,-1.,0.], ## negative y in W2C: camera moves down
[0.,0.,1.], ## positive z in W2C: camera moves forward (zoom out)
[0.,0.,-1.], ## negative z in W2C: camera moves forward (zoom in)
]
radius = 1
n = 16
# step =
look_at = np.array([0, 0, 0.8]).reshape(3,1)
# look_at = np.array([0, 0, 0.2]).reshape(3,1)
T_list = []
base_R = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
res = []
res_forsave = []
T_range = 1.8
for i in range(0, 16):
# theta = (1)*np.pi*i/n
R = base_R[:,:3]
T = np.array([0.,0.,1.]).reshape(3,1) * (i/n)*2
RT = np.concatenate([R,T], axis=1)
res.append(RT)
fig = vis_camera(res)
# MODE = ["camera motion control", "object motion control", "camera + object motion control"]
MODE = ["control camera poses", "control object trajectory", "control both camera and object motion"]
BASE_MODEL = ['LVDM/VideoCrafter', 'AnimateDiff', 'SVD']
traj_list = []
camera_dict = {
"motion":[],
"mode": "Customized Mode 1: First A then B", # "First A then B", "Both A and B", "Custom"
"speed": 1.0,
"complex": None
}
def fn_vis_camera(info_mode):
global camera_dict
RT = process_camera(camera_dict) # [t, 3, 4]
if camera_dict['complex'] is not None:
# rescale T to [-2,2]
for i in range(3):
min_T = np.min(RT[:,i,-1])
max_T = np.max(RT[:,i,-1])
if min_T < -2 or max_T > 2:
RT[:,i,-1] = RT[:,i,-1] - min_T
RT[:,i,-1] = RT[:,i,-1] / (np.max(RT[:,:,-1]) + 1e-6)
RT[:,i,-1] = RT[:,i,-1] * 4
RT[:,i,-1] = RT[:,i,-1] - 2
fig = vis_camera(RT)
if info_mode == MODE[0]:
vis_step3_prompt_generate = True
vis_prompt = True
vis_num_samples = True
vis_seed = True
vis_start = True
vis_gen_video = True
vis_object_mode = False
vis_object_info = False
else:
vis_step3_prompt_generate = False
vis_prompt = False
vis_num_samples = False
vis_seed = False
vis_start = False
vis_gen_video = False
vis_object_mode = True
vis_object_info = True
return fig, \
gr.update(visible=vis_object_mode), \
gr.update(visible=vis_object_info), \
gr.update(visible=vis_step3_prompt_generate), \
gr.update(visible=vis_prompt), \
gr.update(visible=vis_num_samples), \
gr.update(visible=vis_seed), \
gr.update(visible=vis_start), \
gr.update(visible=vis_gen_video, value=None)
def fn_vis_traj():
global traj_list
xy_range = 1024
points = process_points(traj_list)
imgs = []
for idx in range(16):
bg_img = np.ones((1024, 1024, 3), dtype=np.uint8) * 255
for i in range(15):
p = points[i]
p1 = points[i+1]
cv2.line(bg_img, p, p1, (255, 0, 0), 2)
if i == idx:
cv2.circle(bg_img, p, 2, (0, 255, 0), 20)
if idx==(15):
cv2.circle(bg_img, points[-1], 2, (0, 255, 0), 20)
imgs.append(bg_img.astype(np.uint8))
# size = (512, 512)
fps = 10
path = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
writer = imageio.get_writer(path, format='mp4', mode='I', fps=fps)
for img in imgs:
writer.append_data(img)
writer.close()
vis_step3_prompt_generate = True
vis_prompt = True
vis_num_samples = True
vis_seed = True
vis_start = True
vis_gen_video = True
return path, gr.update(visible=vis_step3_prompt_generate), \
gr.update(visible=vis_prompt), \
gr.update(visible=vis_num_samples), \
gr.update(visible=vis_seed), \
gr.update(visible=vis_start), \
gr.update(visible=vis_gen_video, value=None)
def display_camera_info(camera_dict, camera_mode=None):
if camera_dict['complex'] is not None:
res = f"complex : {camera_dict['complex']}. "
else:
res = ""
res += f"motion : {[_ for _ in camera_dict['motion']]}. "
res += f"speed : {camera_dict['speed']}. "
if camera_mode == CAMERA_MOTION_MODE[2]:
res += f"mode : {camera_dict['mode']}. "
return res
def add_traj_point(evt: gr.SelectData, ):
global traj_list
traj_list.append(evt.index)
traj_str = [f"{traj}" for traj in traj_list]
return ", ".join(traj_str)
def add_provided_traj(traj_name):
global traj_list
traj_list = get_provided_traj(traj_name)
traj_str = [f"{traj}" for traj in traj_list]
return ", ".join(traj_str)
def add_camera_motion(camera_motion, camera_mode):
global camera_dict
if camera_dict['complex'] is not None:
camera_dict['complex'] = None
if camera_mode == CAMERA_MOTION_MODE[2] and len(camera_dict['motion']) <2:
camera_dict['motion'].append(camera_motion)
else:
camera_dict['motion']=[camera_motion]
return display_camera_info(camera_dict, camera_mode)
def add_complex_camera_motion(camera_motion):
global camera_dict
camera_dict['complex']=camera_motion
return display_camera_info(camera_dict)
def change_camera_mode(combine_type, camera_mode):
global camera_dict
camera_dict['mode'] = combine_type
return display_camera_info(camera_dict, camera_mode)
def change_camera_speed(camera_speed):
global camera_dict
camera_dict['speed'] = camera_speed
return display_camera_info(camera_dict)
def reset_camera():
global camera_dict
camera_dict = {
"motion":[],
"mode": "Customized Mode 1: First A then B",
"speed": 1.0,
"complex": None
}
return display_camera_info(camera_dict)
def fn_traj_droplast():
global traj_list
if traj_list:
traj_list.pop()
if traj_list:
traj_str = [f"{traj}" for traj in traj_list]
return ", ".join(traj_str)
else:
return "Click to specify trajectory"
def fn_traj_reset():
global traj_list
traj_list = []
return "Click to specify trajectory"
###########################################
model_path='./motionctrl.pth?download=true'
config_path='./configs/inference/config_both.yaml'
if not os.path.exists(model_path):
os.system(f'wget https://huggingface.co/TencentARC/MotionCtrl/resolve/main/motionctrl.pth?download=true -P .')
config = OmegaConf.load(config_path)
model_config = config.pop("model", OmegaConf.create())
model = instantiate_from_config(model_config)
if torch.cuda.is_available():
model = model.cuda()
model = load_model_checkpoint(model, model_path)
model.eval()
def model_run(prompts, infer_mode, seed, n_samples):
global traj_list
global camera_dict
RT = process_camera(camera_dict).reshape(-1,12)
traj_flow = process_traj(traj_list).transpose(3,0,1,2)
print(prompts)
print(RT.shape)
print(traj_flow.shape)
noise_shape = [1, 4, 16, 32, 32]
unconditional_guidance_scale = 7.5
unconditional_guidance_scale_temporal = None
# n_samples = 1
ddim_steps= 50
ddim_eta=1.0
cond_T=800
if n_samples < 1:
n_samples = 1
if n_samples > 4:
n_samples = 4
seed_everything(seed)
if infer_mode == MODE[0]:
camera_poses = RT
camera_poses = torch.tensor(camera_poses).float()
camera_poses = camera_poses.unsqueeze(0)
trajs = None
if torch.cuda.is_available():
camera_poses = camera_poses.cuda()
elif infer_mode == MODE[1]:
trajs = traj_flow
trajs = torch.tensor(trajs).float()
trajs = trajs.unsqueeze(0)
camera_poses = None
if torch.cuda.is_available():
trajs = trajs.cuda()
else:
camera_poses = RT
trajs = traj_flow
camera_poses = torch.tensor(camera_poses).float()
trajs = torch.tensor(trajs).float()
camera_poses = camera_poses.unsqueeze(0)
trajs = trajs.unsqueeze(0)
if torch.cuda.is_available():
camera_poses = camera_poses.cuda()
trajs = trajs.cuda()
ddim_sampler = DDIMSampler(model)
batch_size = noise_shape[0]
## get condition embeddings (support single prompt only)
if isinstance(prompts, str):
prompts = [prompts]
for i in range(len(prompts)):
prompts[i] = f'{prompts[i]}, {post_prompt}'
cond = model.get_learned_conditioning(prompts)
if camera_poses is not None:
RT = camera_poses[..., None]
else:
RT = None
if trajs is not None:
traj_features = model.get_traj_features(trajs)
else:
traj_features = None
if unconditional_guidance_scale != 1.0:
# prompts = batch_size * [""] | prompts = batch_size * [DEFAULT_NEGATIVE_PROMPT] | 8 | 2023-12-27 19:32:03+00:00 | 12k |
0x00wolf/hkrsAI | hkrsai.py | [
{
"identifier": "fetch_args",
"path": "src/args.py",
"snippet": "def fetch_args():\n \"\"\"Function to handle command-line arguments\"\"\"\n p = argparse.ArgumentParser(\n formatter_class=argparse.RawTextHelpFormatter,\n prog='hkrsAI.v2',\n description=DESCRIPTION,\n epilog=MORE_INFO\n )\n p.add_argument('--system-prompt', '-sp', required=False, default=None, dest='system_prompt',\n help=SYSTEM_PROMPT),\n p.add_argument('--model', '-m', default='gpt-3.5-turbo', type=str, choices=['gpt-3.5-turbo', 'gpt-4', 'gpt-4-turbo'],\n help=MODEL),\n p.add_argument('--temperature', '-t', type=float, default=0.7, # min=0, max=2.0,\n help=TEMPERATURE),\n p.add_argument('--frequency-penalty', '-fp', type=float, default=0, # min=-2.0, max=2.0,\n help=FREQUENCY_PENALTY),\n p.add_argument('--presence-penalty', '-pp', type=float, default=0, # min=-2.0, max=2.0,\n help=PRESENCE_PENALTY),\n p.add_argument('--top-p', type=float, default=1.0, help=TOP_P), # min=0.1, max=1.0,\n # todo: p.add_argument('-st', '--stop', default=[], nargs='*', help=variables.stop),\n p.add_argument('--max-tokens', '-mt', type=int, default=1000, help=MAX_TOKENS),\n p.add_argument('-n', type=int, default=1, help=N),\n p.add_argument('--log-level', '-ll', default=2, type=int, help=LOG_LEVEL),\n p.add_argument('--log-format', '-lf', default='json', type=str, help=LOG_FORMAT)\n return p.parse_args()"
},
{
"identifier": "PathFinder",
"path": "src/pathfinder.py",
"snippet": "class PathFinder:\n \"\"\"Class that returns an object with necessary paths for runtime operations\"\"\"\n def __init__(self, cwd: str):\n self.cwd = cwd\n self.config = f'{self.cwd}/config.json'\n self.logs = f'{self.cwd}/logs'\n self.prompts = f'{self.cwd}/prompts'\n self._first_runtime()\n self._prompts_dir_exists()\n\n @staticmethod\n def _get_cwd():\n \"\"\"Fetch the current working directory\"\"\"\n abs_path = os.path.abspath(__file__)\n cwd = os.path.dirname(abs_path)\n return cwd\n\n def _first_runtime(self):\n \"\"\"Initialize the config.json and logs directory if not present at runtime.\"\"\"\n self._init_cfg_json()\n self._init_logs_dir()\n\n def _prompts_dir_exists(self):\n \"\"\"Check to see if the prompts directory is present, or print an error and exit.\"\"\"\n if not os.path.exists(self.prompts):\n print('[*] error: prompts directory is missing')\n sys.exit()\n\n def _init_cfg_json(self):\n \"\"\"Generate the config.json file.\"\"\"\n if not os.path.exists(self.config):\n self._dump(CONFIG_INIT, self.config)\n\n def _init_logs_dir(self):\n \"\"\"Generate the logs directory\"\"\"\n if not os.path.exists(self.logs):\n os.makedirs(self.logs)\n\n @staticmethod\n def _dump(json_dict, json_file):\n \"\"\"Dumps a JSON object to a file\"\"\"\n with open(json_file, 'w') as f:\n json.dump(json_dict, f, indent=6)"
},
{
"identifier": "Client",
"path": "src/client.py",
"snippet": "class Client:\n \"\"\"A class representing the OpenAI API Client\"\"\"\n def __init__(self, config):\n self.client = None\n self.api_key = ''\n self.config = config\n\n def initialize(self):\n \"\"\"Checks config.json for a stored API key, or prompts the user to input a new key\"\"\"\n config_data = self._json_load(self.config)\n api_key = config_data['api_key']\n if api_key:\n good_key = self.test_key(api_key)\n if good_key:\n self.api_key = api_key\n self.client = openai.OpenAI(api_key=self.api_key)\n else:\n self.set_key()\n else:\n self.set_key()\n\n @staticmethod\n def test_key(api_key):\n \"\"\"Send a test message to the GPT API to check if an API key is valid\"\"\"\n client = openai.OpenAI(api_key=api_key)\n try:\n try:\n response = client.chat.completions.create(\n model='gpt-3.5-turbo',\n max_tokens=5,\n messages=[{'role': 'user', 'content': 'This is a test .'}])\n except openai.AuthenticationError:\n print('[*] error, invalid API key')\n return False\n else:\n print('[*] API key verified')\n return True\n except openai.APIConnectionError:\n print('[*] network connection error\\n[*] exiting')\n sys.exit()\n\n def set_key(self):\n \"\"\"Set a new API key and test if it is valid\"\"\"\n while True:\n self.api_key = input('[*] insert OpenAI API key:\\n>')\n valid_key = self.test_key(self.api_key)\n if valid_key:\n config_data = self._json_load(self.config)\n config_data['api_key'] = self.api_key\n self._json_dump(config_data, self.config)\n self.client = openai.OpenAI(api_key=self.api_key)\n return\n\n @staticmethod\n def _json_load(json_file):\n \"\"\"Loads JSON object from a file\"\"\"\n with open(json_file, 'r') as f:\n data = json.load(f)\n return data\n\n @staticmethod\n def _json_dump(json_dict, json_file):\n \"\"\"Dumps a JSON object to a file\"\"\"\n with open(json_file, 'w') as f:\n json.dump(json_dict, f, indent=6)"
},
{
"identifier": "GPT",
"path": "src/gpt.py",
"snippet": "class GPT:\n def __init__(self, client, model, temperature, top_p, n, frequency_penalty, presence_penalty, max_tokens):\n self.client = client\n self.model = model\n self.temperature = temperature\n self.top_p = top_p\n self.n = n\n self.frequency_penalty = frequency_penalty\n self.presence_penalty = presence_penalty\n self.max_tokens = max_tokens\n\n @property\n def model(self):\n return self._model\n\n @model.setter\n def model(self, new_value: str):\n new_value = str(new_value)\n if new_value == 'gpt-3.5-turbo' or new_value == 'gpt-4':\n self._model = new_value\n else:\n raise ValueError(f'\\n{BAD_MODEL.format(new_value)}')\n\n @property\n def temperature(self):\n return self._temperature\n\n @temperature.setter\n def temperature(self, new_value: float):\n new_value = float(new_value)\n if not (0.0 <= new_value <= 2.0):\n raise ValueError(f'\\n{BAD_TEMP.format(new_value)}')\n else:\n self._temperature = new_value\n\n @property\n def top_p(self):\n return self._top_p\n\n @top_p.setter\n def top_p(self, new_value: float):\n new_value = float(new_value)\n if not (0 <= new_value <= 1.0):\n raise ValueError(f'\\n{BAD_TP.format(new_value)}')\n else:\n self._top_p = new_value\n\n @property\n def frequency_penalty(self):\n return self._frequency_penalty\n\n @frequency_penalty.setter\n def frequency_penalty(self, new_value: float):\n new_value = float(new_value)\n if not (-2.0 <= new_value <= 2.0):\n raise ValueError(f'\\n{BAD_FP.format(new_value)}')\n else:\n self._frequency_penalty = new_value\n\n @property\n def presence_penalty(self):\n return self._presence_penalty\n\n @presence_penalty.setter\n def presence_penalty(self, new_value: float):\n new_value = float(new_value)\n if not (-2.0 <= new_value <= 2.0):\n raise ValueError(f'\\n{BAD_PP.format(new_value)}')\n else:\n self._presence_penalty = new_value\n\n @property\n def n(self):\n return self._n\n\n @n.setter\n def n(self, new_value):\n new_value = int(new_value)\n if not (1 <= new_value <= 20):\n raise ValueError(f'\\n{BAD_N.format(new_value)}')\n else:\n self._n = new_value\n\n @property\n def max_tokens(self):\n return self._max_tokens\n\n @max_tokens.setter\n def max_tokens(self, new_value: int):\n new_value = int(new_value)\n if not (1 <= new_value <= 4096):\n raise ValueError(f'\\n{BAD_MT.format(new_value)}')\n else:\n self._max_tokens = new_value"
},
{
"identifier": "SystemPrompt",
"path": "src/systemprompt.py",
"snippet": "class SystemPrompt:\n \"\"\"A class that manages setting the system prompt used to define AI assistants. \\\n To add a new system prompt that will be selectable from the runtime menu, \\\n copy the prompt to an extensionless file in the appropriate category folder.\"\"\"\n def __init__(self, prompts_dir, path=''):\n self.dir = prompts_dir\n self.path = path\n self.content = ''\n self.title = 'custom'\n self._start()\n\n def _start(self):\n \"\"\"Allow the user to define a custom prompt, or select one of the pre-made options\"\"\"\n if not self.path:\n self.content = input(\"\\n[*] input a custom system prompt, \\\n \\n[*] hit enter to view preexisting options:\\n>\")\n if not self.content:\n self._set()\n else:\n self.content = self._fetch_contents(self.path)\n self.title = self.path.rpartition('/')[-1]\n\n def _set(self):\n \"\"\"Loop that runs until a prompt has been selected\"\"\"\n while True:\n category = self._select_category()\n title = self._select_prompt(category)\n if title == 'back':\n pass\n else:\n self.path = f'{self.dir}/{category}/{title}'\n prompt = self._fetch_contents(self.path)\n print(f'\\n{prompt}\\n')\n set_prompt = input(\"[*] select prompt\\n\\n[-] 'enter' to accept\\n[-] 'n' to go back\\n\"\n \"[-] 'x' to enter a custom font'\\n>\")\n if set_prompt == 'x':\n return SystemPrompt(prompts_dir=self.dir)\n elif set_prompt == 'n':\n pass\n else:\n self.title = self.path.rpartition('/')[-1]\n self.content = prompt\n print(f'[*] system prompt: {self.title}\\n[*] query AI:')\n return\n\n def _select_category(self):\n \"\"\"Select a system prompt category from the pre-made options\"\"\"\n print('\\n[-] categories\\n')\n categories = self._fetch_from(self.dir)\n categories.sort()\n choice = self._make_choice(categories)\n print(f'\\n[*] category: {choice}')\n return choice\n\n def _select_prompt(self, category):\n \"\"\"Select a pre-made system prompt from a particular category\"\"\"\n print('[-] prompts\\n')\n category = f'{self.dir}/{category}'\n system_prompts = self._fetch_from(category)\n system_prompts.sort()\n self.path = self._make_choice(system_prompts, go_back=True)\n return self.path\n\n def _make_choice(self, options_list, go_back=False):\n \"\"\"Provides the user with the ability to select a prompt from an enumerated options list\"\"\"\n # Select from a list of options by the objects enumerated position\n while True:\n try:\n self._enumerate_list(options_list, go_back)\n selection = input('\\n[*] select by position:\\n>')\n selection = int(selection)\n if 1 <= selection <= len(options_list):\n return options_list[selection - 1]\n elif go_back and selection == len(options_list) + 1:\n return 'back'\n except ValueError:\n print('[*] invalid selection')\n\n @staticmethod\n def _enumerate_list(options_list, go_back=False):\n \"\"\"\"Enumerates a list of options\"\"\"\n for x, _item in enumerate(options_list, 1):\n print(f'{x}. {_item}')\n if go_back:\n print(f'{x + 1}. back')\n\n @staticmethod\n def _fetch_contents(file_path):\n \"\"\"Fetches the contents of a file\"\"\"\n try:\n with open(file_path, 'r') as f:\n return f.read()\n except FileNotFoundError:\n pass\n\n @staticmethod\n def _fetch_from(root_dir):\n \"\"\"Returns a list containing the contents of a directory\"\"\"\n directories = os.listdir(root_dir)\n return directories"
},
{
"identifier": "Conversation",
"path": "src/conversation.py",
"snippet": "class Conversation:\n messages: list[dict] = dataclasses.field(default_factory=list)\n query: str = ''\n reply: str = ''\n response: dict = dataclasses.field(default_factory=dict)\n tokens: int = 0\n\n def start(self, system_prompt: str):\n self.messages = [{\"role\": \"system\", \"content\": system_prompt}]\n print()\n return Conversation(messages=self.messages)\n\n def speak(self, content: str):\n self.messages.append({\"role\": \"user\", \"content\": content})\n return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)\n\n def think(self, thought):\n if self.query == '':\n self.query = thought\n else:\n self.query = f'{self.query}\\n{thought}'\n return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)\n\n def listen(self, gpt: GPT):\n \"\"\"Function to perform GPT chat completions via the API\"\"\"\n self.response = gpt.client.chat.completions.create(\n model=gpt.model,\n messages=self.messages,\n temperature=gpt.temperature,\n top_p=gpt.top_p,\n n=gpt.n,\n max_tokens=gpt.max_tokens,\n frequency_penalty=gpt.frequency_penalty,\n presence_penalty=gpt.presence_penalty,\n )\n self.reply = self.response.choices[0].message.content\n self.tokens = self.response.usage.total_tokens\n print(f\"\\n{self.reply}\\n\")\n self.messages.append({\"role\": \"assistant\", \"content\": self.reply})\n\n return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)\n\n def breath(self):\n return Conversation(messages=self.messages, query='', reply=self.reply, response=self.response)\n\n @staticmethod\n def greet():\n return Conversation(messages=[], query='', reply='', response=None)"
},
{
"identifier": "Action",
"path": "src/action.py",
"snippet": "class Action:\n \"\"\"Action dataclass returned by the input parser after parsing new input. \\\n Gets passed to the dispatcher who turns Action into function.\"\"\"\n command: str = ''\n arguments: list[str] = dataclasses.field(default_factory=list)\n raw_input: str = ''"
},
{
"identifier": "InputParser",
"path": "src/inputparser.py",
"snippet": "class InputParser:\n @staticmethod\n def parse(user_input):\n \"\"\"parses user input and passes an Action to the Dispatcher\"\"\"\n if user_input.startswith('>'):\n if ' ' in user_input:\n user_input = user_input.split(' ')\n command = user_input.pop(0).replace('>', '')\n arguments = user_input[:]\n return Action(command=command, arguments=arguments)\n else:\n command = user_input.replace('>', '')\n for _command in COMMANDS:\n if command == _command:\n return Action(command=command)\n return Action(command='error')\n else:\n action = Action(command='chat', raw_input=user_input)\n return action"
},
{
"identifier": "Dispatcher",
"path": "src/dispatcher.py",
"snippet": "class Dispatcher:\n \"\"\"Dispatches functions and manages conversation state.\"\"\"\n def __init__(self):\n self.thinking: bool = False\n\n def dispatch(self, action: Action):\n \"\"\"Turns an Action into a function\"\"\"\n if action.command == 'stop':\n self.thinking = True # >stop\n return self.silence\n elif action.command == 'start':\n self.thinking = False # >start\n return self.silence\n elif self.thinking and action.command == 'chat':\n return self.think\n elif action.command == 'chat':\n return self.speak\n elif action.command == 'exec':\n return self.execute\n elif action.command == 'insert':\n return self.insert\n elif action.command == 'show':\n return self.show\n elif action.command == 'flush':\n return self.flush\n elif action.command == 'save':\n return self.save\n elif action.command == 'set':\n return self.set\n elif action.command == 'reset':\n return self.reset\n elif action.command == 'help':\n return self.help\n elif action.command == 'exit':\n return self.goodbye\n else:\n return self.silence\n\n @staticmethod\n def silence(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"Whereof one cannot speak, thereof one must be silent\"\"\"\n return gpt, conversation, logger\n\n @staticmethod\n def think(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\">stop While thinking: Append user input to conversation.query.\"\"\"\n conversation = conversation.think(action.raw_input)\n return gpt, conversation, logger\n\n @staticmethod\n def speak(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"Send query to GPT API and receive response\"\"\"\n try:\n if action.raw_input == '':\n conversation = conversation.speak(content=conversation.query)\n elif action.raw_input != '' and conversation.query == '':\n conversation = conversation.speak(content=action.raw_input)\n elif action.raw_input != '' and conversation.query != '':\n conversation = conversation.speak(content=f'{conversation.query}\\n{action.raw_input}')\n conversation = conversation.listen(gpt=gpt)\n logger = logger.log(conversation)\n except openai.BadRequestError as e:\n print(f'[*] ')\n except openai.APIError as e:\n # Handle API error here, e.g. 
retry or log\n print(f\"[*] OpenAI API returned an API Error: {e}\")\n except openai.RateLimitError as e:\n # Handle rate limit error (we recommend using exponential backoff)\n print(f\"[*] OpenAI API request exceeded rate limit: {e}\")\n return gpt, conversation.breath(), logger\n\n # >exec\n def execute(self, gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"Execute a system-wide command from within the program.\n Author's intended use case is directory traversal\"\"\"\n try:\n if action.arguments[0] == 'cd': # hack to allow the user to change directories\n if action.arguments[1] == 'home':\n os.chdir(logger.paths.cwd)\n else:\n os.chdir(action.arguments[1])\n print(f'[*] cwd ~ {os.getcwd()}')\n elif action.arguments[0] == 'cat':\n print(self._fetch_contents(action.arguments[1]), '\\n')\n else:\n output = subprocess.check_output(action.arguments[:], shell=True, text=True,\n stderr=subprocess.STDOUT, timeout=3)\n print(output)\n except subprocess.CalledProcessError as e:\n print(f'[*] subprocess error: {e}')\n except OSError as e:\n print(f'[*] os error: {e}')\n return gpt, conversation, logger\n\n # >insert\n def insert(self, gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"While thinking: Appends the contents of a file to the query with the >insert command\"\"\"\n insert_me = self._fetch_contents(action.arguments[0])\n conversation.query += f'{conversation.query}\\n{insert_me}'\n return gpt, conversation, logger\n\n # >flush\n @staticmethod\n def flush(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"While thinking: Resets the conversation.query to ''\"\"\"\n conversation = conversation.breath()\n return gpt, conversation, logger\n\n # >save\n @staticmethod\n def save(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"Extract and save code, the reply, or the response object to an absolute, relative, or generic path\"\"\"\n try:\n logger.save(arguments=action.arguments, conversation=conversation)\n except FileNotFoundError:\n print(f'[*] error saving data')\n return gpt, conversation, logger\n\n # >set\n @staticmethod\n def set(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"Allows the user to change the values for the keys of instantiated objects\"\"\"\n try:\n if action.arguments[0] == 'level' or action.arguments[0] == 'format':\n setattr(logger, action.arguments[0], ast.literal_eval(action.arguments[1]))\n print(f'[*] gpt ~ {action.arguments[0]}: {action.arguments[1]}')\n elif action.arguments[0] in ['model', 'temperature', 'top_p', 'n', 'frequency_penalty',\n 'presence_penalty', 'max_tokens']:\n setattr(gpt, action.arguments[0], ast.literal_eval(action.arguments[1]))\n print(f'[*] gpt ~ {action.arguments[0]}: {action.arguments[1]}')\n elif action.arguments[0] == 'gpt':\n if action.arguments[1] == 'client':\n print('[*] use `>reset client` to change API key')\n else:\n setattr(gpt, action.arguments[1], ast.literal_eval(action.arguments[2]))\n print(f'[*] {action.arguments[0]} ~ {action.arguments[1]}: {action.arguments[2]}')\n elif action.arguments[0] == 'logger' and ('format' == action.arguments[1] == 'level'):\n setattr(logger, action.arguments[1], ast.literal_eval(action.arguments[2]))\n print(f'[*] {action.arguments[0]} ~ {action.arguments[1]}: {action.arguments[2]}')\n else:\n print('[*] invalid entry')\n except AttributeError:\n print('[*] attribute error')\n except ValueError:\n print('[*] value error')\n except TypeError:\n 
print('[*] type error')\n return gpt, conversation, logger\n\n # >show\n @staticmethod\n def show(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"Display values contained by objects: gpt, conversation, action, and logger\"\"\"\n try:\n if len(action.arguments) == 0:\n print(f'[*] query:\\n{conversation.query}\\n')\n elif action.arguments[0] == 'query':\n print(f'[*] query:\\n{conversation.query}\\n')\n elif action.arguments[0] == 'gpt' and len(action.arguments) == 2:\n print(f'[*] gpt ~ {action.arguments[1]}: {getattr(gpt, action.arguments[1])}\\n')\n elif action.arguments[0] == 'conversation':\n print(f\"[*] conversation ~ {action.arguments[1]}: {getattr(conversation, action.arguments[1])}\\n\")\n elif action.arguments[0] == 'logger':\n print(f'[*]logger ~ {action.arguments[1]}: {getattr(logger, action.arguments[1])}')\n elif action.arguments[0] == 'all':\n objects = [gpt, logger]\n for instance in objects:\n print(f'\\n[*] {type(instance).__name__}')\n for key, value in instance.__dict__.items():\n if key == 'client' or key == 'paths':\n pass\n else:\n print(f\"[-] {key.lstrip('_')}: {value}\")\n elif action.arguments[0] == 'gpt':\n print(\"\\n[*] GPT:\")\n for key, value in gpt.__dict__.items():\n if key == 'client':\n pass\n else:\n print(f\"[-] {key.lstrip('_')}: {value}\")\n elif [action.arguments[0] in k for k, v in gpt.__dict__.items()]:\n print(f\"[*] {action.arguments[0]}: {getattr(gpt, action.arguments[0])}\")\n except AttributeError:\n print('[*] invalid entry')\n return gpt, conversation, logger\n\n # >reset\n @staticmethod\n def reset(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"Reset the AI assistant, or start a new log entry\"\"\"\n if len(action.arguments) == 0 or ('chat' == action.arguments[0] == 'conversation'):\n print('[*] resetting AI')\n logger.new_log()\n prompt = SystemPrompt(prompts_dir=logger.paths.prompts)\n conversation = Conversation().start(prompt.content)\n elif action.arguments[0] == 'log':\n logger.new_log()\n return gpt, conversation, logger\n\n # >help\n @staticmethod\n def help(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"Prints the help string for the context management commands\"\"\"\n print(f'[*] hkrsAI\\ncommand: >help\\n{HELP_STRING}')\n return gpt, conversation, logger\n\n # >exit\n @staticmethod\n def goodbye(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"This is the end\"\"\"\n logger.log(conversation)\n print('\\n[*] exiting\\n')\n sys.exit()\n\n @staticmethod\n def _fetch_contents(file_path):\n \"\"\"Return the contents of a file as a string variable\"\"\"\n try:\n with open(file_path, 'r') as f:\n return f.read()\n except FileNotFoundError:\n pass"
},
{
"identifier": "Logger",
"path": "src/logger.py",
"snippet": "class Logger:\n def __init__(self, paths: PathFinder, log_level: int, log_format: str):\n \"\"\"Logs conversations and saves data at the user's request\"\"\"\n self.level: int = log_level\n self.format: str = log_format\n self.paths: Paths = paths\n self.number: int = 0\n self.file: str = ''\n self.savefile: str = ''\n self.save_number: int = 0\n self.new_log()\n\n @property\n def level(self):\n return self._level\n\n @level.setter\n def level(self, new_value: int):\n if 1 != new_value != 2:\n raise TypeError\n else:\n self._level = new_value\n\n @property\n def format(self):\n return self._format\n\n @format.setter\n def format(self, new_value: str):\n if new_value == 'txt' or new_value == 'json':\n self._format = new_value\n else:\n self._format = new_value\n\n def new_log(self):\n self.number = self._next_number()\n self.file = self._new_file()\n \n def _next_number(self):\n \"\"\"Fetch the next log number from config.json and updates it\"\"\"\n config_data = self._load(self.paths.config)\n self.number = log_num = config_data['log_number']\n config_data['log_number'] = self.number + 1\n self._dump(config_data, self.paths.config)\n return self.number\n \n def _new_file(self):\n \"\"\"Generates a new logfile relative the current log number\"\"\"\n while True: # to prevent inadvertently overwriting logs if the value is changed in config.json\n self.file = f'{self.paths.logs}/log{self.number}.{self.format}'\n try:\n with open(self.file, 'x'):\n print(f'[*] logfile generated ~ {self.file}')\n return self.file\n except FileExistsError:\n self.number += 1\n\n def log(self, conversation: Conversation):\n \"\"\"Logs the response or messages as a JSON or TXT file relative to args\"\"\"\n if self.level == 1 and self.format != 'txt':\n print('[*] level 1 only supports .txt output')\n self.format = 'txt'\n if self.level == 1:\n self._dump(str(conversation.response), self.file)\n return self\n elif self.level == 2 and self.format == 'json':\n self._dump(conversation.messages, self.file)\n return self\n elif self.level == 2 and self.format == 'txt':\n with open(self.file, 'w') as f:\n for i in range(len(conversation.messages)):\n f.write(f\"{conversation.messages[i]['role']}:--------------\\n\\n\" \\\n f\"{conversation.messages[i]['content']}\\n\\n\")\n return self\n\n # >save\n def save(self, arguments, conversation):\n \"\"\"Saves information at the user's request\"\"\"\n if len(arguments) == 0:\n self._update_savefile()\n self._save_text(self.savefile, conversation.reply)\n print(f'[*] saving reply to ~ {self.savefile}')\n return\n if len(arguments) != 2:\n self._update_savefile()\n else:\n self.savefile = arguments[1]\n if arguments[0] == 'code':\n p = re.compile(r\"```((.|\\n)*)```\")\n match = re.search(p, conversation.reply)\n if match:\n self._save_text(self.savefile, match.group())\n print(f'[*] saving code to ~ {self.savefile}')\n else:\n print('[*] error: regex failed.\\n[*] ensure that GPT presents code in blocks ```code```')\n if arguments[0] == 'reply':\n self._save_text(self.savefile, conversation.reply)\n print(f'[*] saving reply to ~ {self.savefile}')\n elif arguments[0] == 'response':\n self._save_text(self.savefile, str(conversation.response))\n print(f'[*] saving response to ~ {self.savefile}')\n\n def _update_savefile(self):\n self.savefile = f'{self.paths.logs}/log{self.number}-{self.save_number}.pktai'\n self.save_number += 1\n\n @staticmethod\n def _save_text(filename, _text):\n \"\"\"Simple funtion to save text to a file\"\"\"\n with open(filename, 'w') as f:\n 
f.write(_text)\n\n @staticmethod\n def _load(json_file):\n \"\"\"Loads JSON object from a file\"\"\"\n with open(json_file, 'r') as f:\n data = json.load(f)\n return data\n\n @staticmethod\n def _dump(json_dict, json_file):\n \"\"\"Dumps a JSON object to a file\"\"\"\n with open(json_file, 'w') as f:\n json.dump(json_dict, f, indent=6)"
}
] | import sys
import os
import readline
from src.args import fetch_args
from src.pathfinder import PathFinder
from src.client import Client
from src.gpt import GPT
from src.systemprompt import SystemPrompt
from src.conversation import Conversation
from src.action import Action
from src.inputparser import InputParser
from src.dispatcher import Dispatcher
from src.logger import Logger | 7,825 |
HKRSAI = """
1 0 1
1 1 0
0 0 0
hkrsAI.v2
"""
def main():
print(HKRSAI)
args = fetch_args() # command-line arguments
paths = PathFinder(cwd=os.path.dirname(os.path.abspath(__file__)))
parser = InputParser() # Class to parse user input and return Actions.
    dispatcher = Dispatcher()  # Manages conversation state and turns Actions into functions.
logger = Logger(paths=paths, log_level=args.log_level, log_format=args.log_format)
client = Client(config=paths.config) # OpenAI API client management object
client.initialize() # Checks for valid saved API key or prompts user. Tests keys before proceeding
    gpt = GPT(  # Class that contains GPT's parameters
client=client.client,
model=args.model,
temperature=args.temperature,
top_p=args.top_p,
n=args.n,
frequency_penalty=args.frequency_penalty,
presence_penalty=args.presence_penalty,
max_tokens=args.max_tokens
)
|
HKRSAI = """
1 0 1
1 1 0
0 0 0
hkrsAI.v2
"""
def main():
print(HKRSAI)
args = fetch_args() # command-line arguments
paths = PathFinder(cwd=os.path.dirname(os.path.abspath(__file__)))
parser = InputParser() # Class to parse user input and return Actions.
    dispatcher = Dispatcher()  # Manages conversation state and turns Actions into functions.
logger = Logger(paths=paths, log_level=args.log_level, log_format=args.log_format)
client = Client(config=paths.config) # OpenAI API client management object
client.initialize() # Checks for valid saved API key or prompts user. Tests keys before proceeding
    gpt = GPT(  # Class that contains GPT's parameters
client=client.client,
model=args.model,
temperature=args.temperature,
top_p=args.top_p,
n=args.n,
frequency_penalty=args.frequency_penalty,
presence_penalty=args.presence_penalty,
max_tokens=args.max_tokens
)
| system_prompt = SystemPrompt(prompts_dir=paths.prompts, path=args.system_prompt) | 4 | 2023-12-22 07:04:47+00:00 | 12k |
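The row above pairs a truncated source file (`cropped_code`) with its ground-truth continuation (`next_line`) and records which retrieved context snippet that continuation relies on (`gold_snippet_index`). The sketch below is a minimal consumer-side sanity check; the dict-per-row access pattern and the helper name `check_row` are illustrative assumptions, not part of the dataset.

```python
# Hypothetical helper (assumed layout): each row is a dict keyed by the
# column names of this table.
def check_row(row: dict) -> None:
    # The gold continuation should occur somewhere in the full file text.
    assert row["next_line"] in row["all_code"], "next_line not found in all_code"
    # gold_snippet_index points into the list of retrieved context snippets.
    gold = row["context"][row["gold_snippet_index"]]
    assert {"identifier", "path", "snippet"} <= set(gold)
    print(f'{gold["identifier"]} ({gold["path"]}) -> {row["next_line"]!r}')
```

For the row above, the selected snippet is presumably the `SystemPrompt` class, since the gold next line instantiates it.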
hughxiouge/CompoundE3D | run.py | [
{
"identifier": "KGEModel",
"path": "model.py",
"snippet": "class KGEModel(nn.Module):\n def __init__(self, model_name, nentity, nrelation, hidden_dim, gamma, evaluator,\n double_entity_embedding=False, \n double_relation_embedding=False, triple_relation_embedding=False, quad_relation_embedding=False):\n super(KGEModel, self).__init__()\n self.model_name = model_name\n self.nentity = nentity\n self.nrelation = nrelation\n self.hidden_dim = hidden_dim\n self.epsilon = 2.0\n\n self.gamma = nn.Parameter(\n torch.Tensor([gamma]),\n requires_grad=False\n )\n\n self.embedding_range = nn.Parameter(\n torch.Tensor([(self.gamma.item() + self.epsilon) / hidden_dim]),\n requires_grad=False\n )\n\n self.entity_dim = hidden_dim*2 if double_entity_embedding else hidden_dim\n\n if double_relation_embedding:\n self.relation_dim = hidden_dim*2\n elif triple_relation_embedding:\n self.relation_dim = hidden_dim*3\n elif quad_relation_embedding:\n self.relation_dim = hidden_dim*4\n else:\n self.relation_dim = hidden_dim\n\n self.entity_embedding = nn.Parameter(torch.zeros(nentity, self.entity_dim))\n nn.init.uniform_(\n tensor=self.entity_embedding,\n a=-self.embedding_range.item(),\n b=self.embedding_range.item()\n )\n\n self.relation_embedding = nn.Parameter(torch.zeros(nrelation, self.relation_dim))\n nn.init.uniform_(\n tensor=self.relation_embedding,\n a=-self.embedding_range.item(),\n b=self.embedding_range.item()\n )\n\n #Do not forget to modify this line when you add a new model in the \"forward\" function\n if model_name not in ['TransE', 'DistMult', 'ComplEx', 'RotatE', 'PairRE', 'RotatEv2', 'CompoundE', 'CompoundE3D_Complete_Mix_T_H']:\n raise ValueError('model %s not supported' % model_name)\n\n if model_name == 'RotatE' and (not double_entity_embedding or double_relation_embedding):\n raise ValueError('RotatE should use --double_entity_embedding')\n\n if model_name == 'ComplEx' and (not double_entity_embedding or not double_relation_embedding):\n raise ValueError('ComplEx should use --double_entity_embedding and --double_relation_embedding')\n\n if model_name == 'PairRE' and (not double_relation_embedding):\n raise ValueError('PairRE should use --double_relation_embedding')\n\n if (model_name == 'CompoundE' or model_name == 'CompoundE3D_Complete_Mix_T_H') and (not triple_relation_embedding):\n raise ValueError('CompoundE should use --triple_relation_embedding')\n\n self.evaluator = evaluator\n\n def forward(self, sample, mode='single'):\n '''\n Forward function that calculate the score of a batch of triples.\n In the 'single' mode, sample is a batch of triple.\n In the 'head-batch' or 'tail-batch' mode, sample consists two part.\n The first part is usually the positive sample.\n And the second part is the entities in the negative samples.\n Because negative samples and positive samples usually share two elements\n in their triple ((head, relation) or (relation, tail)).\n '''\n\n if mode == 'single':\n batch_size, negative_sample_size = sample.size(0), 1\n\n head = torch.index_select(\n self.entity_embedding,\n dim=0,\n index=sample[:,0]\n ).unsqueeze(1)\n\n relation = torch.index_select(\n self.relation_embedding,\n dim=0,\n index=sample[:,1]\n ).unsqueeze(1)\n\n tail = torch.index_select(\n self.entity_embedding,\n dim=0,\n index=sample[:,2]\n ).unsqueeze(1)\n\n elif mode == 'head-batch':\n tail_part, head_part = sample\n batch_size, negative_sample_size = head_part.size(0), head_part.size(1)\n\n head = torch.index_select(\n self.entity_embedding,\n dim=0,\n index=head_part.view(-1)\n ).view(batch_size, negative_sample_size, -1)\n\n relation 
= torch.index_select(\n self.relation_embedding,\n dim=0,\n index=tail_part[:, 1]\n ).unsqueeze(1)\n\n tail = torch.index_select(\n self.entity_embedding,\n dim=0,\n index=tail_part[:, 2]\n ).unsqueeze(1)\n\n elif mode == 'tail-batch':\n head_part, tail_part = sample\n batch_size, negative_sample_size = tail_part.size(0), tail_part.size(1)\n\n head = torch.index_select(\n self.entity_embedding,\n dim=0,\n index=head_part[:, 0]\n ).unsqueeze(1)\n\n relation = torch.index_select(\n self.relation_embedding,\n dim=0,\n index=head_part[:, 1]\n ).unsqueeze(1)\n\n tail = torch.index_select(\n self.entity_embedding,\n dim=0,\n index=tail_part.view(-1)\n ).view(batch_size, negative_sample_size, -1)\n\n else:\n raise ValueError('mode %s not supported' % mode)\n\n model_func = {\n 'TransE': self.TransE,\n 'DistMult': self.DistMult,\n 'ComplEx': self.ComplEx,\n 'RotatE': self.RotatE,\n 'PairRE': self.PairRE,\n 'RotatEv2': self.RotatEv2,\n 'CompoundE': self.CompoundE,\n 'CompoundE3D_Complete_Mix_T_H': self.CompoundE3D_Complete_Mix_T_H\n }\n\n if self.model_name in model_func:\n score = model_func[self.model_name](head, relation, tail, mode)\n else:\n raise ValueError('model %s not supported' % self.model_name)\n\n return score\n\n def TransE(self, head, relation, tail, mode):\n if mode == 'head-batch':\n score = head + (relation - tail)\n else:\n score = (head + relation) - tail\n\n score = self.gamma.item() - torch.norm(score, p=1, dim=2)\n return score\n\n def DistMult(self, head, relation, tail, mode):\n if mode == 'head-batch':\n score = head * (relation * tail)\n else:\n score = (head * relation) * tail\n\n score = score.sum(dim = 2)\n return score\n\n def ComplEx(self, head, relation, tail, mode):\n re_head, im_head = torch.chunk(head, 2, dim=2)\n re_relation, im_relation = torch.chunk(relation, 2, dim=2)\n re_tail, im_tail = torch.chunk(tail, 2, dim=2)\n\n if mode == 'head-batch':\n re_score = re_relation * re_tail + im_relation * im_tail\n im_score = re_relation * im_tail - im_relation * re_tail\n score = re_head * re_score + im_head * im_score\n else:\n re_score = re_head * re_relation - im_head * im_relation\n im_score = re_head * im_relation + im_head * re_relation\n score = re_score * re_tail + im_score * im_tail\n\n score = score.sum(dim = 2)\n return score\n\n def RotatE(self, head, relation, tail, mode):\n pi = 3.14159265358979323846\n\n re_head, im_head = torch.chunk(head, 2, dim=2)\n re_tail, im_tail = torch.chunk(tail, 2, dim=2)\n\n #Make phases of relations uniformly distributed in [-pi, pi]\n\n phase_relation = relation/(self.embedding_range.item()/pi)\n\n re_relation = torch.cos(phase_relation)\n im_relation = torch.sin(phase_relation)\n\n if mode == 'head-batch':\n re_score = re_relation * re_tail + im_relation * im_tail\n im_score = re_relation * im_tail - im_relation * re_tail\n re_score = re_score - re_head\n im_score = im_score - im_head\n else:\n re_score = re_head * re_relation - im_head * im_relation\n im_score = re_head * im_relation + im_head * re_relation\n re_score = re_score - re_tail\n im_score = im_score - im_tail\n\n score = torch.stack([re_score, im_score], dim = 0)\n score = score.norm(dim = 0)\n\n score = self.gamma.item() - score.sum(dim = 2)\n return score\n\n def RotatEv2(self, head, relation, tail, mode, r_norm=None):\n pi = 3.14159265358979323846\n\n re_head, im_head = torch.chunk(head, 2, dim=2)\n re_tail, im_tail = torch.chunk(tail, 2, dim=2)\n\n #Make phases of relations uniformly distributed in [-pi, pi]\n phase_relation = 
relation/(self.embedding_range.item()/pi)\n\n re_relation = torch.cos(phase_relation)\n im_relation = torch.sin(phase_relation)\n\n re_relation_head, re_relation_tail = torch.chunk(re_relation, 2, dim=2)\n im_relation_head, im_relation_tail = torch.chunk(im_relation, 2, dim=2)\n\n re_score_head = re_head * re_relation_head - im_head * im_relation_head\n im_score_head = re_head * im_relation_head + im_head * re_relation_head\n\n re_score_tail = re_tail * re_relation_tail - im_tail * im_relation_tail\n im_score_tail = re_tail * im_relation_tail + im_tail * re_relation_tail\n\n re_score = re_score_head - re_score_tail\n im_score = im_score_head - im_score_tail\n\n score = torch.stack([re_score, im_score], dim = 0)\n score = score.norm(dim = 0)\n\n score = self.gamma.item() - score.sum(dim = 2)\n return score\n\n def PairRE(self, head, relation, tail, mode):\n re_head, re_tail = torch.chunk(relation, 2, dim=2)\n\n head = F.normalize(head, 2, -1)\n tail = F.normalize(tail, 2, -1)\n\n score = head * re_head - tail * re_tail\n score = self.gamma.item() - torch.norm(score, p=1, dim=2)\n return score\n\n def CompoundE(self, head, relation, tail, mode):\n tail_scale, tail_translate, theta = torch.chunk(relation, 3, dim=2)\n theta, _ = torch.chunk(theta, 2, dim=2)\n\n head = F.normalize(head, 2, -1)\n tail = F.normalize(tail, 2, -1)\n \n pi = 3.14159265358979323846\n\n theta = theta/(self.embedding_range.item()/pi)\n\n re_rotation = torch.cos(theta)\n im_rotation = torch.sin(theta)\n\n re_rotation = re_rotation.unsqueeze(-1)\n im_rotation = im_rotation.unsqueeze(-1)\n\n tail = tail.view((tail.shape[0], tail.shape[1], -1, 2))\n\n tail_r = torch.cat((re_rotation * tail[:, :, :, 0:1], im_rotation * tail[:, :, :, 0:1]), dim=-1)\n tail_r += torch.cat((-im_rotation * tail[:, :, :, 1:], re_rotation * tail[:, :, :, 1:]), dim=-1)\n\n tail_r = tail_r.view((tail_r.shape[0], tail_r.shape[1], -1))\n\n tail_r += tail_translate\n tail_r *= tail_scale\n\n score = head - tail_r\n score = self.gamma.item() - torch.norm(score, p=1, dim=2)\n return score\n\n def CompoundE3D_Complete_Mix_T_H(self, head, relation, tail, mode):\n head_translate, shear_tail_a, shear_tail_b = torch.chunk(relation, 3, dim=2)\n\n shear_tail_a = shear_tail_a.unsqueeze(-1)\n shear_tail_b = shear_tail_b.unsqueeze(-1)\n\n sh_y_x, sh_z_x, sh_x_y = torch.chunk(shear_tail_a, 3, dim=2)\n sh_z_y, sh_x_z, sh_y_z = torch.chunk(shear_tail_b, 3, dim=2)\n\n tail = tail.view((tail.shape[0], tail.shape[1], -1, 3))\n\n tail_r = torch.cat((tail[:, :, :, 0:1], sh_x_y * tail[:, :, :, 0:1], sh_x_z * tail[:, :, :, 0:1]), dim=-1)\n tail_r += torch.cat((sh_y_x * tail[:, :, :, 1:2], tail[:, :, :, 1:2], sh_y_z * tail[:, :, :, 1:2]), dim=-1)\n tail_r += torch.cat((sh_z_x * tail[:, :, :, 2:], sh_z_y * tail[:, :, :, 2:], tail[:, :, :, 2:]), dim=-1)\n\n tail_r = tail_r.view((tail_r.shape[0], tail_r.shape[1], -1))\n\n score = head + head_translate - tail_r\n score = self.gamma.item() - torch.norm(score, p=1, dim=2)\n return score\n \n @staticmethod\n def train_step(model, optimizer, train_iterator, args):\n '''\n A single train step. 
Apply back-propation and return the loss\n '''\n\n model.train()\n optimizer.zero_grad()\n positive_sample, negative_sample, subsampling_weight, mode = next(train_iterator)\n\n if args.cuda:\n positive_sample = positive_sample.cuda()\n negative_sample = negative_sample.cuda()\n subsampling_weight = subsampling_weight.cuda()\n\n negative_score = model((positive_sample, negative_sample), mode=mode)\n if args.negative_adversarial_sampling:\n #In self-adversarial sampling, we do not apply back-propagation on the sampling weight\n negative_score = (F.softmax(negative_score * args.adversarial_temperature, dim = 1).detach()\n * F.logsigmoid(-negative_score)).sum(dim = 1)\n else:\n negative_score = F.logsigmoid(-negative_score).mean(dim = 1)\n\n positive_score = model(positive_sample)\n positive_score = F.logsigmoid(positive_score).squeeze(dim = 1)\n\n if args.uni_weight:\n positive_sample_loss = - positive_score.mean()\n negative_sample_loss = - negative_score.mean()\n else:\n positive_sample_loss = - (subsampling_weight * positive_score).sum()/subsampling_weight.sum()\n negative_sample_loss = - (subsampling_weight * negative_score).sum()/subsampling_weight.sum()\n\n loss = (positive_sample_loss + negative_sample_loss)/2\n\n if args.regularization != 0.0:\n #Use L3 regularization for ComplEx and DistMult\n regularization = args.regularization * (\n model.entity_embedding.norm(p = 3)**3 +\n model.relation_embedding.norm(p = 3).norm(p = 3)**3\n )\n loss = loss + regularization\n regularization_log = {'regularization': regularization.item()}\n else:\n regularization_log = {}\n\n loss.backward()\n\n optimizer.step()\n\n log = {\n **regularization_log,\n 'positive_sample_loss': positive_sample_loss.item(),\n 'negative_sample_loss': negative_sample_loss.item(),\n 'loss': loss.item()\n }\n\n return log\n\n @staticmethod\n def test_step(model, test_triples, args, random_sampling=False):\n '''\n Evaluate the model on test or valid datasets\n '''\n\n model.eval()\n\n #Prepare dataloader for evaluation\n test_dataloader_head = DataLoader(\n TestDataset(\n test_triples,\n args,\n 'head-batch',\n random_sampling\n ),\n batch_size=args.test_batch_size,\n num_workers=max(1, args.cpu_num//2),\n collate_fn=TestDataset.collate_fn\n )\n\n test_dataloader_tail = DataLoader(\n TestDataset(\n test_triples,\n args,\n 'tail-batch',\n random_sampling\n ),\n batch_size=args.test_batch_size,\n num_workers=max(1, args.cpu_num//2),\n collate_fn=TestDataset.collate_fn\n )\n\n test_dataset_list = [test_dataloader_head, test_dataloader_tail]\n\n test_logs = defaultdict(list)\n\n step = 0\n total_steps = sum([len(dataset) for dataset in test_dataset_list])\n\n with torch.no_grad():\n t1 = datetime.datetime.now().microsecond\n t3 = time.mktime(datetime.datetime.now().timetuple())\n for test_dataset in test_dataset_list:\n for positive_sample, negative_sample, mode in test_dataset:\n if args.cuda:\n positive_sample = positive_sample.cuda()\n negative_sample = negative_sample.cuda()\n\n batch_size = positive_sample.size(0)\n score = model((positive_sample, negative_sample), mode)\n\n batch_results = model.evaluator.eval({'y_pred_pos': score[:, 0],\n 'y_pred_neg': score[:, 1:]})\n for metric in batch_results:\n test_logs[metric].append(batch_results[metric])\n\n if step % args.test_log_steps == 0:\n logging.info('Evaluating the model... 
(%d/%d)' % (step, total_steps))\n\n step += 1\n\n t2 = datetime.datetime.now().microsecond\n t4 = time.mktime(datetime.datetime.now().timetuple())\n strTime = 'funtion time use:%dms' % ((t4 - t3) * 1000 + (t2 - t1) / 1000)\n print (strTime)\n\n metrics = {}\n for metric in test_logs:\n metrics[metric] = torch.cat(test_logs[metric]).mean().item()\n\n return metrics"
},
{
"identifier": "TrainDataset",
"path": "dataloader.py",
"snippet": "class TrainDataset(Dataset):\n def __init__(self, triples, nentity, nrelation, negative_sample_size, mode, count, true_head, true_tail):\n self.len = len(triples['head'])\n self.triples = triples\n self.nentity = nentity\n self.nrelation = nrelation\n self.negative_sample_size = negative_sample_size\n self.mode = mode\n self.count = count\n self.true_head = true_head\n self.true_tail = true_tail\n \n def __len__(self):\n return self.len\n \n def __getitem__(self, idx):\n head, relation, tail = self.triples['head'][idx], self.triples['relation'][idx], self.triples['tail'][idx]\n positive_sample = [head, relation, tail]\n\n subsampling_weight = self.count[(head, relation)] + self.count[(tail, -relation-1)]\n subsampling_weight = torch.sqrt(1 / torch.Tensor([subsampling_weight]))\n \n negative_sample = torch.randint(0, self.nentity, (self.negative_sample_size,))\n positive_sample = torch.LongTensor(positive_sample)\n \n return positive_sample, negative_sample, subsampling_weight, self.mode\n \n @staticmethod\n def collate_fn(data):\n positive_sample = torch.stack([_[0] for _ in data], dim=0)\n negative_sample = torch.stack([_[1] for _ in data], dim=0)\n subsample_weight = torch.cat([_[2] for _ in data], dim=0)\n mode = data[0][3]\n return positive_sample, negative_sample, subsample_weight, mode"
},
{
"identifier": "BidirectionalOneShotIterator",
"path": "dataloader.py",
"snippet": "class BidirectionalOneShotIterator(object):\n def __init__(self, dataloader_head, dataloader_tail):\n self.iterator_head = self.one_shot_iterator(dataloader_head)\n self.iterator_tail = self.one_shot_iterator(dataloader_tail)\n self.step = 0\n \n def __next__(self):\n self.step += 1\n if self.step % 2 == 0:\n data = next(self.iterator_head)\n else:\n data = next(self.iterator_tail)\n return data\n \n @staticmethod\n def one_shot_iterator(dataloader):\n '''\n Transform a PyTorch Dataloader into python iterator\n '''\n while True:\n for data in dataloader:\n yield data"
}
] | import argparse
import json
import logging
import os
import random
import numpy as np
import torch
import time
import os.path as osp
from torch.utils.data import DataLoader
from model import KGEModel
from dataloader import TrainDataset
from dataloader import BidirectionalOneShotIterator
from ogb.linkproppred import LinkPropPredDataset, Evaluator
from collections import defaultdict
from tqdm import tqdm
from tensorboardX import SummaryWriter | 7,574 | logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S',
filename=log_file,
filemode='w'
)
if args.print_on_screen:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
def log_metrics(mode, step, metrics, writer):
'''
Print the evaluation logs
'''
for metric in metrics:
logging.info('%s %s at step %d: %f' % (mode, metric, step, metrics[metric]))
writer.add_scalar("_".join([mode, metric]), metrics[metric], step)
def main(args):
if (not args.do_train) and (not args.do_valid) and (not args.do_test) and (not args.evaluate_train):
        raise ValueError('one of train/val/test mode must be chosen.')
if args.init_checkpoint:
override_config(args)
args.save_path = 'log/%s/%s/%s-%s/%s'%(args.dataset, args.model, args.hidden_dim, args.gamma, time.time()) if args.save_path == None else args.save_path
writer = SummaryWriter(args.save_path)
# Write logs to checkpoint and console
set_logger(args)
dataset = LinkPropPredDataset(name = args.dataset)
split_dict = dataset.get_edge_split()
nentity = dataset.graph['num_nodes']
nrelation = int(max(dataset.graph['edge_reltype'])[0])+1
evaluator = Evaluator(name = args.dataset)
args.nentity = nentity
args.nrelation = nrelation
logging.info('Model: %s' % args.model)
logging.info('Dataset: %s' % args.dataset)
logging.info('#entity: %d' % nentity)
logging.info('#relation: %d' % nrelation)
train_triples = split_dict['train']
logging.info('#train: %d' % len(train_triples['head']))
valid_triples = split_dict['valid']
logging.info('#valid: %d' % len(valid_triples['head']))
test_triples = split_dict['test']
logging.info('#test: %d' % len(test_triples['head']))
logging.info('relation type %s' % args.relation_type)
print('relation type %s' % args.relation_type)
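    # Optionally restrict evaluation to a single relation category ('1-1', '1-n',
    # 'n-1' or 'n-n'): the matching test-triple ids are read from pre-extracted
    # wikikg_P id lists and the filtered split is cached as a .pt file for reuse.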
test_set_file = ''
if args.relation_type == '1-1':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/1-1-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/1-1.pt'
elif args.relation_type == '1-n':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/1-n-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/1-n.pt'
elif args.relation_type == 'n-1':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/n-1-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/n-1.pt'
elif args.relation_type == 'n-n':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/n-n-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/n-n.pt'
if test_set_file != '':
if osp.exists(test_set_pre_processed):
test_triples = torch.load(test_set_pre_processed, 'rb')
print("load pre processed test set")
else:
test_triples_new = {}
test_triples_chosen = []
test_triples_new['head'] = []
test_triples_new['relation'] = []
test_triples_new['tail'] = []
test_triples_new['head_neg'] = []
test_triples_new['tail_neg'] = []
f_test = open(test_set_file, "r")
for line in f_test:
h, r, t = line.strip().split('\t')
h, r, t = int(h), int(r), int(t)
test_triples_chosen.append((h, r, t))
f_test.close()
for idx in range(len(test_triples['head'])):
h, r, t = test_triples['head'][idx], test_triples['relation'][idx], test_triples['tail'][idx]
if (h, r, t) in test_triples_chosen:
test_triples_new['head'].append(h)
test_triples_new['relation'].append(r)
test_triples_new['tail'].append(t)
test_triples_new['head_neg'].append(test_triples['head_neg'][idx])
test_triples_new['tail_neg'].append(test_triples['tail_neg'][idx])
print('Saving ...')
torch.save(test_triples_new, test_set_pre_processed, pickle_protocol=4)
test_triples = test_triples_new
logging.info('#test: %d' % len(test_triples['head']))
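    # Frequency counts (initialised at 4 by the defaultdict) feed the subsampling
    # weights used by TrainDataset; true-head / true-tail lookups record the observed
    # heads and tails per pair, and the training triples are also dumped to train.txt.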
train_count, train_true_head, train_true_tail = defaultdict(lambda: 4), defaultdict(list), defaultdict(list)
f_train = open("train.txt", "w")
for i in tqdm(range(len(train_triples['head']))):
head, relation, tail = train_triples['head'][i], train_triples['relation'][i], train_triples['tail'][i]
train_count[(head, relation)] += 1
train_count[(tail, -relation-1)] += 1
train_true_head[(relation, tail)].append(head)
train_true_tail[(head, relation)].append(tail)
f_train.write("\t".join([str(head), str(relation), str(tail)]) + '\n')
f_train.close()
| #!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def parse_args(args=None):
parser = argparse.ArgumentParser(
description='Training and Testing Knowledge Graph Embedding Models',
usage='train.py [<args>] [-h | --help]'
)
parser.add_argument('--cuda', action='store_true', help='use GPU')
parser.add_argument('--do_train', action='store_true')
parser.add_argument('--do_valid', action='store_true')
parser.add_argument('--do_test', action='store_true')
parser.add_argument('--evaluate_train', action='store_true', help='Evaluate on training data')
parser.add_argument('--dataset', type=str, default='ogbl-wikikg2', help='dataset name, default to wikikg')
parser.add_argument('--model', default='TransE', type=str)
parser.add_argument('-de', '--double_entity_embedding', action='store_true')
parser.add_argument('-dr', '--double_relation_embedding', action='store_true')
parser.add_argument('-tr', '--triple_relation_embedding', action='store_true')
parser.add_argument('-qr', '--quad_relation_embedding', action='store_true')
parser.add_argument('-n', '--negative_sample_size', default=128, type=int)
parser.add_argument('-d', '--hidden_dim', default=500, type=int)
parser.add_argument('-g', '--gamma', default=12.0, type=float)
parser.add_argument('-adv', '--negative_adversarial_sampling', action='store_true')
parser.add_argument('-a', '--adversarial_temperature', default=1.0, type=float)
parser.add_argument('-b', '--batch_size', default=1024, type=int)
parser.add_argument('-r', '--regularization', default=0.0, type=float)
parser.add_argument('--test_batch_size', default=4, type=int, help='valid/test batch size')
parser.add_argument('--uni_weight', action='store_true',
help='Otherwise use subsampling weighting like in word2vec')
parser.add_argument('-lr', '--learning_rate', default=0.0001, type=float)
parser.add_argument('-cpu', '--cpu_num', default=10, type=int)
parser.add_argument('-init', '--init_checkpoint', default=None, type=str)
parser.add_argument('-save', '--save_path', default=None, type=str)
parser.add_argument('--max_steps', default=100000, type=int)
parser.add_argument('--warm_up_steps', default=None, type=int)
parser.add_argument('--save_checkpoint_steps', default=10000, type=int)
parser.add_argument('--valid_steps', default=10000, type=int)
parser.add_argument('--log_steps', default=100, type=int, help='train log every xx steps')
parser.add_argument('--test_log_steps', default=1000, type=int, help='valid/test log every xx steps')
parser.add_argument('--nentity', type=int, default=0, help='DO NOT MANUALLY SET')
parser.add_argument('--nrelation', type=int, default=0, help='DO NOT MANUALLY SET')
parser.add_argument('--print_on_screen', action='store_true', help='log on screen or not')
parser.add_argument('--ntriples_eval_train', type=int, default=200000, help='number of training triples to evaluate eventually')
parser.add_argument('--neg_size_eval_train', type=int, default=500, help='number of negative samples when evaluating training triples')
parser.add_argument('--relation_type', type=str, default='all', help='1-1, 1-n, n-1, n-n')
return parser.parse_args(args)
def override_config(args):
'''
Override model and data configuration
'''
with open(os.path.join(args.init_checkpoint, 'config.json'), 'r') as fjson:
argparse_dict = json.load(fjson)
args.dataset = argparse_dict['dataset']
args.model = argparse_dict['model']
args.double_entity_embedding = argparse_dict['double_entity_embedding']
args.double_relation_embedding = argparse_dict['double_relation_embedding']
args.triple_relation_embedding = argparse_dict['triple_relation_embedding']
args.quad_relation_embedding = argparse_dict['quad_relation_embedding']
args.hidden_dim = argparse_dict['hidden_dim']
args.test_batch_size = argparse_dict['test_batch_size']
def save_model(model, optimizer, save_variable_list, args):
'''
Save the parameters of the model and the optimizer,
as well as some other variables such as step and learning_rate
'''
argparse_dict = vars(args)
with open(os.path.join(args.save_path, 'config.json'), 'w') as fjson:
json.dump(argparse_dict, fjson)
entity_embedding = model.entity_embedding.detach().cpu().numpy()
np.save(
os.path.join(args.save_path, 'entity_embedding'),
entity_embedding
)
relation_embedding = model.relation_embedding.detach().cpu().numpy()
np.save(
os.path.join(args.save_path, 'relation_embedding'),
relation_embedding
)
def set_logger(args):
'''
Write logs to checkpoint and console
'''
if args.do_train:
log_file = os.path.join(args.save_path or args.init_checkpoint, 'train.log')
else:
log_file = os.path.join(args.save_path or args.init_checkpoint, 'test.log')
print(log_file)
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S',
filename=log_file,
filemode='w'
)
if args.print_on_screen:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
def log_metrics(mode, step, metrics, writer):
'''
Print the evaluation logs
'''
for metric in metrics:
logging.info('%s %s at step %d: %f' % (mode, metric, step, metrics[metric]))
writer.add_scalar("_".join([mode, metric]), metrics[metric], step)
def main(args):
if (not args.do_train) and (not args.do_valid) and (not args.do_test) and (not args.evaluate_train):
        raise ValueError('one of train/val/test mode must be chosen.')
if args.init_checkpoint:
override_config(args)
args.save_path = 'log/%s/%s/%s-%s/%s'%(args.dataset, args.model, args.hidden_dim, args.gamma, time.time()) if args.save_path == None else args.save_path
writer = SummaryWriter(args.save_path)
# Write logs to checkpoint and console
set_logger(args)
dataset = LinkPropPredDataset(name = args.dataset)
split_dict = dataset.get_edge_split()
nentity = dataset.graph['num_nodes']
nrelation = int(max(dataset.graph['edge_reltype'])[0])+1
evaluator = Evaluator(name = args.dataset)
args.nentity = nentity
args.nrelation = nrelation
logging.info('Model: %s' % args.model)
logging.info('Dataset: %s' % args.dataset)
logging.info('#entity: %d' % nentity)
logging.info('#relation: %d' % nrelation)
train_triples = split_dict['train']
logging.info('#train: %d' % len(train_triples['head']))
valid_triples = split_dict['valid']
logging.info('#valid: %d' % len(valid_triples['head']))
test_triples = split_dict['test']
logging.info('#test: %d' % len(test_triples['head']))
logging.info('relation type %s' % args.relation_type)
print('relation type %s' % args.relation_type)
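    # Optionally restrict evaluation to a single relation category ('1-1', '1-n',
    # 'n-1' or 'n-n'): the matching test-triple ids are read from pre-extracted
    # wikikg_P id lists and the filtered split is cached as a .pt file for reuse.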
test_set_file = ''
if args.relation_type == '1-1':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/1-1-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/1-1.pt'
elif args.relation_type == '1-n':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/1-n-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/1-n.pt'
elif args.relation_type == 'n-1':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/n-1-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/n-1.pt'
elif args.relation_type == 'n-n':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/n-n-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/n-n.pt'
if test_set_file != '':
if osp.exists(test_set_pre_processed):
test_triples = torch.load(test_set_pre_processed, 'rb')
print("load pre processed test set")
else:
test_triples_new = {}
test_triples_chosen = []
test_triples_new['head'] = []
test_triples_new['relation'] = []
test_triples_new['tail'] = []
test_triples_new['head_neg'] = []
test_triples_new['tail_neg'] = []
f_test = open(test_set_file, "r")
for line in f_test:
h, r, t = line.strip().split('\t')
h, r, t = int(h), int(r), int(t)
test_triples_chosen.append((h, r, t))
f_test.close()
for idx in range(len(test_triples['head'])):
h, r, t = test_triples['head'][idx], test_triples['relation'][idx], test_triples['tail'][idx]
if (h, r, t) in test_triples_chosen:
test_triples_new['head'].append(h)
test_triples_new['relation'].append(r)
test_triples_new['tail'].append(t)
test_triples_new['head_neg'].append(test_triples['head_neg'][idx])
test_triples_new['tail_neg'].append(test_triples['tail_neg'][idx])
print('Saving ...')
torch.save(test_triples_new, test_set_pre_processed, pickle_protocol=4)
test_triples = test_triples_new
logging.info('#test: %d' % len(test_triples['head']))
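    # Frequency counts (initialised at 4 by the defaultdict) feed the subsampling
    # weights used by TrainDataset; true-head / true-tail lookups record the observed
    # heads and tails per pair, and the training triples are also dumped to train.txt.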
train_count, train_true_head, train_true_tail = defaultdict(lambda: 4), defaultdict(list), defaultdict(list)
f_train = open("train.txt", "w")
for i in tqdm(range(len(train_triples['head']))):
head, relation, tail = train_triples['head'][i], train_triples['relation'][i], train_triples['tail'][i]
train_count[(head, relation)] += 1
train_count[(tail, -relation-1)] += 1
train_true_head[(relation, tail)].append(head)
train_true_tail[(head, relation)].append(tail)
f_train.write("\t".join([str(head), str(relation), str(tail)]) + '\n')
f_train.close()
| kge_model = KGEModel( | 0 | 2023-12-29 22:57:53+00:00 | 12k |
daswer123/rvc-python | rvc_python/modules/vc/modules.py | [
{
"identifier": "load_audio",
"path": "rvc_python/lib/audio.py",
"snippet": "def load_audio(file, sr):\n file = (\n file.strip(\" \").strip('\"').strip(\"\\n\").strip('\"').strip(\" \")\n ) # 防止小白拷路径头尾带了空格和\"和回车\n if os.path.exists(file) == False:\n raise RuntimeError(\n \"You input a wrong audio path that does not exists, please fix it!\"\n )\n try:\n with open(file, \"rb\") as f:\n with BytesIO() as out:\n audio2(f, out, \"f32le\", sr)\n return np.frombuffer(out.getvalue(), np.float32).flatten()\n\n except AttributeError:\n audio = file[1] / 32768.0\n if len(audio.shape) == 2:\n audio = np.mean(audio, -1)\n return librosa.resample(audio, orig_sr=file[0], target_sr=16000)\n\n except:\n raise RuntimeError(traceback.format_exc())"
},
{
"identifier": "wav2",
"path": "rvc_python/lib/audio.py",
"snippet": "def wav2(i, o, format):\n inp = av.open(i, \"rb\")\n if format == \"m4a\":\n format = \"mp4\"\n out = av.open(o, \"wb\", format=format)\n if format == \"ogg\":\n format = \"libvorbis\"\n if format == \"mp4\":\n format = \"aac\"\n\n ostream = out.add_stream(format)\n\n for frame in inp.decode(audio=0):\n for p in ostream.encode(frame):\n out.mux(p)\n\n for p in ostream.encode(None):\n out.mux(p)\n\n out.close()\n inp.close()"
},
{
"identifier": "SynthesizerTrnMs256NSFsid",
"path": "rvc_python/lib/infer_pack/models.py",
"snippet": "class SynthesizerTrnMs256NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super().__init__()\n if type(sr) == type(\"strr\"):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(\n self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate:\n head = int(z_p.shape[2] * rate)\n z_p = z_p[:, :, -head:]\n x_mask = x_mask[:, :, -head:]\n nsff0 = nsff0[:, -head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "SynthesizerTrnMs256NSFsid_nono",
"path": "rvc_python/lib/infer_pack/models.py",
"snippet": "class SynthesizerTrnMs256NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n ):\n super().__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, sid, rate=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate:\n head = int(z_p.shape[2] * rate)\n z_p = z_p[:, :, -head:]\n x_mask = x_mask[:, :, -head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "SynthesizerTrnMs768NSFsid",
"path": "rvc_python/lib/infer_pack/models.py",
"snippet": "class SynthesizerTrnMs768NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super().__init__()\n if type(sr) == type(\"strr\"):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(\n self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate:\n head = int(z_p.shape[2] * rate)\n z_p = z_p[:, :, -head:]\n x_mask = x_mask[:, :, -head:]\n nsff0 = nsff0[:, -head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "SynthesizerTrnMs768NSFsid_nono",
"path": "rvc_python/lib/infer_pack/models.py",
"snippet": "class SynthesizerTrnMs768NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n ):\n super().__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, sid, rate=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate:\n head = int(z_p.shape[2] * rate)\n z_p = z_p[:, :, -head:]\n x_mask = x_mask[:, :, -head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "Pipeline",
"path": "rvc_python/modules/vc/pipeline.py",
"snippet": "class Pipeline(object):\n def __init__(self, tgt_sr, config, lib_dir):\n self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (\n config.x_pad,\n config.x_query,\n config.x_center,\n config.x_max,\n config.is_half,\n )\n self.sr = 16000 # hubert输入采样率\n self.window = 160 # 每帧点数\n self.t_pad = self.sr * self.x_pad # 每条前后pad时间\n self.t_pad_tgt = tgt_sr * self.x_pad\n self.t_pad2 = self.t_pad * 2\n self.t_query = self.sr * self.x_query # 查询切点前后查询时间\n self.t_center = self.sr * self.x_center # 查询切点位置\n self.t_max = self.sr * self.x_max # 免查询时长阈值\n self.device = config.device\n self.lib_dir = lib_dir\n\n def get_f0(\n self,\n input_audio_path,\n x,\n p_len,\n f0_up_key,\n f0_method,\n filter_radius,\n inp_f0=None,\n ):\n global input_audio_path2wav\n time_step = self.window / self.sr * 1000\n f0_min = 50\n f0_max = 1100\n f0_mel_min = 1127 * np.log(1 + f0_min / 700)\n f0_mel_max = 1127 * np.log(1 + f0_max / 700)\n if f0_method == \"pm\":\n f0 = (\n parselmouth.Sound(x, self.sr)\n .to_pitch_ac(\n time_step=time_step / 1000,\n voicing_threshold=0.6,\n pitch_floor=f0_min,\n pitch_ceiling=f0_max,\n )\n .selected_array[\"frequency\"]\n )\n pad_size = (p_len - len(f0) + 1) // 2\n if pad_size > 0 or p_len - len(f0) - pad_size > 0:\n f0 = np.pad(\n f0, [[pad_size, p_len - len(f0) - pad_size]], mode=\"constant\"\n )\n elif f0_method == \"harvest\":\n input_audio_path2wav[input_audio_path] = x.astype(np.double)\n f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)\n if filter_radius > 2:\n f0 = signal.medfilt(f0, 3)\n elif f0_method == \"crepe\":\n model = \"full\"\n # Pick a batch size that doesn't cause memory errors on your gpu\n batch_size = 512\n # Compute pitch using first gpu\n audio = torch.tensor(np.copy(x))[None].float()\n f0, pd = torchcrepe.predict(\n audio,\n self.sr,\n self.window,\n f0_min,\n f0_max,\n model,\n batch_size=batch_size,\n device=self.device,\n return_periodicity=True,\n )\n pd = torchcrepe.filter.median(pd, 3)\n f0 = torchcrepe.filter.mean(f0, 3)\n f0[pd < 0.1] = 0\n f0 = f0[0].cpu().numpy()\n elif f0_method == \"rmvpe\":\n if not hasattr(self, \"model_rmvpe\"):\n from rvc_python.lib.rmvpe import RMVPE\n\n logger.info(\n \"Loading rmvpe model - base_models/rmvpe.pth\"\n )\n rmvpe_path = Path(f\"{self.lib_dir}\\\\base_model\\\\rmvpe.pt\")\n self.model_rmvpe = RMVPE(\n rmvpe_path,\n is_half=self.is_half,\n device=self.device,\n )\n f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)\n\n if \"privateuseone\" in str(self.device): # clean ortruntime memory\n del self.model_rmvpe.model\n del self.model_rmvpe\n logger.info(\"Cleaning ortruntime memory\")\n\n f0 *= pow(2, f0_up_key / 12)\n # with open(\"test.txt\",\"w\")as f:f.write(\"\\n\".join([str(i)for i in f0.tolist()]))\n tf0 = self.sr // self.window # 每秒f0点数\n if inp_f0 is not None:\n delta_t = np.round(\n (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1\n ).astype(\"int16\")\n replace_f0 = np.interp(\n list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]\n )\n shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]\n f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[\n :shape\n ]\n # with open(\"test_opt.txt\",\"w\")as f:f.write(\"\\n\".join([str(i)for i in f0.tolist()]))\n f0bak = f0.copy()\n f0_mel = 1127 * np.log(1 + f0 / 700)\n f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (\n f0_mel_max - f0_mel_min\n ) + 1\n f0_mel[f0_mel <= 1] = 1\n f0_mel[f0_mel > 255] = 255\n f0_coarse = np.rint(f0_mel).astype(np.int32)\n return 
f0_coarse, f0bak # 1-0\n\n def vc(\n self,\n model,\n net_g,\n sid,\n audio0,\n pitch,\n pitchf,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n ): # ,file_index,file_big_npy\n feats = torch.from_numpy(audio0)\n if self.is_half:\n feats = feats.half()\n else:\n feats = feats.float()\n if feats.dim() == 2: # double channels\n feats = feats.mean(-1)\n assert feats.dim() == 1, feats.dim()\n feats = feats.view(1, -1)\n padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)\n\n inputs = {\n \"source\": feats.to(self.device),\n \"padding_mask\": padding_mask,\n \"output_layer\": 9 if version == \"v1\" else 12,\n }\n t0 = ttime()\n with torch.no_grad():\n logits = model.extract_features(**inputs)\n feats = model.final_proj(logits[0]) if version == \"v1\" else logits[0]\n if protect < 0.5 and pitch is not None and pitchf is not None:\n feats0 = feats.clone()\n if (\n not isinstance(index, type(None))\n and not isinstance(big_npy, type(None))\n and index_rate != 0\n ):\n npy = feats[0].cpu().numpy()\n if self.is_half:\n npy = npy.astype(\"float32\")\n\n # _, I = index.search(npy, 1)\n # npy = big_npy[I.squeeze()]\n\n score, ix = index.search(npy, k=8)\n weight = np.square(1 / score)\n weight /= weight.sum(axis=1, keepdims=True)\n npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)\n\n if self.is_half:\n npy = npy.astype(\"float16\")\n feats = (\n torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate\n + (1 - index_rate) * feats\n )\n\n feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)\n if protect < 0.5 and pitch is not None and pitchf is not None:\n feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(\n 0, 2, 1\n )\n t1 = ttime()\n p_len = audio0.shape[0] // self.window\n if feats.shape[1] < p_len:\n p_len = feats.shape[1]\n if pitch is not None and pitchf is not None:\n pitch = pitch[:, :p_len]\n pitchf = pitchf[:, :p_len]\n\n if protect < 0.5 and pitch is not None and pitchf is not None:\n pitchff = pitchf.clone()\n pitchff[pitchf > 0] = 1\n pitchff[pitchf < 1] = protect\n pitchff = pitchff.unsqueeze(-1)\n feats = feats * pitchff + feats0 * (1 - pitchff)\n feats = feats.to(feats0.dtype)\n p_len = torch.tensor([p_len], device=self.device).long()\n with torch.no_grad():\n hasp = pitch is not None and pitchf is not None\n arg = (feats, p_len, pitch, pitchf, sid) if hasp else (feats, p_len, sid)\n audio1 = (net_g.infer(*arg)[0][0, 0]).data.cpu().float().numpy()\n del hasp, arg\n del feats, p_len, padding_mask\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n t2 = ttime()\n times[0] += t1 - t0\n times[2] += t2 - t1\n return audio1\n\n def pipeline(\n self,\n model,\n net_g,\n sid,\n audio,\n input_audio_path,\n times,\n f0_up_key,\n f0_method,\n file_index,\n index_rate,\n if_f0,\n filter_radius,\n tgt_sr,\n resample_sr,\n rms_mix_rate,\n version,\n protect,\n f0_file=None,\n ):\n if (\n file_index != \"\"\n # and file_big_npy != \"\"\n # and os.path.exists(file_big_npy) == True\n and os.path.exists(file_index)\n and index_rate != 0\n ):\n try:\n index = faiss.read_index(file_index)\n # big_npy = np.load(file_big_npy)\n big_npy = index.reconstruct_n(0, index.ntotal)\n except:\n traceback.print_exc()\n index = big_npy = None\n else:\n index = big_npy = None\n audio = signal.filtfilt(bh, ah, audio)\n audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode=\"reflect\")\n opt_ts = []\n if audio_pad.shape[0] > self.t_max:\n audio_sum = np.zeros_like(audio)\n for i in 
range(self.window):\n audio_sum += np.abs(audio_pad[i : i - self.window])\n for t in range(self.t_center, audio.shape[0], self.t_center):\n opt_ts.append(\n t\n - self.t_query\n + np.where(\n audio_sum[t - self.t_query : t + self.t_query]\n == audio_sum[t - self.t_query : t + self.t_query].min()\n )[0][0]\n )\n s = 0\n audio_opt = []\n t = None\n t1 = ttime()\n audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode=\"reflect\")\n p_len = audio_pad.shape[0] // self.window\n inp_f0 = None\n if hasattr(f0_file, \"name\"):\n try:\n with open(f0_file.name, \"r\") as f:\n lines = f.read().strip(\"\\n\").split(\"\\n\")\n inp_f0 = []\n for line in lines:\n inp_f0.append([float(i) for i in line.split(\",\")])\n inp_f0 = np.array(inp_f0, dtype=\"float32\")\n except:\n traceback.print_exc()\n # print(sid)\n # sid = os.path.abspath(sid)\n sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()\n pitch, pitchf = None, None\n if if_f0 == 1:\n pitch, pitchf = self.get_f0(\n input_audio_path,\n audio_pad,\n p_len,\n f0_up_key,\n f0_method,\n filter_radius,\n inp_f0,\n )\n pitch = pitch[:p_len]\n pitchf = pitchf[:p_len]\n if \"mps\" not in str(self.device) or \"xpu\" not in str(self.device):\n pitchf = pitchf.astype(np.float32)\n pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()\n pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()\n t2 = ttime()\n times[1] += t2 - t1\n for t in opt_ts:\n t = t // self.window * self.window\n if if_f0 == 1:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[s : t + self.t_pad2 + self.window],\n pitch[:, s // self.window : (t + self.t_pad2) // self.window],\n pitchf[:, s // self.window : (t + self.t_pad2) // self.window],\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n else:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[s : t + self.t_pad2 + self.window],\n None,\n None,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n s = t\n if if_f0 == 1:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[t:],\n pitch[:, t // self.window :] if t is not None else pitch,\n pitchf[:, t // self.window :] if t is not None else pitchf,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n else:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[t:],\n None,\n None,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n audio_opt = np.concatenate(audio_opt)\n if rms_mix_rate != 1:\n audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)\n if tgt_sr != resample_sr >= 16000:\n audio_opt = librosa.resample(\n audio_opt, orig_sr=tgt_sr, target_sr=resample_sr\n )\n audio_max = np.abs(audio_opt).max() / 0.99\n max_int16 = 32768\n if audio_max > 1:\n max_int16 /= audio_max\n audio_opt = (audio_opt * max_int16).astype(np.int16)\n del pitch, pitchf, sid\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n return audio_opt"
}
] | import traceback
import logging
import numpy as np
import soundfile as sf
import torch
from io import BytesIO
from rvc_python.lib.audio import load_audio, wav2
from rvc_python.lib.infer_pack.models import (
SynthesizerTrnMs256NSFsid,
SynthesizerTrnMs256NSFsid_nono,
SynthesizerTrnMs768NSFsid,
SynthesizerTrnMs768NSFsid_nono,
)
from rvc_python.modules.vc.pipeline import Pipeline
from rvc_python.modules.vc.utils import * | 9,413 |
logger = logging.getLogger(__name__)
class VC:
def __init__(self, lib_dir, config):
self.lib_dir = lib_dir
self.n_spk = None
self.tgt_sr = None
self.net_g = None
self.pipeline = None
self.cpt = None
self.version = None
self.if_f0 = None
self.version = None
self.hubert_model = None
self.config = config
def get_vc(self,sid,version = "v2", *to_return_protect):
# logger.info("Get sid: " + sid)
to_return_protect0 = {
"visible": self.if_f0 != 0,
"value": to_return_protect[0]
if self.if_f0 != 0 and to_return_protect
else 0.5,
"__type__": "update",
}
to_return_protect1 = {
"visible": self.if_f0 != 0,
"value": to_return_protect[1]
if self.if_f0 != 0 and to_return_protect
else 0.33,
"__type__": "update",
}
if sid == "" or sid == []:
            if self.hubert_model is not None:  # given the polling flow, check whether sid just switched from a loaded model to no model
logger.info("Clean model cache")
del (self.net_g, self.n_spk, self.hubert_model, self.tgt_sr) # ,cpt
self.hubert_model = (
self.net_g
) = self.n_spk = self.hubert_model = self.tgt_sr = None
if torch.cuda.is_available():
torch.cuda.empty_cache()
            ### without this kind of juggling, the memory below is not cleaned up thoroughly
self.if_f0 = self.cpt.get("f0", 1)
self.version = self.cpt.get("version", "v1")
if self.version == "v1":
if self.if_f0 == 1:
self.net_g = SynthesizerTrnMs256NSFsid(
*self.cpt["config"], is_half=self.config.is_half
)
else:
self.net_g = SynthesizerTrnMs256NSFsid_nono(*self.cpt["config"])
elif self.version == "v2":
if self.if_f0 == 1:
self.net_g = SynthesizerTrnMs768NSFsid(
*self.cpt["config"], is_half=self.config.is_half
)
else:
|
logger = logging.getLogger(__name__)
class VC:
def __init__(self, lib_dir, config):
self.lib_dir = lib_dir
self.n_spk = None
self.tgt_sr = None
self.net_g = None
self.pipeline = None
self.cpt = None
self.version = None
self.if_f0 = None
self.version = None
self.hubert_model = None
self.config = config
def get_vc(self,sid,version = "v2", *to_return_protect):
# logger.info("Get sid: " + sid)
to_return_protect0 = {
"visible": self.if_f0 != 0,
"value": to_return_protect[0]
if self.if_f0 != 0 and to_return_protect
else 0.5,
"__type__": "update",
}
to_return_protect1 = {
"visible": self.if_f0 != 0,
"value": to_return_protect[1]
if self.if_f0 != 0 and to_return_protect
else 0.33,
"__type__": "update",
}
if sid == "" or sid == []:
            if self.hubert_model is not None:  # given the polling flow, check whether sid just switched from a loaded model to no model
logger.info("Clean model cache")
del (self.net_g, self.n_spk, self.hubert_model, self.tgt_sr) # ,cpt
self.hubert_model = (
self.net_g
) = self.n_spk = self.hubert_model = self.tgt_sr = None
if torch.cuda.is_available():
torch.cuda.empty_cache()
            ### without this kind of juggling, the memory below is not cleaned up thoroughly
self.if_f0 = self.cpt.get("f0", 1)
self.version = self.cpt.get("version", "v1")
if self.version == "v1":
if self.if_f0 == 1:
self.net_g = SynthesizerTrnMs256NSFsid(
*self.cpt["config"], is_half=self.config.is_half
)
else:
self.net_g = SynthesizerTrnMs256NSFsid_nono(*self.cpt["config"])
elif self.version == "v2":
if self.if_f0 == 1:
self.net_g = SynthesizerTrnMs768NSFsid(
*self.cpt["config"], is_half=self.config.is_half
)
else: | self.net_g = SynthesizerTrnMs768NSFsid_nono(*self.cpt["config"]) | 5 | 2023-12-26 19:05:42+00:00 | 12k |
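Editorial aside (not part of either dataset row): the gold next_line above completes a simple (version, f0) dispatch over the four synthesizer classes named in the row's import statement. The sketch below is a minimal, self-contained illustration of that dispatch; the lookup-table form is an editorial assumption, not code from the rvc_python repo.

def pick_synthesizer(version: str, if_f0: int) -> str:
    # Mirrors the if/elif ladder in VC.get_vc: "v1" vs "v2" picks the 256- or
    # 768-dimensional text-encoder variant, and if_f0 == 0 picks the "_nono" class.
    dispatch = {
        ("v1", 1): "SynthesizerTrnMs256NSFsid",
        ("v1", 0): "SynthesizerTrnMs256NSFsid_nono",
        ("v2", 1): "SynthesizerTrnMs768NSFsid",
        ("v2", 0): "SynthesizerTrnMs768NSFsid_nono",
    }
    return dispatch[(version, if_f0)]

assert pick_synthesizer("v2", 0) == "SynthesizerTrnMs768NSFsid_nono"  # the branch the gold completion fills in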
open-mmlab/Amphion | modules/wenet_extractor/transformer/asr_model.py | [
{
"identifier": "CTC",
"path": "modules/wenet_extractor/transformer/ctc.py",
"snippet": "class CTC(torch.nn.Module):\n \"\"\"CTC module\"\"\"\n\n def __init__(\n self,\n odim: int,\n encoder_output_size: int,\n dropout_rate: float = 0.0,\n reduce: bool = True,\n ):\n \"\"\"Construct CTC module\n Args:\n odim: dimension of outputs\n encoder_output_size: number of encoder projection units\n dropout_rate: dropout rate (0.0 ~ 1.0)\n reduce: reduce the CTC loss into a scalar\n \"\"\"\n super().__init__()\n eprojs = encoder_output_size\n self.dropout_rate = dropout_rate\n self.ctc_lo = torch.nn.Linear(eprojs, odim)\n\n reduction_type = \"sum\" if reduce else \"none\"\n self.ctc_loss = torch.nn.CTCLoss(reduction=reduction_type)\n\n def forward(\n self,\n hs_pad: torch.Tensor,\n hlens: torch.Tensor,\n ys_pad: torch.Tensor,\n ys_lens: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"Calculate CTC loss.\n\n Args:\n hs_pad: batch of padded hidden state sequences (B, Tmax, D)\n hlens: batch of lengths of hidden state sequences (B)\n ys_pad: batch of padded character id sequence tensor (B, Lmax)\n ys_lens: batch of lengths of character sequence (B)\n \"\"\"\n # hs_pad: (B, L, NProj) -> ys_hat: (B, L, Nvocab)\n ys_hat = self.ctc_lo(F.dropout(hs_pad, p=self.dropout_rate))\n # ys_hat: (B, L, D) -> (L, B, D)\n ys_hat = ys_hat.transpose(0, 1)\n ys_hat = ys_hat.log_softmax(2)\n loss = self.ctc_loss(ys_hat, ys_pad, hlens, ys_lens)\n # Batch-size average\n loss = loss / ys_hat.size(1)\n return loss\n\n def log_softmax(self, hs_pad: torch.Tensor) -> torch.Tensor:\n \"\"\"log_softmax of frame activations\n\n Args:\n Tensor hs_pad: 3d tensor (B, Tmax, eprojs)\n Returns:\n torch.Tensor: log softmax applied 3d tensor (B, Tmax, odim)\n \"\"\"\n return F.log_softmax(self.ctc_lo(hs_pad), dim=2)\n\n def argmax(self, hs_pad: torch.Tensor) -> torch.Tensor:\n \"\"\"argmax of frame activations\n\n Args:\n torch.Tensor hs_pad: 3d tensor (B, Tmax, eprojs)\n Returns:\n torch.Tensor: argmax applied 2d tensor (B, Tmax)\n \"\"\"\n return torch.argmax(self.ctc_lo(hs_pad), dim=2)"
},
{
"identifier": "TransformerDecoder",
"path": "modules/wenet_extractor/transformer/decoder.py",
"snippet": "class TransformerDecoder(torch.nn.Module):\n \"\"\"Base class of Transfomer decoder module.\n Args:\n vocab_size: output dim\n encoder_output_size: dimension of attention\n attention_heads: the number of heads of multi head attention\n linear_units: the hidden units number of position-wise feedforward\n num_blocks: the number of decoder blocks\n dropout_rate: dropout rate\n self_attention_dropout_rate: dropout rate for attention\n input_layer: input layer type\n use_output_layer: whether to use output layer\n pos_enc_class: PositionalEncoding or ScaledPositionalEncoding\n normalize_before:\n True: use layer_norm before each sub-block of a layer.\n False: use layer_norm after each sub-block of a layer.\n src_attention: if false, encoder-decoder cross attention is not\n applied, such as CIF model\n \"\"\"\n\n def __init__(\n self,\n vocab_size: int,\n encoder_output_size: int,\n attention_heads: int = 4,\n linear_units: int = 2048,\n num_blocks: int = 6,\n dropout_rate: float = 0.1,\n positional_dropout_rate: float = 0.1,\n self_attention_dropout_rate: float = 0.0,\n src_attention_dropout_rate: float = 0.0,\n input_layer: str = \"embed\",\n use_output_layer: bool = True,\n normalize_before: bool = True,\n src_attention: bool = True,\n ):\n super().__init__()\n attention_dim = encoder_output_size\n\n if input_layer == \"embed\":\n self.embed = torch.nn.Sequential(\n torch.nn.Embedding(vocab_size, attention_dim),\n PositionalEncoding(attention_dim, positional_dropout_rate),\n )\n elif input_layer == \"none\":\n self.embed = NoPositionalEncoding(attention_dim, positional_dropout_rate)\n else:\n raise ValueError(f\"only 'embed' is supported: {input_layer}\")\n\n self.normalize_before = normalize_before\n self.after_norm = torch.nn.LayerNorm(attention_dim, eps=1e-5)\n self.use_output_layer = use_output_layer\n self.output_layer = torch.nn.Linear(attention_dim, vocab_size)\n self.num_blocks = num_blocks\n self.decoders = torch.nn.ModuleList(\n [\n DecoderLayer(\n attention_dim,\n MultiHeadedAttention(\n attention_heads, attention_dim, self_attention_dropout_rate\n ),\n MultiHeadedAttention(\n attention_heads, attention_dim, src_attention_dropout_rate\n )\n if src_attention\n else None,\n PositionwiseFeedForward(attention_dim, linear_units, dropout_rate),\n dropout_rate,\n normalize_before,\n )\n for _ in range(self.num_blocks)\n ]\n )\n\n def forward(\n self,\n memory: torch.Tensor,\n memory_mask: torch.Tensor,\n ys_in_pad: torch.Tensor,\n ys_in_lens: torch.Tensor,\n r_ys_in_pad: torch.Tensor = torch.empty(0),\n reverse_weight: float = 0.0,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Forward decoder.\n Args:\n memory: encoded memory, float32 (batch, maxlen_in, feat)\n memory_mask: encoder memory mask, (batch, 1, maxlen_in)\n ys_in_pad: padded input token ids, int64 (batch, maxlen_out)\n ys_in_lens: input lengths of this batch (batch)\n r_ys_in_pad: not used in transformer decoder, in order to unify api\n with bidirectional decoder\n reverse_weight: not used in transformer decoder, in order to unify\n api with bidirectional decode\n Returns:\n (tuple): tuple containing:\n x: decoded token score before softmax (batch, maxlen_out,\n vocab_size) if use_output_layer is True,\n torch.tensor(0.0), in order to unify api with bidirectional decoder\n olens: (batch, )\n \"\"\"\n tgt = ys_in_pad\n maxlen = tgt.size(1)\n # tgt_mask: (B, 1, L)\n tgt_mask = ~make_pad_mask(ys_in_lens, maxlen).unsqueeze(1)\n tgt_mask = tgt_mask.to(tgt.device)\n # m: (1, L, L)\n m = 
subsequent_mask(tgt_mask.size(-1), device=tgt_mask.device).unsqueeze(0)\n # tgt_mask: (B, L, L)\n tgt_mask = tgt_mask & m\n x, _ = self.embed(tgt)\n for layer in self.decoders:\n x, tgt_mask, memory, memory_mask = layer(x, tgt_mask, memory, memory_mask)\n if self.normalize_before:\n x = self.after_norm(x)\n if self.use_output_layer:\n x = self.output_layer(x)\n olens = tgt_mask.sum(1)\n return x, torch.tensor(0.0), olens\n\n def forward_one_step(\n self,\n memory: torch.Tensor,\n memory_mask: torch.Tensor,\n tgt: torch.Tensor,\n tgt_mask: torch.Tensor,\n cache: Optional[List[torch.Tensor]] = None,\n ) -> Tuple[torch.Tensor, List[torch.Tensor]]:\n \"\"\"Forward one step.\n This is only used for decoding.\n Args:\n memory: encoded memory, float32 (batch, maxlen_in, feat)\n memory_mask: encoded memory mask, (batch, 1, maxlen_in)\n tgt: input token ids, int64 (batch, maxlen_out)\n tgt_mask: input token mask, (batch, maxlen_out)\n dtype=torch.uint8 in PyTorch 1.2-\n dtype=torch.bool in PyTorch 1.2+ (include 1.2)\n cache: cached output list of (batch, max_time_out-1, size)\n Returns:\n y, cache: NN output value and cache per `self.decoders`.\n y.shape` is (batch, maxlen_out, token)\n \"\"\"\n x, _ = self.embed(tgt)\n new_cache = []\n for i, decoder in enumerate(self.decoders):\n if cache is None:\n c = None\n else:\n c = cache[i]\n x, tgt_mask, memory, memory_mask = decoder(\n x, tgt_mask, memory, memory_mask, cache=c\n )\n new_cache.append(x)\n if self.normalize_before:\n y = self.after_norm(x[:, -1])\n else:\n y = x[:, -1]\n if self.use_output_layer:\n y = torch.log_softmax(self.output_layer(y), dim=-1)\n return y, new_cache"
},
{
"identifier": "TransformerEncoder",
"path": "modules/wenet_extractor/transformer/encoder.py",
"snippet": "class TransformerEncoder(BaseEncoder):\n \"\"\"Transformer encoder module.\"\"\"\n\n def __init__(\n self,\n input_size: int,\n output_size: int = 256,\n attention_heads: int = 4,\n linear_units: int = 2048,\n num_blocks: int = 6,\n dropout_rate: float = 0.1,\n positional_dropout_rate: float = 0.1,\n attention_dropout_rate: float = 0.0,\n input_layer: str = \"conv2d\",\n pos_enc_layer_type: str = \"abs_pos\",\n normalize_before: bool = True,\n static_chunk_size: int = 0,\n use_dynamic_chunk: bool = False,\n global_cmvn: torch.nn.Module = None,\n use_dynamic_left_chunk: bool = False,\n ):\n \"\"\"Construct TransformerEncoder\n\n See Encoder for the meaning of each parameter.\n \"\"\"\n super().__init__(\n input_size,\n output_size,\n attention_heads,\n linear_units,\n num_blocks,\n dropout_rate,\n positional_dropout_rate,\n attention_dropout_rate,\n input_layer,\n pos_enc_layer_type,\n normalize_before,\n static_chunk_size,\n use_dynamic_chunk,\n global_cmvn,\n use_dynamic_left_chunk,\n )\n self.encoders = torch.nn.ModuleList(\n [\n TransformerEncoderLayer(\n output_size,\n MultiHeadedAttention(\n attention_heads, output_size, attention_dropout_rate\n ),\n PositionwiseFeedForward(output_size, linear_units, dropout_rate),\n dropout_rate,\n normalize_before,\n )\n for _ in range(num_blocks)\n ]\n )"
},
{
"identifier": "LabelSmoothingLoss",
"path": "modules/wenet_extractor/transformer/label_smoothing_loss.py",
"snippet": "class LabelSmoothingLoss(nn.Module):\n \"\"\"Label-smoothing loss.\n\n In a standard CE loss, the label's data distribution is:\n [0,1,2] ->\n [\n [1.0, 0.0, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 1.0],\n ]\n\n In the smoothing version CE Loss,some probabilities\n are taken from the true label prob (1.0) and are divided\n among other labels.\n\n e.g.\n smoothing=0.1\n [0,1,2] ->\n [\n [0.9, 0.05, 0.05],\n [0.05, 0.9, 0.05],\n [0.05, 0.05, 0.9],\n ]\n\n Args:\n size (int): the number of class\n padding_idx (int): padding class id which will be ignored for loss\n smoothing (float): smoothing rate (0.0 means the conventional CE)\n normalize_length (bool):\n normalize loss by sequence length if True\n normalize loss by batch size if False\n \"\"\"\n\n def __init__(\n self,\n size: int,\n padding_idx: int,\n smoothing: float,\n normalize_length: bool = False,\n ):\n \"\"\"Construct an LabelSmoothingLoss object.\"\"\"\n super(LabelSmoothingLoss, self).__init__()\n self.criterion = nn.KLDivLoss(reduction=\"none\")\n self.padding_idx = padding_idx\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing\n self.size = size\n self.normalize_length = normalize_length\n\n def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n \"\"\"Compute loss between x and target.\n\n The model outputs and data labels tensors are flatten to\n (batch*seqlen, class) shape and a mask is applied to the\n padding part which should not be calculated for loss.\n\n Args:\n x (torch.Tensor): prediction (batch, seqlen, class)\n target (torch.Tensor):\n target signal masked with self.padding_id (batch, seqlen)\n Returns:\n loss (torch.Tensor) : The KL loss, scalar float value\n \"\"\"\n assert x.size(2) == self.size\n batch_size = x.size(0)\n x = x.view(-1, self.size)\n target = target.view(-1)\n # use zeros_like instead of torch.no_grad() for true_dist,\n # since no_grad() can not be exported by JIT\n true_dist = torch.zeros_like(x)\n true_dist.fill_(self.smoothing / (self.size - 1))\n ignore = target == self.padding_idx # (B,)\n total = len(target) - ignore.sum().item()\n target = target.masked_fill(ignore, 0) # avoid -1 index\n true_dist.scatter_(1, target.unsqueeze(1), self.confidence)\n kl = self.criterion(torch.log_softmax(x, dim=1), true_dist)\n denom = total if self.normalize_length else batch_size\n return kl.masked_fill(ignore.unsqueeze(1), 0).sum() / denom"
},
{
"identifier": "IGNORE_ID",
"path": "modules/wenet_extractor/utils/common.py",
"snippet": "IGNORE_ID = -1"
},
{
"identifier": "add_sos_eos",
"path": "modules/wenet_extractor/utils/common.py",
"snippet": "def add_sos_eos(\n ys_pad: torch.Tensor, sos: int, eos: int, ignore_id: int\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Add <sos> and <eos> labels.\n\n Args:\n ys_pad (torch.Tensor): batch of padded target sequences (B, Lmax)\n sos (int): index of <sos>\n eos (int): index of <eeos>\n ignore_id (int): index of padding\n\n Returns:\n ys_in (torch.Tensor) : (B, Lmax + 1)\n ys_out (torch.Tensor) : (B, Lmax + 1)\n\n Examples:\n >>> sos_id = 10\n >>> eos_id = 11\n >>> ignore_id = -1\n >>> ys_pad\n tensor([[ 1, 2, 3, 4, 5],\n [ 4, 5, 6, -1, -1],\n [ 7, 8, 9, -1, -1]], dtype=torch.int32)\n >>> ys_in,ys_out=add_sos_eos(ys_pad, sos_id , eos_id, ignore_id)\n >>> ys_in\n tensor([[10, 1, 2, 3, 4, 5],\n [10, 4, 5, 6, 11, 11],\n [10, 7, 8, 9, 11, 11]])\n >>> ys_out\n tensor([[ 1, 2, 3, 4, 5, 11],\n [ 4, 5, 6, 11, -1, -1],\n [ 7, 8, 9, 11, -1, -1]])\n \"\"\"\n _sos = torch.tensor(\n [sos], dtype=torch.long, requires_grad=False, device=ys_pad.device\n )\n _eos = torch.tensor(\n [eos], dtype=torch.long, requires_grad=False, device=ys_pad.device\n )\n ys = [y[y != ignore_id] for y in ys_pad] # parse padded ys\n ys_in = [torch.cat([_sos, y], dim=0) for y in ys]\n ys_out = [torch.cat([y, _eos], dim=0) for y in ys]\n return pad_list(ys_in, eos), pad_list(ys_out, ignore_id)"
},
{
"identifier": "log_add",
"path": "modules/wenet_extractor/utils/common.py",
"snippet": "def log_add(args: List[int]) -> float:\n \"\"\"\n Stable log add\n \"\"\"\n if all(a == -float(\"inf\") for a in args):\n return -float(\"inf\")\n a_max = max(args)\n lsp = math.log(sum(math.exp(a - a_max) for a in args))\n return a_max + lsp"
},
{
"identifier": "remove_duplicates_and_blank",
"path": "modules/wenet_extractor/utils/common.py",
"snippet": "def remove_duplicates_and_blank(hyp: List[int]) -> List[int]:\n new_hyp: List[int] = []\n cur = 0\n while cur < len(hyp):\n if hyp[cur] != 0:\n new_hyp.append(hyp[cur])\n prev = cur\n while cur < len(hyp) and hyp[cur] == hyp[prev]:\n cur += 1\n return new_hyp"
},
{
"identifier": "th_accuracy",
"path": "modules/wenet_extractor/utils/common.py",
"snippet": "def th_accuracy(\n pad_outputs: torch.Tensor, pad_targets: torch.Tensor, ignore_label: int\n) -> float:\n \"\"\"Calculate accuracy.\n\n Args:\n pad_outputs (Tensor): Prediction tensors (B * Lmax, D).\n pad_targets (LongTensor): Target label tensors (B, Lmax).\n ignore_label (int): Ignore label id.\n\n Returns:\n float: Accuracy value (0.0 - 1.0).\n\n \"\"\"\n pad_pred = pad_outputs.view(\n pad_targets.size(0), pad_targets.size(1), pad_outputs.size(1)\n ).argmax(2)\n mask = pad_targets != ignore_label\n numerator = torch.sum(\n pad_pred.masked_select(mask) == pad_targets.masked_select(mask)\n )\n denominator = torch.sum(mask)\n return float(numerator) / float(denominator)"
},
{
"identifier": "reverse_pad_list",
"path": "modules/wenet_extractor/utils/common.py",
"snippet": "def reverse_pad_list(\n ys_pad: torch.Tensor, ys_lens: torch.Tensor, pad_value: float = -1.0\n) -> torch.Tensor:\n \"\"\"Reverse padding for the list of tensors.\n\n Args:\n ys_pad (tensor): The padded tensor (B, Tokenmax).\n ys_lens (tensor): The lens of token seqs (B)\n pad_value (int): Value for padding.\n\n Returns:\n Tensor: Padded tensor (B, Tokenmax).\n\n Examples:\n >>> x\n tensor([[1, 2, 3, 4], [5, 6, 7, 0], [8, 9, 0, 0]])\n >>> pad_list(x, 0)\n tensor([[4, 3, 2, 1],\n [7, 6, 5, 0],\n [9, 8, 0, 0]])\n\n \"\"\"\n r_ys_pad = pad_sequence(\n [(torch.flip(y.int()[:i], [0])) for y, i in zip(ys_pad, ys_lens)],\n True,\n pad_value,\n )\n return r_ys_pad"
},
{
"identifier": "make_pad_mask",
"path": "modules/wenet_extractor/utils/mask.py",
"snippet": "def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:\n \"\"\"Make mask tensor containing indices of padded part.\n\n See description of make_non_pad_mask.\n\n Args:\n lengths (torch.Tensor): Batch of lengths (B,).\n Returns:\n torch.Tensor: Mask tensor containing indices of padded part.\n\n Examples:\n >>> lengths = [5, 3, 2]\n >>> make_pad_mask(lengths)\n masks = [[0, 0, 0, 0 ,0],\n [0, 0, 0, 1, 1],\n [0, 0, 1, 1, 1]]\n \"\"\"\n batch_size = lengths.size(0)\n max_len = max_len if max_len > 0 else lengths.max().item()\n seq_range = torch.arange(0, max_len, dtype=torch.int64, device=lengths.device)\n seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)\n seq_length_expand = lengths.unsqueeze(-1)\n mask = seq_range_expand >= seq_length_expand\n return mask"
},
{
"identifier": "mask_finished_preds",
"path": "modules/wenet_extractor/utils/mask.py",
"snippet": "def mask_finished_preds(\n pred: torch.Tensor, flag: torch.Tensor, eos: int\n) -> torch.Tensor:\n \"\"\"\n If a sequence is finished, all of its branch should be <eos>\n\n Args:\n pred (torch.Tensor): A int array with shape\n (batch_size * beam_size, beam_size).\n flag (torch.Tensor): A bool array with shape\n (batch_size * beam_size, 1).\n\n Returns:\n torch.Tensor: (batch_size * beam_size).\n \"\"\"\n beam_size = pred.size(-1)\n finished = flag.repeat([1, beam_size])\n return pred.masked_fill_(finished, eos)"
},
{
"identifier": "mask_finished_scores",
"path": "modules/wenet_extractor/utils/mask.py",
"snippet": "def mask_finished_scores(score: torch.Tensor, flag: torch.Tensor) -> torch.Tensor:\n \"\"\"\n If a sequence is finished, we only allow one alive branch. This function\n aims to give one branch a zero score and the rest -inf score.\n\n Args:\n score (torch.Tensor): A real value array with shape\n (batch_size * beam_size, beam_size).\n flag (torch.Tensor): A bool array with shape\n (batch_size * beam_size, 1).\n\n Returns:\n torch.Tensor: (batch_size * beam_size, beam_size).\n \"\"\"\n beam_size = score.size(-1)\n zero_mask = torch.zeros_like(flag, dtype=torch.bool)\n if beam_size > 1:\n unfinished = torch.cat((zero_mask, flag.repeat([1, beam_size - 1])), dim=1)\n finished = torch.cat((flag, zero_mask.repeat([1, beam_size - 1])), dim=1)\n else:\n unfinished = zero_mask\n finished = flag\n score.masked_fill_(unfinished, -float(\"inf\"))\n score.masked_fill_(finished, 0)\n return score"
},
{
"identifier": "subsequent_mask",
"path": "modules/wenet_extractor/utils/mask.py",
"snippet": "def subsequent_mask(\n size: int,\n device: torch.device = torch.device(\"cpu\"),\n) -> torch.Tensor:\n \"\"\"Create mask for subsequent steps (size, size).\n\n This mask is used only in decoder which works in an auto-regressive mode.\n This means the current step could only do attention with its left steps.\n\n In encoder, fully attention is used when streaming is not necessary and\n the sequence is not long. In this case, no attention mask is needed.\n\n When streaming is need, chunk-based attention is used in encoder. See\n subsequent_chunk_mask for the chunk-based attention mask.\n\n Args:\n size (int): size of mask\n str device (str): \"cpu\" or \"cuda\" or torch.Tensor.device\n dtype (torch.device): result dtype\n\n Returns:\n torch.Tensor: mask\n\n Examples:\n >>> subsequent_mask(3)\n [[1, 0, 0],\n [1, 1, 0],\n [1, 1, 1]]\n \"\"\"\n arange = torch.arange(size, device=device)\n mask = arange.expand(size, size)\n arange = arange.unsqueeze(-1)\n mask = mask <= arange\n return mask"
}
] | from collections import defaultdict
from typing import Dict, List, Optional, Tuple
from torch.nn.utils.rnn import pad_sequence
from modules.wenet_extractor.transformer.ctc import CTC
from modules.wenet_extractor.transformer.decoder import TransformerDecoder
from modules.wenet_extractor.transformer.encoder import TransformerEncoder
from modules.wenet_extractor.transformer.label_smoothing_loss import LabelSmoothingLoss
from modules.wenet_extractor.utils.common import (
IGNORE_ID,
add_sos_eos,
log_add,
remove_duplicates_and_blank,
th_accuracy,
reverse_pad_list,
)
from modules.wenet_extractor.utils.mask import (
make_pad_mask,
mask_finished_preds,
mask_finished_scores,
subsequent_mask,
)
import torch
import torch.nn.functional as F | 8,948 | scores = torch.tensor(
[0.0] + [-float("inf")] * (beam_size - 1), dtype=torch.float
)
scores = (
scores.to(device).repeat([batch_size]).unsqueeze(1).to(device)
) # (B*N, 1)
end_flag = torch.zeros_like(scores, dtype=torch.bool, device=device)
cache: Optional[List[torch.Tensor]] = None
# 2. Decoder forward step by step
for i in range(1, maxlen + 1):
# Stop if all batch and all beam produce eos
if end_flag.sum() == running_size:
break
# 2.1 Forward decoder step
hyps_mask = (
subsequent_mask(i).unsqueeze(0).repeat(running_size, 1, 1).to(device)
) # (B*N, i, i)
# logp: (B*N, vocab)
logp, cache = self.decoder.forward_one_step(
encoder_out, encoder_mask, hyps, hyps_mask, cache
)
# 2.2 First beam prune: select topk best prob at current time
top_k_logp, top_k_index = logp.topk(beam_size) # (B*N, N)
top_k_logp = mask_finished_scores(top_k_logp, end_flag)
top_k_index = mask_finished_preds(top_k_index, end_flag, self.eos)
# 2.3 Second beam prune: select topk score with history
scores = scores + top_k_logp # (B*N, N), broadcast add
scores = scores.view(batch_size, beam_size * beam_size) # (B, N*N)
scores, offset_k_index = scores.topk(k=beam_size) # (B, N)
# Update cache to be consistent with new topk scores / hyps
cache_index = (offset_k_index // beam_size).view(-1) # (B*N)
base_cache_index = (
torch.arange(batch_size, device=device)
.view(-1, 1)
.repeat([1, beam_size])
* beam_size
).view(
-1
) # (B*N)
cache_index = base_cache_index + cache_index
cache = [torch.index_select(c, dim=0, index=cache_index) for c in cache]
scores = scores.view(-1, 1) # (B*N, 1)
# 2.4. Compute base index in top_k_index,
# regard top_k_index as (B*N*N),regard offset_k_index as (B*N),
# then find offset_k_index in top_k_index
base_k_index = (
torch.arange(batch_size, device=device)
.view(-1, 1)
.repeat([1, beam_size])
) # (B, N)
base_k_index = base_k_index * beam_size * beam_size
best_k_index = base_k_index.view(-1) + offset_k_index.view(-1) # (B*N)
# 2.5 Update best hyps
best_k_pred = torch.index_select(
top_k_index.view(-1), dim=-1, index=best_k_index
) # (B*N)
best_hyps_index = best_k_index // beam_size
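            # Editorial note, not from the upstream WeNet source: a worked example of the
            # flattened-index bookkeeping above, assuming B = 2 and N = beam_size = 3.
            # scores.view(B, N*N) holds 9 candidates per batch item, so
            # best_k_index = b * N * N + offset ranges over [0, 18). Integer division by
            # beam_size maps each winner back to one of the B*N = 6 running hypotheses,
            # e.g. best_k_index = 13 -> best_hyps_index = 13 // 3 = 4 (batch item 1, beam 1),
            # and top_k_index.view(-1)[13] is the token appended to that hypothesis below.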
last_best_k_hyps = torch.index_select(
hyps, dim=0, index=best_hyps_index
) # (B*N, i)
hyps = torch.cat(
(last_best_k_hyps, best_k_pred.view(-1, 1)), dim=1
) # (B*N, i+1)
# 2.6 Update end flag
end_flag = torch.eq(hyps[:, -1], self.eos).view(-1, 1)
# 3. Select best of best
scores = scores.view(batch_size, beam_size)
# TODO: length normalization
best_scores, best_index = scores.max(dim=-1)
best_hyps_index = (
best_index
+ torch.arange(batch_size, dtype=torch.long, device=device) * beam_size
)
best_hyps = torch.index_select(hyps, dim=0, index=best_hyps_index)
best_hyps = best_hyps[:, 1:]
return best_hyps, best_scores
def ctc_greedy_search(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
) -> List[List[int]]:
"""Apply CTC greedy search
Args:
speech (torch.Tensor): (batch, max_len, feat_dim)
            speech_lengths (torch.Tensor): (batch, )
decoding_chunk_size (int): decoding chunk for dynamic chunk
trained model.
<0: for decoding, use full chunk.
>0: for decoding, use fixed chunk size as set.
0: used for training, it's prohibited here
simulate_streaming (bool): whether do encoder forward in a
streaming fashion
Returns:
List[List[int]]: best path result
"""
assert speech.shape[0] == speech_lengths.shape[0]
assert decoding_chunk_size != 0
batch_size = speech.shape[0]
# Let's assume B = batch_size
encoder_out, encoder_mask = self._forward_encoder(
speech,
speech_lengths,
decoding_chunk_size,
num_decoding_left_chunks,
simulate_streaming,
) # (B, maxlen, encoder_dim)
maxlen = encoder_out.size(1)
encoder_out_lens = encoder_mask.squeeze(1).sum(1)
ctc_probs = self.ctc.log_softmax(encoder_out) # (B, maxlen, vocab_size)
topk_prob, topk_index = ctc_probs.topk(1, dim=2) # (B, maxlen, 1)
topk_index = topk_index.view(batch_size, maxlen) # (B, maxlen)
| # This module is from [WeNet](https://github.com/wenet-e2e/wenet).
# ## Citations
# ```bibtex
# @inproceedings{yao2021wenet,
# title={WeNet: Production oriented Streaming and Non-streaming End-to-End Speech Recognition Toolkit},
# author={Yao, Zhuoyuan and Wu, Di and Wang, Xiong and Zhang, Binbin and Yu, Fan and Yang, Chao and Peng, Zhendong and Chen, Xiaoyu and Xie, Lei and Lei, Xin},
# booktitle={Proc. Interspeech},
# year={2021},
# address={Brno, Czech Republic },
# organization={IEEE}
# }
# @article{zhang2022wenet,
# title={WeNet 2.0: More Productive End-to-End Speech Recognition Toolkit},
# author={Zhang, Binbin and Wu, Di and Peng, Zhendong and Song, Xingchen and Yao, Zhuoyuan and Lv, Hang and Xie, Lei and Yang, Chao and Pan, Fuping and Niu, Jianwei},
# journal={arXiv preprint arXiv:2203.15455},
# year={2022}
# }
# ```
class ASRModel(torch.nn.Module):
"""CTC-attention hybrid Encoder-Decoder model"""
def __init__(
self,
vocab_size: int,
encoder: TransformerEncoder,
decoder: TransformerDecoder,
ctc: CTC,
ctc_weight: float = 0.5,
ignore_id: int = IGNORE_ID,
reverse_weight: float = 0.0,
lsm_weight: float = 0.0,
length_normalized_loss: bool = False,
lfmmi_dir: str = "",
):
assert 0.0 <= ctc_weight <= 1.0, ctc_weight
super().__init__()
# note that eos is the same as sos (equivalent ID)
self.sos = vocab_size - 1
self.eos = vocab_size - 1
self.vocab_size = vocab_size
self.ignore_id = ignore_id
self.ctc_weight = ctc_weight
self.reverse_weight = reverse_weight
self.encoder = encoder
self.decoder = decoder
self.ctc = ctc
self.criterion_att = LabelSmoothingLoss(
size=vocab_size,
padding_idx=ignore_id,
smoothing=lsm_weight,
normalize_length=length_normalized_loss,
)
self.lfmmi_dir = lfmmi_dir
if self.lfmmi_dir != "":
self.load_lfmmi_resource()
def forward(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
text: torch.Tensor,
text_lengths: torch.Tensor,
) -> Dict[str, Optional[torch.Tensor]]:
"""Frontend + Encoder + Decoder + Calc loss
Args:
speech: (Batch, Length, ...)
speech_lengths: (Batch, )
text: (Batch, Length)
text_lengths: (Batch,)
"""
assert text_lengths.dim() == 1, text_lengths.shape
# Check that batch_size is unified
assert (
speech.shape[0]
== speech_lengths.shape[0]
== text.shape[0]
== text_lengths.shape[0]
), (speech.shape, speech_lengths.shape, text.shape, text_lengths.shape)
# 1. Encoder
encoder_out, encoder_mask = self.encoder(speech, speech_lengths)
encoder_out_lens = encoder_mask.squeeze(1).sum(1)
# 2a. Attention-decoder branch
if self.ctc_weight != 1.0:
loss_att, acc_att = self._calc_att_loss(
encoder_out, encoder_mask, text, text_lengths
)
else:
loss_att = None
# 2b. CTC branch or LF-MMI loss
if self.ctc_weight != 0.0:
if self.lfmmi_dir != "":
loss_ctc = self._calc_lfmmi_loss(encoder_out, encoder_mask, text)
else:
loss_ctc = self.ctc(encoder_out, encoder_out_lens, text, text_lengths)
else:
loss_ctc = None
if loss_ctc is None:
loss = loss_att
elif loss_att is None:
loss = loss_ctc
else:
loss = self.ctc_weight * loss_ctc + (1 - self.ctc_weight) * loss_att
return {"loss": loss, "loss_att": loss_att, "loss_ctc": loss_ctc}
def _calc_att_loss(
self,
encoder_out: torch.Tensor,
encoder_mask: torch.Tensor,
ys_pad: torch.Tensor,
ys_pad_lens: torch.Tensor,
) -> Tuple[torch.Tensor, float]:
ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, self.sos, self.eos, self.ignore_id)
ys_in_lens = ys_pad_lens + 1
# reverse the seq, used for right to left decoder
r_ys_pad = reverse_pad_list(ys_pad, ys_pad_lens, float(self.ignore_id))
r_ys_in_pad, r_ys_out_pad = add_sos_eos(
r_ys_pad, self.sos, self.eos, self.ignore_id
)
# 1. Forward decoder
decoder_out, r_decoder_out, _ = self.decoder(
encoder_out,
encoder_mask,
ys_in_pad,
ys_in_lens,
r_ys_in_pad,
self.reverse_weight,
)
# 2. Compute attention loss
loss_att = self.criterion_att(decoder_out, ys_out_pad)
r_loss_att = torch.tensor(0.0)
if self.reverse_weight > 0.0:
r_loss_att = self.criterion_att(r_decoder_out, r_ys_out_pad)
loss_att = (
loss_att * (1 - self.reverse_weight) + r_loss_att * self.reverse_weight
)
acc_att = th_accuracy(
decoder_out.view(-1, self.vocab_size),
ys_out_pad,
ignore_label=self.ignore_id,
)
return loss_att, acc_att
def _forward_encoder(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
# Let's assume B = batch_size
# 1. Encoder
if simulate_streaming and decoding_chunk_size > 0:
encoder_out, encoder_mask = self.encoder.forward_chunk_by_chunk(
speech,
decoding_chunk_size=decoding_chunk_size,
num_decoding_left_chunks=num_decoding_left_chunks,
) # (B, maxlen, encoder_dim)
else:
encoder_out, encoder_mask = self.encoder(
speech,
speech_lengths,
decoding_chunk_size=decoding_chunk_size,
num_decoding_left_chunks=num_decoding_left_chunks,
) # (B, maxlen, encoder_dim)
return encoder_out, encoder_mask
def encoder_extractor(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
# assert speech.shape[0] == speech_lengths[0]
assert decoding_chunk_size != 0
batch_size = speech.shape[0]
encoder_out, encoder_mask = self._forward_encoder(
speech,
speech_lengths,
decoding_chunk_size,
num_decoding_left_chunks,
simulate_streaming,
) # (B, maxlen, encoder_dim)
return encoder_out
def recognize(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
beam_size: int = 10,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
) -> torch.Tensor:
"""Apply beam search on attention decoder
Args:
speech (torch.Tensor): (batch, max_len, feat_dim)
            speech_lengths (torch.Tensor): (batch, )
beam_size (int): beam size for beam search
decoding_chunk_size (int): decoding chunk for dynamic chunk
trained model.
<0: for decoding, use full chunk.
>0: for decoding, use fixed chunk size as set.
0: used for training, it's prohibited here
simulate_streaming (bool): whether do encoder forward in a
streaming fashion
Returns:
            Tuple[torch.Tensor, torch.Tensor]: best hypotheses (batch, max_result_len) and their scores (batch, )
"""
assert speech.shape[0] == speech_lengths.shape[0]
assert decoding_chunk_size != 0
device = speech.device
batch_size = speech.shape[0]
# Let's assume B = batch_size and N = beam_size
# 1. Encoder
encoder_out, encoder_mask = self._forward_encoder(
speech,
speech_lengths,
decoding_chunk_size,
num_decoding_left_chunks,
simulate_streaming,
) # (B, maxlen, encoder_dim)
maxlen = encoder_out.size(1)
encoder_dim = encoder_out.size(2)
running_size = batch_size * beam_size
encoder_out = (
encoder_out.unsqueeze(1)
.repeat(1, beam_size, 1, 1)
.view(running_size, maxlen, encoder_dim)
) # (B*N, maxlen, encoder_dim)
encoder_mask = (
encoder_mask.unsqueeze(1)
.repeat(1, beam_size, 1, 1)
.view(running_size, 1, maxlen)
) # (B*N, 1, max_len)
hyps = torch.ones([running_size, 1], dtype=torch.long, device=device).fill_(
self.sos
) # (B*N, 1)
scores = torch.tensor(
[0.0] + [-float("inf")] * (beam_size - 1), dtype=torch.float
)
scores = (
scores.to(device).repeat([batch_size]).unsqueeze(1).to(device)
) # (B*N, 1)
end_flag = torch.zeros_like(scores, dtype=torch.bool, device=device)
cache: Optional[List[torch.Tensor]] = None
# 2. Decoder forward step by step
for i in range(1, maxlen + 1):
# Stop if all batch and all beam produce eos
if end_flag.sum() == running_size:
break
# 2.1 Forward decoder step
hyps_mask = (
subsequent_mask(i).unsqueeze(0).repeat(running_size, 1, 1).to(device)
) # (B*N, i, i)
# logp: (B*N, vocab)
logp, cache = self.decoder.forward_one_step(
encoder_out, encoder_mask, hyps, hyps_mask, cache
)
# 2.2 First beam prune: select topk best prob at current time
top_k_logp, top_k_index = logp.topk(beam_size) # (B*N, N)
top_k_logp = mask_finished_scores(top_k_logp, end_flag)
top_k_index = mask_finished_preds(top_k_index, end_flag, self.eos)
# 2.3 Second beam prune: select topk score with history
scores = scores + top_k_logp # (B*N, N), broadcast add
scores = scores.view(batch_size, beam_size * beam_size) # (B, N*N)
scores, offset_k_index = scores.topk(k=beam_size) # (B, N)
# Update cache to be consistent with new topk scores / hyps
cache_index = (offset_k_index // beam_size).view(-1) # (B*N)
base_cache_index = (
torch.arange(batch_size, device=device)
.view(-1, 1)
.repeat([1, beam_size])
* beam_size
).view(
-1
) # (B*N)
cache_index = base_cache_index + cache_index
cache = [torch.index_select(c, dim=0, index=cache_index) for c in cache]
scores = scores.view(-1, 1) # (B*N, 1)
# 2.4. Compute base index in top_k_index,
# regard top_k_index as (B*N*N),regard offset_k_index as (B*N),
# then find offset_k_index in top_k_index
base_k_index = (
torch.arange(batch_size, device=device)
.view(-1, 1)
.repeat([1, beam_size])
) # (B, N)
base_k_index = base_k_index * beam_size * beam_size
best_k_index = base_k_index.view(-1) + offset_k_index.view(-1) # (B*N)
# 2.5 Update best hyps
best_k_pred = torch.index_select(
top_k_index.view(-1), dim=-1, index=best_k_index
) # (B*N)
best_hyps_index = best_k_index // beam_size
last_best_k_hyps = torch.index_select(
hyps, dim=0, index=best_hyps_index
) # (B*N, i)
hyps = torch.cat(
(last_best_k_hyps, best_k_pred.view(-1, 1)), dim=1
) # (B*N, i+1)
# 2.6 Update end flag
end_flag = torch.eq(hyps[:, -1], self.eos).view(-1, 1)
# 3. Select best of best
scores = scores.view(batch_size, beam_size)
# TODO: length normalization
best_scores, best_index = scores.max(dim=-1)
best_hyps_index = (
best_index
+ torch.arange(batch_size, dtype=torch.long, device=device) * beam_size
)
best_hyps = torch.index_select(hyps, dim=0, index=best_hyps_index)
best_hyps = best_hyps[:, 1:]
return best_hyps, best_scores
def ctc_greedy_search(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
) -> List[List[int]]:
"""Apply CTC greedy search
Args:
speech (torch.Tensor): (batch, max_len, feat_dim)
            speech_lengths (torch.Tensor): (batch, )
decoding_chunk_size (int): decoding chunk for dynamic chunk
trained model.
<0: for decoding, use full chunk.
>0: for decoding, use fixed chunk size as set.
0: used for training, it's prohibited here
simulate_streaming (bool): whether do encoder forward in a
streaming fashion
Returns:
List[List[int]]: best path result
"""
assert speech.shape[0] == speech_lengths.shape[0]
assert decoding_chunk_size != 0
batch_size = speech.shape[0]
# Let's assume B = batch_size
encoder_out, encoder_mask = self._forward_encoder(
speech,
speech_lengths,
decoding_chunk_size,
num_decoding_left_chunks,
simulate_streaming,
) # (B, maxlen, encoder_dim)
maxlen = encoder_out.size(1)
encoder_out_lens = encoder_mask.squeeze(1).sum(1)
ctc_probs = self.ctc.log_softmax(encoder_out) # (B, maxlen, vocab_size)
topk_prob, topk_index = ctc_probs.topk(1, dim=2) # (B, maxlen, 1)
topk_index = topk_index.view(batch_size, maxlen) # (B, maxlen) | mask = make_pad_mask(encoder_out_lens, maxlen) # (B, maxlen) | 10 | 2023-11-15 09:19:27+00:00 | 12k |
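Editorial aside (not part of either dataset row): the argmax path computed at the end of the row above is normally post-processed by remove_duplicates_and_blank from the row's context, which collapses repeated ids and drops CTC blanks (id 0). The worked example below copies that snippet verbatim and adds a toy input; only the example values are editorial.

from typing import List

def remove_duplicates_and_blank(hyp: List[int]) -> List[int]:
    new_hyp: List[int] = []
    cur = 0
    while cur < len(hyp):
        if hyp[cur] != 0:
            new_hyp.append(hyp[cur])
        prev = cur
        while cur < len(hyp) and hyp[cur] == hyp[prev]:
            cur += 1
    return new_hyp

# Repeats collapse and blanks are removed; a blank between equal ids keeps both occurrences.
assert remove_duplicates_and_blank([5, 5, 0, 7, 0, 7, 7]) == [5, 7, 7]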
banodoco/Steerable-Motion | imports/AdvancedControlNet/nodes.py | [
{
"identifier": "load_controlnet",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "def load_controlnet(ckpt_path, timestep_keyframe: TimestepKeyframeGroupImport=None, model=None):\n control = comfy_cn.load_controlnet(ckpt_path, model=model)\n # TODO: support controlnet-lllite\n # if is None, see if is a non-vanilla ControlNet\n # if control is None:\n # controlnet_data = comfy.utils.load_torch_file(ckpt_path, safe_load=True)\n # # check if lllite\n # if \"lllite_unet\" in controlnet_data:\n # pass\n return convert_to_advanced(control, timestep_keyframe=timestep_keyframe)"
},
{
"identifier": "convert_to_advanced",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "def convert_to_advanced(control, timestep_keyframe: TimestepKeyframeGroupImport=None):\n # if already advanced, leave it be\n if is_advanced_controlnet(control):\n return control\n # if exactly ControlNet returned, transform it into ControlNetAdvancedImport\n if type(control) == ControlNet:\n return ControlNetAdvancedImport.from_vanilla(v=control, timestep_keyframe=timestep_keyframe)\n # if exactly ControlLora returned, transform it into ControlLoraAdvancedImport\n elif type(control) == ControlLora:\n return ControlLoraAdvancedImport.from_vanilla(v=control, timestep_keyframe=timestep_keyframe)\n # if T2IAdapter returned, transform it into T2IAdapterAdvancedImport\n elif isinstance(control, T2IAdapter):\n return T2IAdapterAdvancedImport.from_vanilla(v=control, timestep_keyframe=timestep_keyframe)\n # otherwise, leave it be - might be something I am not supporting yet\n return control"
},
{
"identifier": "ControlWeightsImport",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "class ControlWeightsImport:\n def __init__(self, weight_type: str, base_multiplier: float=1.0, flip_weights: bool=False, weights: list[float]=None, weight_mask: Tensor=None):\n self.weight_type = weight_type\n self.base_multiplier = base_multiplier\n self.flip_weights = flip_weights\n self.weights = weights\n if self.weights is not None and self.flip_weights:\n self.weights.reverse()\n self.weight_mask = weight_mask\n\n def get(self, idx: int) -> Union[float, Tensor]:\n # if weights is not none, return index\n if self.weights is not None:\n return self.weights[idx]\n return 1.0\n\n @classmethod\n def default(cls):\n return cls(ControlWeightTypeImport.DEFAULT)\n\n @classmethod\n def universal(cls, base_multiplier: float, flip_weights: bool=False):\n return cls(ControlWeightTypeImport.UNIVERSAL, base_multiplier=base_multiplier, flip_weights=flip_weights)\n \n @classmethod\n def universal_mask(cls, weight_mask: Tensor):\n return cls(ControlWeightTypeImport.UNIVERSAL, weight_mask=weight_mask)\n\n @classmethod\n def t2iadapter(cls, weights: list[float]=None, flip_weights: bool=False):\n if weights is None:\n weights = [1.0]*12\n return cls(ControlWeightTypeImport.T2IADAPTER, weights=weights,flip_weights=flip_weights)\n\n @classmethod\n def controlnet(cls, weights: list[float]=None, flip_weights: bool=False):\n if weights is None:\n weights = [1.0]*13\n return cls(ControlWeightTypeImport.CONTROLNET, weights=weights, flip_weights=flip_weights)\n \n @classmethod\n def controllora(cls, weights: list[float]=None, flip_weights: bool=False):\n if weights is None:\n weights = [1.0]*10\n return cls(ControlWeightTypeImport.CONTROLLORA, weights=weights, flip_weights=flip_weights)\n \n @classmethod\n def controllllite(cls, weights: list[float]=None, flip_weights: bool=False):\n if weights is None:\n # TODO: make this have a real value\n weights = [1.0]*200\n return cls(ControlWeightTypeImport.CONTROLLLLITE, weights=weights, flip_weights=flip_weights)"
},
{
"identifier": "ControlWeightTypeImport",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "class ControlWeightTypeImport:\n DEFAULT = \"default\"\n UNIVERSAL = \"universal\"\n T2IADAPTER = \"t2iadapter\"\n CONTROLNET = \"controlnet\"\n CONTROLLORA = \"controllora\"\n CONTROLLLLITE = \"controllllite\""
},
{
"identifier": "LatentKeyframeGroupImport",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "class LatentKeyframeGroupImport:\n def __init__(self) -> None:\n self.keyframes: list[LatentKeyframeImport] = []\n\n def add(self, keyframe: LatentKeyframeImport) -> None:\n added = False\n # replace existing keyframe if same batch_index\n for i in range(len(self.keyframes)):\n if self.keyframes[i].batch_index == keyframe.batch_index:\n self.keyframes[i] = keyframe\n added = True\n break\n if not added:\n self.keyframes.append(keyframe)\n self.keyframes.sort(key=lambda k: k.batch_index)\n \n def get_index(self, index: int) -> Union[LatentKeyframeImport, None]:\n try:\n return self.keyframes[index]\n except IndexError:\n return None\n \n def __getitem__(self, index) -> LatentKeyframeImport:\n return self.keyframes[index]\n \n def is_empty(self) -> bool:\n return len(self.keyframes) == 0\n\n def clone(self) -> 'LatentKeyframeGroupImport':\n cloned = LatentKeyframeGroupImport()\n for tk in self.keyframes:\n cloned.add(tk)\n return cloned"
},
{
"identifier": "TimestepKeyframeImport",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "class TimestepKeyframeImport:\n def __init__(self,\n start_percent: float = 0.0,\n strength: float = 1.0,\n interpolation: str = StrengthInterpolationImport.NONE,\n control_weights: ControlWeightsImport = None,\n latent_keyframes: LatentKeyframeGroupImport = None,\n null_latent_kf_strength: float = 0.0,\n inherit_missing: bool = True,\n guarantee_usage: bool = True,\n mask_hint_orig: Tensor = None) -> None:\n self.start_percent = start_percent\n self.start_t = 999999999.9\n self.strength = strength\n self.interpolation = interpolation\n self.control_weights = control_weights\n self.latent_keyframes = latent_keyframes\n self.null_latent_kf_strength = null_latent_kf_strength\n self.inherit_missing = inherit_missing\n self.guarantee_usage = guarantee_usage\n self.mask_hint_orig = mask_hint_orig\n\n def has_control_weights(self):\n return self.control_weights is not None\n \n def has_latent_keyframes(self):\n return self.latent_keyframes is not None\n \n def has_mask_hint(self):\n return self.mask_hint_orig is not None\n \n \n @classmethod\n def default(cls) -> 'TimestepKeyframeImport':\n return cls(0.0)"
},
{
"identifier": "TimestepKeyframeGroupImport",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "class TimestepKeyframeGroupImport:\n def __init__(self) -> None:\n self.keyframes: list[TimestepKeyframeImport] = []\n self.keyframes.append(TimestepKeyframeImport.default())\n\n def add(self, keyframe: TimestepKeyframeImport) -> None:\n added = False\n # replace existing keyframe if same start_percent\n for i in range(len(self.keyframes)):\n if self.keyframes[i].start_percent == keyframe.start_percent:\n self.keyframes[i] = keyframe\n added = True\n break\n if not added:\n self.keyframes.append(keyframe)\n self.keyframes.sort(key=lambda k: k.start_percent)\n\n def get_index(self, index: int) -> Union[TimestepKeyframeImport, None]:\n try:\n return self.keyframes[index]\n except IndexError:\n return None\n \n def has_index(self, index: int) -> int:\n return index >=0 and index < len(self.keyframes)\n\n def __getitem__(self, index) -> TimestepKeyframeImport:\n return self.keyframes[index]\n \n def __len__(self) -> int:\n return len(self.keyframes)\n\n def is_empty(self) -> bool:\n return len(self.keyframes) == 0\n \n def clone(self) -> 'TimestepKeyframeGroupImport':\n cloned = TimestepKeyframeGroupImport()\n for tk in self.keyframes:\n cloned.add(tk)\n return cloned\n \n @classmethod\n def default(cls, keyframe: TimestepKeyframeImport) -> 'TimestepKeyframeGroupImport':\n group = cls()\n group.keyframes[0] = keyframe\n return group"
},
{
"identifier": "is_advanced_controlnet",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "def is_advanced_controlnet(input_object):\n return hasattr(input_object, \"sub_idxs\")"
},
{
"identifier": "StrengthInterpolationImport",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "class StrengthInterpolationImport:\n LINEAR = \"linear\"\n EASE_IN = \"ease-in\"\n EASE_OUT = \"ease-out\"\n EASE_IN_OUT = \"ease-in-out\"\n NONE = \"none\""
},
{
"identifier": "DefaultWeightsImport",
"path": "imports/AdvancedControlNet/weight_nodes.py",
"snippet": "class DefaultWeightsImport:\n @classmethod\n def INPUT_TYPES(s):\n return {\n }\n \n RETURN_TYPES = (\"CONTROL_NET_WEIGHTS\", \"TIMESTEP_KEYFRAME\",)\n RETURN_NAMES = WEIGHTS_RETURN_NAMES\n FUNCTION = \"load_weights\"\n\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/weights\"\n\n def load_weights(self):\n weights = ControlWeightsImport.default()\n return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights))) "
},
{
"identifier": "ScaledSoftMaskedUniversalWeightsImport",
"path": "imports/AdvancedControlNet/weight_nodes.py",
"snippet": "class ScaledSoftMaskedUniversalWeightsImport:\n @classmethod\n def INPUT_TYPES(s):\n return {\n \"required\": {\n \"mask\": (\"MASK\", ),\n \"min_base_multiplier\": (\"FLOAT\", {\"default\": 0.0, \"min\": 0.0, \"max\": 1.0, \"step\": 0.001}, ),\n \"max_base_multiplier\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 1.0, \"step\": 0.001}, ),\n #\"lock_min\": (\"BOOLEAN\", {\"default\": False}, ),\n #\"lock_max\": (\"BOOLEAN\", {\"default\": False}, ),\n },\n }\n \n RETURN_TYPES = (\"CONTROL_NET_WEIGHTS\", \"TIMESTEP_KEYFRAME\",)\n RETURN_NAMES = WEIGHTS_RETURN_NAMES\n FUNCTION = \"load_weights\"\n\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/weights\"\n\n def load_weights(self, mask: Tensor, min_base_multiplier: float, max_base_multiplier: float, lock_min=False, lock_max=False):\n # normalize mask\n mask = mask.clone()\n x_min = 0.0 if lock_min else mask.min()\n x_max = 1.0 if lock_max else mask.max()\n if x_min == x_max:\n mask = torch.ones_like(mask) * max_base_multiplier\n else:\n mask = linear_conversion(mask, x_min, x_max, min_base_multiplier, max_base_multiplier)\n weights = ControlWeightsImport.universal_mask(weight_mask=mask)\n return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights)))"
},
{
"identifier": "ScaledSoftUniversalWeightsImport",
"path": "imports/AdvancedControlNet/weight_nodes.py",
"snippet": "class ScaledSoftUniversalWeightsImport:\n @classmethod\n def INPUT_TYPES(s):\n return {\n \"required\": {\n \"base_multiplier\": (\"FLOAT\", {\"default\": 0.825, \"min\": 0.0, \"max\": 1.0, \"step\": 0.001}, ),\n \"flip_weights\": (\"BOOLEAN\", {\"default\": False}),\n },\n }\n \n RETURN_TYPES = (\"CONTROL_NET_WEIGHTS\", \"TIMESTEP_KEYFRAME\",)\n RETURN_NAMES = WEIGHTS_RETURN_NAMES\n FUNCTION = \"load_weights\"\n\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/weights\"\n\n def load_weights(self, base_multiplier, flip_weights):\n weights = ControlWeightsImport.universal(base_multiplier=base_multiplier, flip_weights=flip_weights)\n return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights))) "
},
{
"identifier": "SoftControlNetWeightsImport",
"path": "imports/AdvancedControlNet/weight_nodes.py",
"snippet": "class SoftControlNetWeightsImport:\n @classmethod\n def INPUT_TYPES(s):\n return {\n \"required\": {\n \"weight_00\": (\"FLOAT\", {\"default\": 0.09941396206337118, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_01\": (\"FLOAT\", {\"default\": 0.12050177219802567, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_02\": (\"FLOAT\", {\"default\": 0.14606275417942507, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_03\": (\"FLOAT\", {\"default\": 0.17704576264172736, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_04\": (\"FLOAT\", {\"default\": 0.214600924414215, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_05\": (\"FLOAT\", {\"default\": 0.26012233262329093, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_06\": (\"FLOAT\", {\"default\": 0.3152997971191405, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_07\": (\"FLOAT\", {\"default\": 0.3821815722656249, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_08\": (\"FLOAT\", {\"default\": 0.4632503906249999, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_09\": (\"FLOAT\", {\"default\": 0.561515625, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_10\": (\"FLOAT\", {\"default\": 0.6806249999999999, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_11\": (\"FLOAT\", {\"default\": 0.825, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_12\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"flip_weights\": (\"BOOLEAN\", {\"default\": False}),\n },\n }\n \n RETURN_TYPES = (\"CONTROL_NET_WEIGHTS\", \"TIMESTEP_KEYFRAME\",)\n RETURN_NAMES = WEIGHTS_RETURN_NAMES\n FUNCTION = \"load_weights\"\n\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/weights/ControlNet\"\n\n def load_weights(self, weight_00, weight_01, weight_02, weight_03, weight_04, weight_05, weight_06, \n weight_07, weight_08, weight_09, weight_10, weight_11, weight_12, flip_weights):\n weights = [weight_00, weight_01, weight_02, weight_03, weight_04, weight_05, weight_06, \n weight_07, weight_08, weight_09, weight_10, weight_11, weight_12]\n weights = ControlWeightsImport.controlnet(weights, flip_weights=flip_weights)\n return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights)))"
},
{
"identifier": "CustomControlNetWeightsImport",
"path": "imports/AdvancedControlNet/weight_nodes.py",
"snippet": "class CustomControlNetWeightsImport:\n @classmethod\n def INPUT_TYPES(s):\n return {\n \"required\": {\n \"weight_00\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_01\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_02\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_03\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_04\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_05\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_06\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_07\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_08\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_09\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_10\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_11\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_12\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"flip_weights\": (\"BOOLEAN\", {\"default\": False}),\n }\n }\n \n RETURN_TYPES = (\"CONTROL_NET_WEIGHTS\", \"TIMESTEP_KEYFRAME\",)\n RETURN_NAMES = WEIGHTS_RETURN_NAMES\n FUNCTION = \"load_weights\"\n\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/weights/ControlNet\"\n\n def load_weights(self, weight_00, weight_01, weight_02, weight_03, weight_04, weight_05, weight_06, \n weight_07, weight_08, weight_09, weight_10, weight_11, weight_12, flip_weights):\n weights = [weight_00, weight_01, weight_02, weight_03, weight_04, weight_05, weight_06, \n weight_07, weight_08, weight_09, weight_10, weight_11, weight_12]\n weights = ControlWeightsImport.controlnet(weights, flip_weights=flip_weights)\n return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights)))"
},
{
"identifier": "SoftT2IAdapterWeightsImport",
"path": "imports/AdvancedControlNet/weight_nodes.py",
"snippet": "class SoftT2IAdapterWeightsImport:\n @classmethod\n def INPUT_TYPES(s):\n return {\n \"required\": {\n \"weight_00\": (\"FLOAT\", {\"default\": 0.25, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_01\": (\"FLOAT\", {\"default\": 0.62, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_02\": (\"FLOAT\", {\"default\": 0.825, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_03\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"flip_weights\": (\"BOOLEAN\", {\"default\": False}),\n },\n }\n \n RETURN_TYPES = (\"CONTROL_NET_WEIGHTS\", \"TIMESTEP_KEYFRAME\",)\n RETURN_NAMES = WEIGHTS_RETURN_NAMES\n FUNCTION = \"load_weights\"\n\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/weights/T2IAdapter\"\n\n def load_weights(self, weight_00, weight_01, weight_02, weight_03, flip_weights):\n weights = [weight_00, weight_01, weight_02, weight_03]\n weights = get_properly_arranged_t2i_weights(weights)\n weights = ControlWeightsImport.t2iadapter(weights, flip_weights=flip_weights)\n return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights)))"
},
{
"identifier": "CustomT2IAdapterWeightsImport",
"path": "imports/AdvancedControlNet/weight_nodes.py",
"snippet": "class CustomT2IAdapterWeightsImport:\n @classmethod\n def INPUT_TYPES(s):\n return {\n \"required\": {\n \"weight_00\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_01\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_02\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_03\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"flip_weights\": (\"BOOLEAN\", {\"default\": False}),\n },\n }\n \n RETURN_TYPES = (\"CONTROL_NET_WEIGHTS\", \"TIMESTEP_KEYFRAME\",)\n RETURN_NAMES = WEIGHTS_RETURN_NAMES\n FUNCTION = \"load_weights\"\n\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/weights/T2IAdapter\"\n\n def load_weights(self, weight_00, weight_01, weight_02, weight_03, flip_weights):\n weights = [weight_00, weight_01, weight_02, weight_03]\n weights = get_properly_arranged_t2i_weights(weights)\n weights = ControlWeightsImport.t2iadapter(weights, flip_weights=flip_weights)\n return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights)))"
},
{
"identifier": "LatentKeyframeGroupNodeImport",
"path": "imports/AdvancedControlNet/latent_keyframe_nodes.py",
"snippet": "class LatentKeyframeGroupNodeImport:\n @classmethod\n def INPUT_TYPES(s):\n return {\n \"required\": {\n \"index_strengths\": (\"STRING\", {\"multiline\": True, \"default\": \"\"}),\n },\n \"optional\": {\n \"prev_latent_kf\": (\"LATENT_KEYFRAME\", ),\n \"latent_optional\": (\"LATENT\", ),\n \"print_keyframes\": (\"BOOLEAN\", {\"default\": False})\n }\n }\n \n RETURN_NAMES = (\"LATENT_KF\", )\n RETURN_TYPES = (\"LATENT_KEYFRAME\", )\n FUNCTION = \"load_keyframes\"\n\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/keyframes\"\n\n def validate_index(self, index: int, latent_count: int = 0, is_range: bool = False, allow_negative = False) -> int:\n # if part of range, do nothing\n if is_range:\n return index\n # otherwise, validate index\n # validate not out of range - only when latent_count is passed in\n if latent_count > 0 and index > latent_count-1:\n raise IndexError(f\"Index '{index}' out of range for the total {latent_count} latents.\")\n # if negative, validate not out of range\n if index < 0:\n if not allow_negative:\n raise IndexError(f\"Negative indeces not allowed, but was {index}.\")\n conv_index = latent_count+index\n if conv_index < 0:\n raise IndexError(f\"Index '{index}', converted to '{conv_index}' out of range for the total {latent_count} latents.\")\n index = conv_index\n return index\n\n def convert_to_index_int(self, raw_index: str, latent_count: int = 0, is_range: bool = False, allow_negative = False) -> int:\n try:\n return self.validate_index(int(raw_index), latent_count=latent_count, is_range=is_range, allow_negative=allow_negative)\n except ValueError as e:\n raise ValueError(f\"index '{raw_index}' must be an integer.\", e)\n\n def convert_to_latent_keyframes(self, latent_indeces: str, latent_count: int) -> set[LatentKeyframeImport]:\n if not latent_indeces:\n return set()\n int_latent_indeces = [i for i in range(0, latent_count)]\n allow_negative = latent_count > 0\n chosen_indeces = set()\n # parse string - allow positive ints, negative ints, and ranges separated by ':'\n groups = latent_indeces.split(\",\")\n groups = [g.strip() for g in groups]\n for g in groups:\n # parse strengths - default to 1.0 if no strength given\n strength = 1.0\n if '=' in g:\n g, strength_str = g.split(\"=\", 1)\n g = g.strip()\n try:\n strength = float(strength_str.strip())\n except ValueError as e:\n raise ValueError(f\"strength '{strength_str}' must be a float.\", e)\n if strength < 0:\n raise ValueError(f\"Strength '{strength}' cannot be negative.\")\n # parse range of indeces (e.g. 
2:16)\n if ':' in g:\n index_range = g.split(\":\", 1)\n index_range = [r.strip() for r in index_range]\n start_index = self.convert_to_index_int(index_range[0], latent_count=latent_count, is_range=True, allow_negative=allow_negative)\n end_index = self.convert_to_index_int(index_range[1], latent_count=latent_count, is_range=True, allow_negative=allow_negative)\n # if latents were passed in, base indeces on known latent count\n if len(int_latent_indeces) > 0:\n for i in int_latent_indeces[start_index:end_index]:\n chosen_indeces.add(LatentKeyframeImport(i, strength))\n # otherwise, assume indeces are valid\n else:\n for i in range(start_index, end_index):\n chosen_indeces.add(LatentKeyframeImport(i, strength))\n # parse individual indeces\n else:\n chosen_indeces.add(LatentKeyframeImport(self.convert_to_index_int(g, latent_count=latent_count, allow_negative=allow_negative), strength))\n return chosen_indeces\n\n def load_keyframes(self,\n index_strengths: str,\n prev_latent_kf: LatentKeyframeGroupImport=None,\n prev_latent_keyframe: LatentKeyframeGroupImport=None, # old name\n latent_image_opt=None,\n print_keyframes=False):\n prev_latent_keyframe = prev_latent_keyframe if prev_latent_keyframe else prev_latent_kf\n if not prev_latent_keyframe:\n prev_latent_keyframe = LatentKeyframeGroupImport()\n else:\n prev_latent_keyframe = prev_latent_keyframe.clone()\n curr_latent_keyframe = LatentKeyframeGroupImport()\n\n latent_count = -1\n if latent_image_opt:\n latent_count = latent_image_opt['samples'].size()[0]\n latent_keyframes = self.convert_to_latent_keyframes(index_strengths, latent_count=latent_count)\n\n for latent_keyframe in latent_keyframes:\n curr_latent_keyframe.add(latent_keyframe)\n \n if print_keyframes:\n for keyframe in curr_latent_keyframe.keyframes:\n logger.info(f\"keyframe {keyframe.batch_index}:{keyframe.strength}\")\n\n # replace values with prev_latent_keyframes\n for latent_keyframe in prev_latent_keyframe.keyframes:\n curr_latent_keyframe.add(latent_keyframe)\n\n return (curr_latent_keyframe,)"
},
{
"identifier": "LatentKeyframeInterpolationNodeImport",
"path": "imports/AdvancedControlNet/latent_keyframe_nodes.py",
"snippet": "class LatentKeyframeInterpolationNodeImport:\n \n @classmethod\n def INPUT_TYPES(s):\n return {\n \"required\": {\n \"batch_index_from\": (\"INT\", {\"default\": 0, \"min\": -10000, \"max\": 10000, \"step\": 1}),\n \"batch_index_to_excl\": (\"INT\", {\"default\": 0, \"min\": -10000, \"max\": 10000, \"step\": 1}),\n \"strength_from\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.0001}, ),\n \"strength_to\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.0001}, ),\n \"interpolation\": ([\"linear\", \"ease-in\", \"ease-out\", \"ease-in-out\"], ),\n \"revert_direction_at_midpoint\": (\"BOOLEAN\", {\"default\": False}),\n },\n \"optional\": {\n \"prev_latent_keyframe\": (\"LATENT_KEYFRAME\", ),\n }\n }\n\n RETURN_TYPES = (\"LATENT_KEYFRAME\", )\n FUNCTION = \"load_keyframe\"\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/keyframes\"\n\n def load_keyframe(self,\n batch_index_from: int,\n strength_from: float,\n batch_index_to_excl: int,\n strength_to: float,\n interpolation: str,\n revert_direction_at_midpoint: bool=False,\n last_key_frame_position: int=0,\n i=0,\n number_of_items=0,\n buffer=0,\n prev_latent_keyframe: LatentKeyframeGroupImport=None):\n\n\n\n if not prev_latent_keyframe:\n prev_latent_keyframe = LatentKeyframeGroupImport()\n else: \n prev_latent_keyframe = prev_latent_keyframe.clone()\n \n curr_latent_keyframe = LatentKeyframeGroupImport()\n\n weights, frame_numbers = calculate_weights(batch_index_from, batch_index_to_excl, strength_from, strength_to, interpolation, revert_direction_at_midpoint, last_key_frame_position,i,number_of_items, buffer)\n \n for i, frame_number in enumerate(frame_numbers):\n keyframe = LatentKeyframeImport(frame_number, float(weights[i])) \n curr_latent_keyframe.add(keyframe)\n\n for latent_keyframe in prev_latent_keyframe.keyframes:\n curr_latent_keyframe.add(latent_keyframe)\n\n\n return (weights, frame_numbers, curr_latent_keyframe,)"
},
{
"identifier": "LatentKeyframeBatchedGroupNodeImport",
"path": "imports/AdvancedControlNet/latent_keyframe_nodes.py",
"snippet": "class LatentKeyframeBatchedGroupNodeImport:\n @classmethod\n def INPUT_TYPES(s):\n return {\n \"required\": {\n \"float_strengths\": (\"FLOAT\", {\"default\": -1, \"min\": -1, \"step\": 0.001, \"forceInput\": True}),\n },\n \"optional\": {\n \"prev_latent_kf\": (\"LATENT_KEYFRAME\", ),\n \"print_keyframes\": (\"BOOLEAN\", {\"default\": False})\n }\n }\n\n RETURN_NAMES = (\"LATENT_KF\", )\n RETURN_TYPES = (\"LATENT_KEYFRAME\", )\n FUNCTION = \"load_keyframe\"\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/keyframes\"\n\n def load_keyframe(self, float_strengths: Union[float, list[float]],\n prev_latent_kf: LatentKeyframeGroupImport=None,\n prev_latent_keyframe: LatentKeyframeGroupImport=None, # old name\n print_keyframes=False):\n prev_latent_keyframe = prev_latent_keyframe if prev_latent_keyframe else prev_latent_kf\n if not prev_latent_keyframe:\n prev_latent_keyframe = LatentKeyframeGroupImport()\n else:\n prev_latent_keyframe = prev_latent_keyframe.clone()\n curr_latent_keyframe = LatentKeyframeGroupImport()\n\n # if received a normal float input, do nothing\n if type(float_strengths) in (float, int):\n logger.info(\"No batched float_strengths passed into Latent Keyframe Batch Group node; will not create any new keyframes.\")\n # if iterable, attempt to create LatentKeyframes with chosen strengths\n elif isinstance(float_strengths, Iterable):\n for idx, strength in enumerate(float_strengths):\n keyframe = LatentKeyframeImport(idx, strength)\n curr_latent_keyframe.add(keyframe)\n else:\n raise ValueError(f\"Expected strengths to be an iterable input, but was {type(float_strengths).__repr__}.\") \n\n if print_keyframes:\n for keyframe in curr_latent_keyframe.keyframes:\n logger.info(f\"keyframe {keyframe.batch_index}:{keyframe.strength}\")\n\n # replace values with prev_latent_keyframes\n for latent_keyframe in prev_latent_keyframe.keyframes:\n curr_latent_keyframe.add(latent_keyframe)\n\n return (curr_latent_keyframe,)"
},
{
"identifier": "LatentKeyframeNodeImport",
"path": "imports/AdvancedControlNet/latent_keyframe_nodes.py",
"snippet": "class LatentKeyframeNodeImport:\n @classmethod\n def INPUT_TYPES(s):\n return {\n \"required\": {\n \"batch_index\": (\"INT\", {\"default\": 0, \"min\": -1000, \"max\": 1000, \"step\": 1}),\n \"strength\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n },\n \"optional\": {\n \"prev_latent_kf\": (\"LATENT_KEYFRAME\", ),\n }\n }\n\n RETURN_NAMES = (\"LATENT_KF\", )\n RETURN_TYPES = (\"LATENT_KEYFRAME\", )\n FUNCTION = \"load_keyframe\"\n\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/keyframes\"\n\n def load_keyframe(self,\n batch_index: int,\n strength: float,\n prev_latent_kf: LatentKeyframeGroupImport=None,\n prev_latent_keyframe: LatentKeyframeGroupImport=None, # old name\n ):\n prev_latent_keyframe = prev_latent_keyframe if prev_latent_keyframe else prev_latent_kf\n if not prev_latent_keyframe:\n prev_latent_keyframe = LatentKeyframeGroupImport()\n else:\n prev_latent_keyframe = prev_latent_keyframe.clone()\n keyframe = LatentKeyframeImport(batch_index, strength)\n prev_latent_keyframe.add(keyframe)\n return (prev_latent_keyframe,)"
},
{
"identifier": "logger",
"path": "imports/AdvancedControlNet/logger.py",
"snippet": "class ColoredFormatter(logging.Formatter):\n COLORS = {\n \"DEBUG\": \"\\033[0;36m\", # CYAN\n \"INFO\": \"\\033[0;32m\", # GREEN\n \"WARNING\": \"\\033[0;33m\", # YELLOW\n \"ERROR\": \"\\033[0;31m\", # RED\n \"CRITICAL\": \"\\033[0;37;41m\", # WHITE ON RED\n \"RESET\": \"\\033[0m\", # RESET COLOR\n }\n def format(self, record):"
}
] | import numpy as np
import folder_paths
from torch import Tensor
from .control import load_controlnet, convert_to_advanced, ControlWeightsImport, ControlWeightTypeImport,\
LatentKeyframeGroupImport, TimestepKeyframeImport, TimestepKeyframeGroupImport, is_advanced_controlnet
from .control import StrengthInterpolationImport as SI
from .weight_nodes import DefaultWeightsImport, ScaledSoftMaskedUniversalWeightsImport, ScaledSoftUniversalWeightsImport, SoftControlNetWeightsImport, CustomControlNetWeightsImport, \
SoftT2IAdapterWeightsImport, CustomT2IAdapterWeightsImport
from .latent_keyframe_nodes import LatentKeyframeGroupNodeImport, LatentKeyframeInterpolationNodeImport, LatentKeyframeBatchedGroupNodeImport, LatentKeyframeNodeImport
from .logger import logger | 9,431 |
class TimestepKeyframeNodeImport:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}, ),
},
"optional": {
"prev_timestep_kf": ("TIMESTEP_KEYFRAME", ),
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ),
"cn_weights": ("CONTROL_NET_WEIGHTS", ),
"latent_keyframe": ("LATENT_KEYFRAME", ),
"null_latent_kf_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.001}, ),
"inherit_missing": ("BOOLEAN", {"default": True}, ),
"guarantee_usage": ("BOOLEAN", {"default": True}, ),
"mask_optional": ("MASK", ),
#"interpolation": ([SI.LINEAR, SI.EASE_IN, SI.EASE_OUT, SI.EASE_IN_OUT, SI.NONE], {"default": SI.NONE}, ),
}
}
RETURN_NAMES = ("TIMESTEP_KF", )
RETURN_TYPES = ("TIMESTEP_KEYFRAME", )
FUNCTION = "load_keyframe"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/keyframes"
def load_keyframe(self,
start_percent: float,
strength: float=1.0,
cn_weights: ControlWeightsImport=None, control_net_weights: ControlWeightsImport=None, # old name
latent_keyframe: LatentKeyframeGroupImport=None,
prev_timestep_kf: TimestepKeyframeGroupImport=None, prev_timestep_keyframe: TimestepKeyframeGroupImport=None, # old name
null_latent_kf_strength: float=0.0,
inherit_missing=True,
guarantee_usage=True,
mask_optional=None,
interpolation: str=SI.NONE,):
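        # accept both the old and the new keyword-argument names below;
        # the old-style names take precedence when they are provided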
control_net_weights = control_net_weights if control_net_weights else cn_weights
prev_timestep_keyframe = prev_timestep_keyframe if prev_timestep_keyframe else prev_timestep_kf
if not prev_timestep_keyframe:
prev_timestep_keyframe = TimestepKeyframeGroupImport()
else:
prev_timestep_keyframe = prev_timestep_keyframe.clone()
|
class TimestepKeyframeNodeImport:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}, ),
},
"optional": {
"prev_timestep_kf": ("TIMESTEP_KEYFRAME", ),
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ),
"cn_weights": ("CONTROL_NET_WEIGHTS", ),
"latent_keyframe": ("LATENT_KEYFRAME", ),
"null_latent_kf_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.001}, ),
"inherit_missing": ("BOOLEAN", {"default": True}, ),
"guarantee_usage": ("BOOLEAN", {"default": True}, ),
"mask_optional": ("MASK", ),
#"interpolation": ([SI.LINEAR, SI.EASE_IN, SI.EASE_OUT, SI.EASE_IN_OUT, SI.NONE], {"default": SI.NONE}, ),
}
}
RETURN_NAMES = ("TIMESTEP_KF", )
RETURN_TYPES = ("TIMESTEP_KEYFRAME", )
FUNCTION = "load_keyframe"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/keyframes"
def load_keyframe(self,
start_percent: float,
strength: float=1.0,
cn_weights: ControlWeightsImport=None, control_net_weights: ControlWeightsImport=None, # old name
latent_keyframe: LatentKeyframeGroupImport=None,
prev_timestep_kf: TimestepKeyframeGroupImport=None, prev_timestep_keyframe: TimestepKeyframeGroupImport=None, # old name
null_latent_kf_strength: float=0.0,
inherit_missing=True,
guarantee_usage=True,
mask_optional=None,
interpolation: str=SI.NONE,):
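        # accept both the old and the new keyword-argument names below;
        # the old-style names take precedence when they are provided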
control_net_weights = control_net_weights if control_net_weights else cn_weights
prev_timestep_keyframe = prev_timestep_keyframe if prev_timestep_keyframe else prev_timestep_kf
if not prev_timestep_keyframe:
prev_timestep_keyframe = TimestepKeyframeGroupImport()
else:
prev_timestep_keyframe = prev_timestep_keyframe.clone() | keyframe = TimestepKeyframeImport(start_percent=start_percent, strength=strength, interpolation=interpolation, null_latent_kf_strength=null_latent_kf_strength, | 5 | 2023-11-11 01:26:26+00:00 | 12k |
Zaloog/kanban-python | src/kanban_python/controls.py | [
{
"identifier": "cfg",
"path": "src/kanban_python/config.py",
"snippet": "class KanbanConfig:\n def __init__(self, path=CONFIG_FILE_PATH) -> None:\n def __repr__(self) -> str:\n def save(self):\n def config(self) -> configparser.ConfigParser:\n def active_board(self) -> str:\n def active_board(self, new_board):\n def kanban_boards(self) -> list:\n def kanban_boards_dict(self) -> dict:\n def kanban_boards_dict(self, board_name: str) -> dict:\n def active_board_path(self) -> str:\n def show_footer(self):\n def show_footer(self, visible):\n def col_min_width(self) -> int:\n def col_min_width(self, new_width: int) -> None:\n def kanban_columns_dict(self) -> dict:\n def kanban_columns_dict(self, updated_dict) -> dict:\n def vis_cols(self) -> list:\n def done_limit(self) -> int:\n def done_limit(self, new_limit: int) -> None:\n def scanned_files(self) -> list:\n def scanned_files(self, new_files_to_scan: str) -> None:\n def scanned_patterns(self) -> list:\n def scanned_patterns(self, new_patterns_to_scan: str) -> None:\ndef create_init_config(conf_path=CONFIG_PATH, data_path=DATA_PATH):\ndef delete_current_folder_board_from_config(\n cfg=cfg, curr_path: str = str(Path.cwd())\n) -> None:\ndef check_if_board_name_exists_in_config(boardname: str, cfg=cfg) -> bool:\ndef check_if_current_active_board_in_board_list(cfg=cfg) -> bool:\ndef delete_board_from_config(board_name, cfg=cfg) -> None:\ndef check_config_exists(path=CONFIG_FILE_PATH) -> bool:\ndef get_json_path(boardname: str):"
},
{
"identifier": "DUMMY_DB",
"path": "src/kanban_python/constants.py",
"snippet": "DUMMY_DB = {1: DUMMY_TASK}"
},
{
"identifier": "KANBAN_BOARDS_PATH",
"path": "src/kanban_python/constants.py",
"snippet": "KANBAN_BOARDS_PATH = DATA_PATH / KANBAN_BOARDS_FOLDER_NAME"
},
{
"identifier": "REPORT_FILE_NAME",
"path": "src/kanban_python/constants.py",
"snippet": "REPORT_FILE_NAME = \"pykanban.md\""
},
{
"identifier": "REPORT_FILE_PATH",
"path": "src/kanban_python/constants.py",
"snippet": "REPORT_FILE_PATH = DATA_PATH / REPORTS_FOLDER_NAME"
},
{
"identifier": "TASK_FILE_NAME",
"path": "src/kanban_python/constants.py",
"snippet": "TASK_FILE_NAME = \"pykanban.json\""
},
{
"identifier": "create_config_table",
"path": "src/kanban_python/interface.py",
"snippet": "def create_config_table():\n settings_table = Table(\n title=\":hammer_and_wrench: [grey69]Settings Overview[/]:hammer_and_wrench:\",\n highlight=True,\n show_header=True,\n caption=f\"Your config file is located under [light_green]{CONFIG_FILE_PATH}[/]\",\n )\n for col in [\"Option\", \"Current Value\"]:\n settings_table.add_column(\n header=col,\n header_style=\"bold\",\n justify=\"left\",\n overflow=\"fold\",\n min_width=30,\n )\n for section in cfg.config:\n if section:\n settings_table.add_section()\n settings_table.add_row(f\"[blue]{section}[/]\", \"\")\n for key, val in cfg.config[section].items():\n settings_table.add_row(key, val)\n\n return settings_table"
},
{
"identifier": "create_github_like_report_table",
"path": "src/kanban_python/interface.py",
"snippet": "def create_github_like_report_table(boards_dict: dict):\n done_tasks = []\n for _, task_dict in boards_dict.items():\n done_tasks += [task for _, task in task_dict.items() if task[\"Complete_Time\"]]\n\n max_val, report_dict = create_dict_for_report_view(done_tasks)\n current_year = datetime.now().year\n done_tasks_this_year = [\n task\n for task in done_tasks\n if datetime.strptime(task[\"Complete_Time\"], \"%Y-%m-%d %H:%M:%S\").year\n == current_year\n ]\n\n gh_table = Table(\n title=f\"[{REPORT_COLORS[4]}]{len(done_tasks_this_year)}[/] Tasks completed\"\n + f\" in [{REPORT_COLORS[4]}]{current_year}[/]\",\n title_justify=\"left\",\n highlight=True,\n padding=False,\n show_header=True,\n box=None,\n caption=\"\\nless\"\n + \" \".join([f\"[{scale} on {scale}] [/] \" for scale in REPORT_COLORS])\n + \" more\",\n caption_justify=\"right\",\n )\n for work_week in range(0, 53):\n gh_table.add_column(\n header=\"\" if (work_week % 5 or work_week == 0) else f\"{work_week}\",\n header_style=\"bold\",\n justify=\"left\",\n overflow=\"fold\",\n )\n\n for day in range(1, 8):\n day_name = calendar.day_abbr[day - 1] if day % 2 else \"\"\n day_row_vals = [report_dict[day].get(week, 0) for week in range(1, 53)]\n mapped_day_row_vals = create_color_mapping(day_row_vals, max_val=max_val)\n\n gh_table.add_row(\n day_name,\n *[\n f\"[{REPORT_COLORS[i]} on {REPORT_COLORS[i]}] [/]\"\n for i in mapped_day_row_vals\n ],\n )\n\n return gh_table"
},
{
"identifier": "create_table",
"path": "src/kanban_python/interface.py",
"snippet": "def create_table(data: dict) -> Table:\n status_dict = create_status_dict_for_rows(data=data, vis_cols=cfg.vis_cols)\n\n table_name = cfg.active_board\n table = Table(\n title=f\"[blue]Active Board: {table_name}[/]\",\n highlight=True,\n show_header=True,\n show_footer=True if cfg.show_footer == \"True\" else False,\n caption=BOARD_CAPTION_STRING,\n )\n\n for i, category in enumerate([COLOR_DICT.get(col, col) for col in cfg.vis_cols]):\n table.add_column(\n header=category + f\"\\t({len(status_dict[cfg.vis_cols[i]])} Task/s)\",\n header_style=\"bold\",\n justify=\"left\",\n overflow=\"fold\",\n footer=FOOTER[0]\n if i == 0\n else FOOTER[1]\n if i == len(cfg.vis_cols) - 1\n else \"\",\n min_width=cfg.col_min_width,\n )\n\n for row_tasks in zip_longest(*status_dict.values()):\n table.add_row(*row_tasks)\n\n return table"
},
{
"identifier": "input_ask_for_action",
"path": "src/kanban_python/interface.py",
"snippet": "def input_ask_for_action():\n console.print(\n \"[yellow]Whats up!?[/], how can I help you being productive today :rocket:?\"\n )\n console.print(\n \"\\t[1] :clipboard: [green]Create new Task[/]\"\n + 2 * \"\\t\"\n + \"[2] :clockwise_vertical_arrows: [bold cornflower_blue]Update/Check Task[/]\"\n )\n console.print(\n \"\\t[3] :bookmark_tabs: [bold yellow]Change Kanban Board[/]\"\n + \"\\t\"\n + \"[4] :magnifying_glass_tilted_left: [bold blue]Show Task Details[/]\"\n )\n console.print(\n \"\\t[5] :cross_mark: [red]Delete Kanban Board[/]\"\n + \"\\t\"\n + \"[6] :hammer_and_wrench: [grey69]Show Current Settings[/]\"\n )\n action = IntPrompt.ask(\n prompt=\"Choose wisely :books:\",\n choices=[\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"6\",\n ],\n show_choices=False,\n )\n return action"
},
{
"identifier": "input_ask_for_action_settings",
"path": "src/kanban_python/interface.py",
"snippet": "def input_ask_for_action_settings() -> int:\n console.print(\n \"[yellow]Not happy with current settings!?[/],\"\n + \"which [blue]Section[/] do you want to change :hammer_and_wrench:?\"\n )\n console.print(\n \"\\t[1] :clipboard: [blue]settings.general[/]\"\n + 2 * \"\\t\"\n + \"[2] :eye: [blue]settings.columns.visibility[/]\"\n )\n console.print(\n \"\\t[3] :magnifying_glass_tilted_left: [blue]settings.scanner[/]\"\n + 2 * \"\\t\"\n + \"[4] :cross_mark: [red]Go back to Kanban Board[/]\"\n )\n action = IntPrompt.ask(\n prompt=\"Choose [blue]Section[/], where you want to change the Current Value\",\n choices=[\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n ],\n show_choices=False,\n )\n return action"
},
{
"identifier": "input_ask_for_change_board",
"path": "src/kanban_python/interface.py",
"snippet": "def input_ask_for_change_board(boards_dict: dict) -> str:\n boards = cfg.kanban_boards\n max_board_len = max([len(b) for b in cfg.kanban_boards])\n\n # if active Board is not in Board List dont show default\n try:\n active_board_idx = boards.index(cfg.active_board) + 1\n except ValueError:\n active_board_idx = None\n\n for idx, (board, board_data) in enumerate(boards_dict.items(), start=1):\n status_dict = create_status_dict_for_rows(board_data, cfg.vis_cols)\n days_left_list = [\n calculate_days_left_till_due(val[\"Due_Date\"])\n for val in board_data.values()\n if (val.get(\"Due_Date\") and (val[\"Status\"] in [\"Ready\", \"Doing\"]))\n ]\n # Use -9999 to as placeholder for no tasks to make comparison later\n days_left = min(days_left_list) if days_left_list else -9999\n console.print(\n f\"[{idx}] {board}\"\n + \" \" * ((max_board_len - len(board) + 1))\n + \" | \".join(\n [\n f\"{COLOR_DICT[col]}: {len(status_dict[col]):02d}\"\n for col in cfg.vis_cols\n ]\n )\n + (\n f\"\\t next due in {days_left} day/s\"\n if days_left > 0\n else f\"[red]\\t task {-days_left} day/s overdue[/]\"\n if days_left != -9999\n else \"\\t no dues present here\"\n )\n )\n\n answer = IntPrompt.ask(\n prompt=\"Which board to activate\",\n choices=[f\"{i}\" for i, _ in enumerate(boards, start=1)],\n show_choices=False,\n default=active_board_idx,\n show_default=True,\n )\n return boards[int(answer) - 1]"
},
{
"identifier": "input_ask_for_delete_board",
"path": "src/kanban_python/interface.py",
"snippet": "def input_ask_for_delete_board() -> str:\n boards = [b for b in cfg.kanban_boards]\n for idx, board in enumerate(boards, start=1):\n console.print(f\"[{idx}] {board}\")\n\n answer = IntPrompt.ask(\n prompt=\"Which board to delete\",\n choices=[f\"{i}\" for i, _ in enumerate(boards, start=1)],\n show_choices=False,\n )\n return boards[int(answer) - 1]"
},
{
"identifier": "input_ask_for_new_board_name",
"path": "src/kanban_python/interface.py",
"snippet": "def input_ask_for_new_board_name() -> str:\n return Prompt.ask(\n prompt=\"A new folder will be created for your board\\n\"\n + \":warning: [yellow]Only[/] use alpha-numeric characters or\"\n + \" [green]'-', '_', ' '[/] for new board names.\\n\"\n + \"What should the new board be called?\"\n )"
},
{
"identifier": "input_ask_which_task_to_update",
"path": "src/kanban_python/interface.py",
"snippet": "def input_ask_which_task_to_update(data: dict) -> str:\n choice_task_ids = [\n id for id, task in data.items() if task[\"Status\"] in cfg.vis_cols\n ]\n task_id_to_update = IntPrompt.ask(\n prompt=\"Which Task to update? Select an [[cyan]Id[/]]\",\n choices=choice_task_ids,\n show_choices=False,\n )\n return str(task_id_to_update)"
},
{
"identifier": "input_ask_which_tasks_to_show",
"path": "src/kanban_python/interface.py",
"snippet": "def input_ask_which_tasks_to_show(choices):\n return Prompt.ask(\n prompt=\"What Task/s to show? Select an [[cyan]Id[/]] or ([orange3]Tag[/])?\",\n default=False,\n show_default=False,\n choices=choices,\n show_choices=False,\n )"
},
{
"identifier": "input_change_column_settings",
"path": "src/kanban_python/interface.py",
"snippet": "def input_change_column_settings():\n updated_column_dict = {}\n for col, vis in cfg.kanban_columns_dict.items():\n new_visible = Confirm.ask(\n prompt=f\"Should Column {COLOR_DICT.get(col,col)} be visible?\",\n default=True if vis == \"True\" else False,\n show_default=True,\n )\n updated_column_dict[col] = \"True\" if new_visible else \"False\"\n\n return updated_column_dict"
},
{
"identifier": "input_change_done_limit_settings",
"path": "src/kanban_python/interface.py",
"snippet": "def input_change_done_limit_settings() -> int:\n done_limit = IntPrompt.ask(\n prompt=f\"What should the Limit of Tasks in {COLOR_DICT.get('Done','Done')} \"\n + f\"Column be, before moving to {COLOR_DICT.get('Archived','Archived')}?\",\n default=cfg.done_limit,\n show_default=True,\n )\n\n return str(done_limit)"
},
{
"identifier": "input_change_files_to_scan_settings",
"path": "src/kanban_python/interface.py",
"snippet": "def input_change_files_to_scan_settings():\n files_to_scan = Prompt.ask(\n prompt=\"Which Files to scan? Enter [green]' '[/] separated File Endings\",\n default=\" \".join(cfg.scanned_files),\n show_default=True,\n )\n\n return files_to_scan"
},
{
"identifier": "input_change_footer_settings",
"path": "src/kanban_python/interface.py",
"snippet": "def input_change_footer_settings():\n footer_visible = Confirm.ask(\n prompt=\"Should Footer be visible?\",\n default=True if cfg.show_footer == \"True\" else False,\n show_default=True,\n )\n\n return footer_visible"
},
{
"identifier": "input_change_min_col_width_settings",
"path": "src/kanban_python/interface.py",
"snippet": "def input_change_min_col_width_settings():\n new_min_col_width = IntPrompt.ask(\n prompt=\"What should the minimum Column Width be?\",\n default=cfg.col_min_width,\n show_default=True,\n )\n\n return new_min_col_width"
},
{
"identifier": "input_change_patterns_to_scan_settings",
"path": "src/kanban_python/interface.py",
"snippet": "def input_change_patterns_to_scan_settings():\n files_to_scan = Prompt.ask(\n prompt=\"Which Patterns to scan? Enter [green]','[/] separated Patterns\",\n default=\",\".join(cfg.scanned_patterns),\n show_default=True,\n )\n\n return files_to_scan"
},
{
"identifier": "input_confirm_add_todos_to_board",
"path": "src/kanban_python/interface.py",
"snippet": "def input_confirm_add_todos_to_board(todos: list) -> bool:\n # Question Also print tasks already in Board?\n console.print(f\"Found [blue]{len(todos)}[/] TODOs.\")\n if len(todos) > 10:\n if input_confirm_show_all_todos():\n print_all_todos(todos)\n else:\n print_all_todos(todos)\n\n return Confirm.ask(\n prompt=\"Add found Tasks to active board?\", default=False, show_default=True\n )"
},
{
"identifier": "input_confirm_delete_board",
"path": "src/kanban_python/interface.py",
"snippet": "def input_confirm_delete_board(name) -> bool:\n return Confirm.ask(\n f\"Are you sure you want to delete the Board '{name}':question_mark:\"\n )"
},
{
"identifier": "input_confirm_set_board_active",
"path": "src/kanban_python/interface.py",
"snippet": "def input_confirm_set_board_active(name) -> bool:\n return Confirm.ask(\n f\"Do you want to set the Board '{name}' as active:question_mark:\"\n )"
},
{
"identifier": "input_create_new_task",
"path": "src/kanban_python/interface.py",
"snippet": "def input_create_new_task() -> dict:\n title = Prompt.ask(\n prompt=\"[1/5] Add Task Title\",\n )\n\n description = Prompt.ask(\n prompt=\"[2/5] Add Task Description\",\n show_default=True,\n default=\"\",\n )\n\n tag = Prompt.ask(\n prompt=\"[3/5] Add a Tag\",\n show_default=True,\n default=\"ETC\",\n )\n\n while True:\n due_date = Prompt.ask(\n prompt=\"[4/5] Add a Due Date (YYYY-MM-DD)\",\n show_default=True,\n default=\"\",\n )\n if not due_date or check_due_date_format(date_str=due_date):\n break\n else:\n console.print(\n f\":warning: '{due_date}' has [red]not[/] \"\n + \"the right format YYYY-MM-DD\"\n )\n\n console.print(f\"\\t[1] {COLOR_DICT['Ready']}\")\n console.print(f\"\\t[2] {COLOR_DICT['Doing']}\")\n\n status = IntPrompt.ask(\n prompt=\"[5/5] Status of Task\",\n show_choices=False,\n choices=[\"1\", \"2\"],\n show_default=True,\n default=\"1\",\n )\n\n new_task = {\n \"Title\": title,\n \"Description\": description,\n \"Status\": \"Ready\" if str(status) == \"1\" else \"Doing\",\n \"Tag\": tag.upper(),\n \"Creation_Date\": current_time_to_str(),\n \"Due_Date\": due_date_date_to_datetime(due_date),\n \"Begin_Time\": current_time_to_str() if str(status) == \"2\" else \"\",\n \"Complete_Time\": \"\",\n \"Duration\": 0,\n }\n return new_task"
},
{
"identifier": "input_update_task",
"path": "src/kanban_python/interface.py",
"snippet": "def input_update_task(current_task: dict) -> dict:\n title = input_update_task_title(current_task[\"Title\"])\n description = input_update_task_description(current_task[\"Description\"])\n tag = input_update_task_tag(current_task[\"Tag\"])\n due_date = input_update_due_date(current_task.get(\"Due_Date\", \"\"))\n status = input_ask_to_what_status_to_move(current_task[\"Title\"])\n\n if (status == \"Doing\") and (current_task[\"Status\"] != \"Doing\"):\n start_doing = current_time_to_str()\n stop_doing = current_task.get(\"Complete_Time\", \"\")\n duration = current_task.get(\"Duration\", 0)\n elif (status != \"Doing\") and (current_task[\"Status\"] == \"Doing\"):\n start_doing = current_task.get(\"Begin_Time\", \"\")\n stop_doing = current_time_to_str()\n duration = calculate_time_delta_str(\n start_time_str=current_task.get(\"Begin_Time\", \"\"), end_time_str=stop_doing\n ) + current_task.get(\"Duration\", 0)\n else:\n start_doing = current_task.get(\"Begin_Time\", \"\")\n stop_doing = current_task.get(\"Complete_Time\", \"\")\n duration = current_task.get(\"Duration\", 0)\n\n if status == \"Done\":\n stop_doing = current_time_to_str()\n console.print(\n f\":sparkle: Congrats, you just completed '{title}'\"\n + f\" after {duration} minutes :muscle:\"\n )\n\n updated_task = {\n \"Title\": title,\n \"Description\": description,\n \"Status\": status,\n \"Tag\": tag.upper(),\n \"Due_Date\": due_date,\n \"Begin_Time\": start_doing,\n \"Complete_Time\": stop_doing,\n \"Duration\": duration,\n }\n current_task.update(updated_task)\n return current_task"
},
{
"identifier": "check_board_name_valid",
"path": "src/kanban_python/utils.py",
"snippet": "def get_motivational_quote() -> str:\ndef current_time_to_str() -> str:\ndef calculate_time_delta_str(start_time_str: str, end_time_str: str) -> float:\ndef create_status_dict_for_rows(data: dict, vis_cols: list) -> dict:\ndef check_if_done_col_leq_X(cfg, data: dict) -> bool:\ndef check_if_there_are_visible_tasks_in_board(data: dict, vis_cols: list) -> bool:\ndef move_first_done_task_to_archive(data: dict):\ndef delete_json_file(db_path: str) -> None:\ndef check_board_name_valid(boardname: str):\ndef scan_files(path=Path.cwd(), endings: list = [\".py\"]):\n def recursive_search(path, file_list: list, progress):\ndef scan_for_todos(\n file_paths: list, rel_path=Path.cwd(), patterns: list = [\"#TODO\", \"# TODO\"]\n) -> list:\ndef split_todo_in_tag_and_title(todo: str, patterns: list):\ndef get_tag_id_choices(data_dict: dict, vis_cols: list) -> list:\ndef check_scanner_files_valid(files: str) -> bool:\ndef check_scanner_patterns_valid(patterns: str) -> bool:\ndef get_iso_calender_info(date_str: str):\ndef create_dict_for_report_view(completed_tasks: list):\ndef create_color_mapping(amount_list: list, max_val: int):\ndef create_report_document(boards_dict: dict):\ndef check_due_date_format(date_str: str) -> bool:\ndef due_date_datetime_to_date(date_datetime: str) -> str:\ndef due_date_date_to_datetime(date_str: str) -> str:\ndef calculate_days_left_till_due(due_date: str):"
}
] | from json import dump, load
from rich.pretty import pprint
from .config import (
cfg,
check_if_board_name_exists_in_config,
check_if_current_active_board_in_board_list,
delete_board_from_config,
get_json_path,
)
from .constants import (
DUMMY_DB,
KANBAN_BOARDS_PATH,
REPORT_FILE_NAME,
REPORT_FILE_PATH,
TASK_FILE_NAME,
)
from .interface import (
create_config_table,
create_github_like_report_table,
create_table,
input_ask_for_action,
input_ask_for_action_settings,
input_ask_for_change_board,
input_ask_for_delete_board,
input_ask_for_new_board_name,
input_ask_which_task_to_update,
input_ask_which_tasks_to_show,
input_change_column_settings,
input_change_done_limit_settings,
input_change_files_to_scan_settings,
input_change_footer_settings,
input_change_min_col_width_settings,
input_change_patterns_to_scan_settings,
input_confirm_add_todos_to_board,
input_confirm_delete_board,
input_confirm_set_board_active,
input_create_new_task,
input_update_task,
)
from .utils import (
check_board_name_valid,
check_if_done_col_leq_X,
check_if_there_are_visible_tasks_in_board,
check_scanner_files_valid,
check_scanner_patterns_valid,
console,
create_report_document,
current_time_to_str,
delete_json_file,
get_tag_id_choices,
move_first_done_task_to_archive,
scan_files,
scan_for_todos,
split_todo_in_tag_and_title,
) | 7,223 | # Action 5
def delete_kanban_board():
board_to_delete = input_ask_for_delete_board()
if input_confirm_delete_board(board_to_delete):
board_to_delete_path = cfg.kanban_boards_dict[board_to_delete]
delete_json_file(board_to_delete_path)
delete_board_from_config(board_to_delete)
def show():
if not cfg.kanban_boards:
console.print(":warning: [red]No Boards created yet[/]:warning:")
console.print("Use 'kanban init' to create a new kanban board.")
raise KeyboardInterrupt
if not check_if_current_active_board_in_board_list():
console.print(
"[yellow]Hmm, Something went wrong.[/] "
+ f"The active board '{cfg.active_board}' is not in the list of boards."
)
change_kanban_board()
show()
return
db_data = read_db()
table = create_table(data=db_data)
console.print(table)
# Scan Functionality
#####################################################################################
def add_todos_to_board():
files = scan_files(endings=cfg.scanned_files)
todos = scan_for_todos(file_paths=files, patterns=cfg.scanned_patterns)
if not todos:
console.print(
":cross_mark: [red]Nothing found that "
+ "matches any of your provided patterns.[/]"
)
return
# TODO Write Docs for kanban scan functionality
# BUG This pattern also works
if input_confirm_add_todos_to_board(todos=todos):
todo_task_list = []
for task, file in todos:
tag, title = split_todo_in_tag_and_title(task, cfg.scanned_patterns)
new_task = {
"Title": title,
"Description": f"from {file}",
"Status": "Ready",
"Tag": tag,
"Creation_Date": current_time_to_str(),
"Begin_Time": "",
"Complete_Time": "",
"Duration": 0,
}
todo_task_list.append(new_task)
add_tasks_to_db(tasks=todo_task_list)
# Config Settings
#####################################################################################
def change_settings():
while True:
show_settings()
settings_selection = input_ask_for_action_settings()
if settings_selection == 1:
change_kanban_board()
new_min_col_widths = input_change_min_col_width_settings()
cfg.col_min_width = new_min_col_widths
done_limit = input_change_done_limit_settings()
cfg.done_limit = done_limit
footer_visible = input_change_footer_settings()
cfg.show_footer = "True" if footer_visible else "False"
if settings_selection == 2:
updated_col_config = input_change_column_settings()
cfg.kanban_columns_dict = updated_col_config
if settings_selection == 3:
while True:
new_files_to_scan = input_change_files_to_scan_settings()
if check_scanner_files_valid(new_files_to_scan):
cfg.scanned_files = new_files_to_scan
break
console.print(
f":warning: '{new_files_to_scan}' is [red]not[/] a valid."
)
while True:
new_patterns_to_scan = input_change_patterns_to_scan_settings()
if check_scanner_patterns_valid(new_patterns_to_scan):
cfg.scanned_patterns = new_patterns_to_scan
break
console.print(
f":warning: '{new_patterns_to_scan}' is [red]not[/] a valid."
)
if settings_selection == 4:
break
def show_settings():
settings_table = create_config_table()
console.print(settings_table)
# Report Creation
#####################################################################################
def create_report():
boards_dict = read_db("all")
gh_table = create_github_like_report_table(boards_dict)
console.print(gh_table)
if not REPORT_FILE_PATH.exists():
REPORT_FILE_PATH.mkdir(exist_ok=True)
| from __future__ import annotations
# DB Controls
#####################################################################################
def create_new_db() -> None:
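    # keep prompting until the user enters a syntactically valid board name
    # that does not already exist, then create the board folder and json file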
while True:
while True:
new_board_name = input_ask_for_new_board_name()
if check_board_name_valid(new_board_name):
break
console.print(f":warning: '{new_board_name}' is [red]not[/] a valid Name.")
if not check_if_board_name_exists_in_config(new_board_name):
break
console.print(
f":warning: Board '{new_board_name}' already exists, choose another Name."
)
cfg.kanban_boards_dict = new_board_name
# Options:
# 1. ~/.kanban-python/<BOARDNAME>.json
# 2. ~/.kanban-python/kanban_boards/<BOARDNAME>.json
# 3. ~/.kanban-python/kanban_boards/<BOARDNAME>/pykanban.json <- THIS
# 4. ~/.kanban-python/kanban_boards/<BOARDNAME>/<BOARDNAME>.json
new_db_path = KANBAN_BOARDS_PATH / new_board_name
if not new_db_path.exists():
new_db_path.mkdir()
with open(get_json_path(new_board_name), "w", encoding="utf-8") as f:
dump(DUMMY_DB, f, ensure_ascii=False, indent=4)
console.print(
f"Created new [orange3]{TASK_FILE_NAME}[/] file at "
+ f"[orange3]{KANBAN_BOARDS_PATH / new_board_name}[/] to save tasks."
)
if input_confirm_set_board_active(name=new_board_name):
cfg.active_board = new_board_name
def save_db(data):
path = cfg.active_board_path
with open(path, "w", encoding="utf-8") as f:
dump(data, f, ensure_ascii=False, indent=4)
def add_tasks_to_db(tasks: dict | list[dict]) -> None:
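    # accepts a single task dict or a list of task dicts; each new task gets
    # id max(existing ids) + 1, recomputed per task when a list is passed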
db_data = read_db()
if isinstance(tasks, dict):
new_id = str(max(int(i) for i in db_data.keys()) + 1)
db_data[new_id] = tasks
else:
for task in tasks:
new_id = str(max(int(i) for i in db_data.keys()) + 1)
db_data[new_id] = task
save_db(data=db_data)
def read_db(path: str = None) -> dict:
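    # path=None reads the currently active board, path="all" returns a
    # {board_name: board_data} dict for every configured board; a missing
    # file falls through to the board-change / re-creation handling below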
if not path:
path = cfg.active_board_path
if path == "all":
board_dict = {
b: read_single_board(b_path) for b, b_path in cfg.kanban_boards_dict.items()
}
return board_dict
try:
data = read_single_board(path)
return data
except FileNotFoundError:
print(path)
console.print(f":warning: No [orange3]{TASK_FILE_NAME}[/] file here anymore.")
console.print("Please change to another board.")
change_kanban_board()
console.print(f"[red]Seems like the previous {TASK_FILE_NAME} file was deleted[/]")
console.print(f"Create new [orange3]{TASK_FILE_NAME}[/] file here.")
create_new_db()
return read_db()
def read_single_board(path):
with open(path, "r") as file:
data = load(file)
return data
# User Action Controls
#####################################################################################
# Get User Action
def get_user_action():
return input_ask_for_action()
# Action 1
def add_new_task_to_db():
new_task = input_create_new_task()
add_tasks_to_db(tasks=new_task)
# Action 2
def update_task_from_db():
db_data = read_db()
if not check_if_there_are_visible_tasks_in_board(db_data, cfg.vis_cols):
console.print(":cross_mark:[red]No Tasks available on this Kanban board[/]")
return
selected_id = input_ask_which_task_to_update(db_data)
updated_task = input_update_task(current_task=db_data[selected_id])
db_data[selected_id] = updated_task
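    # keep the Done column within the configured limit by moving the first
    # Done task to the archive until the check passes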
while not check_if_done_col_leq_X(cfg=cfg, data=db_data):
first_task_id, archive_task = move_first_done_task_to_archive(data=db_data)
db_data[first_task_id] = archive_task
save_db(data=db_data)
# Action 3
def change_kanban_board():
boards_dict = read_db(path="all")
new_active_board = input_ask_for_change_board(boards_dict)
cfg.active_board = new_active_board
# Action 4
def show_tasks():
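    # tasks can be selected either by numeric id or by tag; every matching
    # task is pretty-printed with its main fields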
db_data = read_db()
choices = get_tag_id_choices(db_data, cfg.vis_cols)
selection_criteria = input_ask_which_tasks_to_show(choices)
for i, task in db_data.items():
if selection_criteria in [i, task["Tag"]]:
console.print(
20 * "[bold blue]#[/]" + f" Task {i} " + 20 * "[bold blue]#[/]"
)
pprint(
{
key: val
for key, val in task.items()
if key in ["Title", "Description", "Tag", "Status", "Due_Date"]
},
console=console,
expand_all=True,
)
# Action 5
def delete_kanban_board():
board_to_delete = input_ask_for_delete_board()
if input_confirm_delete_board(board_to_delete):
board_to_delete_path = cfg.kanban_boards_dict[board_to_delete]
delete_json_file(board_to_delete_path)
delete_board_from_config(board_to_delete)
def show():
if not cfg.kanban_boards:
console.print(":warning: [red]No Boards created yet[/]:warning:")
console.print("Use 'kanban init' to create a new kanban board.")
raise KeyboardInterrupt
if not check_if_current_active_board_in_board_list():
console.print(
"[yellow]Hmm, Something went wrong.[/] "
+ f"The active board '{cfg.active_board}' is not in the list of boards."
)
change_kanban_board()
show()
return
db_data = read_db()
table = create_table(data=db_data)
console.print(table)
# Scan Functionality
#####################################################################################
def add_todos_to_board():
files = scan_files(endings=cfg.scanned_files)
todos = scan_for_todos(file_paths=files, patterns=cfg.scanned_patterns)
if not todos:
console.print(
":cross_mark: [red]Nothing found that "
+ "matches any of your provided patterns.[/]"
)
return
# TODO Write Docs for kanban scan functionality
# BUG This pattern also works
if input_confirm_add_todos_to_board(todos=todos):
todo_task_list = []
for task, file in todos:
tag, title = split_todo_in_tag_and_title(task, cfg.scanned_patterns)
new_task = {
"Title": title,
"Description": f"from {file}",
"Status": "Ready",
"Tag": tag,
"Creation_Date": current_time_to_str(),
"Begin_Time": "",
"Complete_Time": "",
"Duration": 0,
}
todo_task_list.append(new_task)
add_tasks_to_db(tasks=todo_task_list)
# Config Settings
#####################################################################################
def change_settings():
while True:
show_settings()
settings_selection = input_ask_for_action_settings()
if settings_selection == 1:
change_kanban_board()
new_min_col_widths = input_change_min_col_width_settings()
cfg.col_min_width = new_min_col_widths
done_limit = input_change_done_limit_settings()
cfg.done_limit = done_limit
footer_visible = input_change_footer_settings()
cfg.show_footer = "True" if footer_visible else "False"
if settings_selection == 2:
updated_col_config = input_change_column_settings()
cfg.kanban_columns_dict = updated_col_config
if settings_selection == 3:
while True:
new_files_to_scan = input_change_files_to_scan_settings()
if check_scanner_files_valid(new_files_to_scan):
cfg.scanned_files = new_files_to_scan
break
console.print(
f":warning: '{new_files_to_scan}' is [red]not[/] a valid."
)
while True:
new_patterns_to_scan = input_change_patterns_to_scan_settings()
if check_scanner_patterns_valid(new_patterns_to_scan):
cfg.scanned_patterns = new_patterns_to_scan
break
console.print(
f":warning: '{new_patterns_to_scan}' is [red]not[/] a valid."
)
if settings_selection == 4:
break
def show_settings():
settings_table = create_config_table()
console.print(settings_table)
# Report Creation
#####################################################################################
def create_report():
boards_dict = read_db("all")
gh_table = create_github_like_report_table(boards_dict)
console.print(gh_table)
if not REPORT_FILE_PATH.exists():
REPORT_FILE_PATH.mkdir(exist_ok=True) | create_report_document(boards_dict=boards_dict) | 27 | 2023-11-11 14:43:55+00:00 | 12k |
AMAAI-Lab/mustango | diffusers/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py | [
{
"identifier": "ConfigMixin",
"path": "diffusers/src/diffusers/configuration_utils.py",
"snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. Stores all configuration parameters under `self.config` Also handles all\n methods for loading/downloading/saving classes inheriting from [`ConfigMixin`] with\n - [`~ConfigMixin.from_config`]\n - [`~ConfigMixin.save_config`]\n\n Class attributes:\n - **config_name** (`str`) -- A filename under which the config should stored when calling\n [`~ConfigMixin.save_config`] (should be overridden by parent class).\n - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be\n overridden by subclass).\n - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).\n - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the init function\n should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by\n subclass).\n \"\"\"\n config_name = None\n ignore_for_config = []\n has_compatibles = False\n\n _deprecated_kwargs = []\n\n def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n for key, value in kwargs.items():\n try:\n setattr(self, key, value)\n except AttributeError as err:\n logger.error(f\"Can't set {key} with value {value} for {self}\")\n raise err\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)\n\n def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the\n [`~ConfigMixin.from_config`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file will be saved (will be created if it does not exist).\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n # If we save using the predefined names, we can load using `from_config`\n output_config_file = os.path.join(save_directory, self.config_name)\n\n self.to_json_file(output_config_file)\n logger.info(f\"Configuration saved in {output_config_file}\")\n\n @classmethod\n def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):\n r\"\"\"\n Instantiate a Python class from a config dictionary\n\n Parameters:\n config (`Dict[str, Any]`):\n A config dictionary from which the Python class will be instantiated. 
Make sure to only load\n configuration files of compatible classes.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it being loaded) and initiate the Python class.\n `**kwargs` will be directly passed to the underlying scheduler/model's `__init__` method and eventually\n overwrite same named arguments of `config`.\n\n Examples:\n\n ```python\n >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler\n\n >>> # Download scheduler from huggingface.co and cache.\n >>> scheduler = DDPMScheduler.from_pretrained(\"google/ddpm-cifar10-32\")\n\n >>> # Instantiate DDIM scheduler class with same config as DDPM\n >>> scheduler = DDIMScheduler.from_config(scheduler.config)\n\n >>> # Instantiate PNDM scheduler class with same config as DDPM\n >>> scheduler = PNDMScheduler.from_config(scheduler.config)\n ```\n \"\"\"\n # <===== TO BE REMOVED WITH DEPRECATION\n # TODO(Patrick) - make sure to remove the following lines when config==\"model_path\" is deprecated\n if \"pretrained_model_name_or_path\" in kwargs:\n config = kwargs.pop(\"pretrained_model_name_or_path\")\n\n if config is None:\n raise ValueError(\"Please make sure to provide a config as the first positional argument.\")\n # ======>\n\n if not isinstance(config, dict):\n deprecation_message = \"It is deprecated to pass a pretrained model name or path to `from_config`.\"\n if \"Scheduler\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead.\"\n \" Otherwise, please make sure to pass a configuration dictionary instead. This functionality will\"\n \" be removed in v1.0.0.\"\n )\n elif \"Model\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a model, please use {cls}.load_config(...) followed by\"\n f\" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary\"\n \" instead. This functionality will be removed in v1.0.0.\"\n )\n deprecate(\"config-passed-as-path\", \"1.0.0\", deprecation_message, standard_warn=False)\n config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs)\n\n init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs)\n\n # Allow dtype to be specified on initialization\n if \"dtype\" in unused_kwargs:\n init_dict[\"dtype\"] = unused_kwargs.pop(\"dtype\")\n\n # add possible deprecated kwargs\n for deprecated_kwarg in cls._deprecated_kwargs:\n if deprecated_kwarg in unused_kwargs:\n init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg)\n\n # Return model and optionally state and/or unused_kwargs\n model = cls(**init_dict)\n\n # make sure to also save config parameters that might be used for compatible classes\n model.register_to_config(**hidden_dict)\n\n # add hidden kwargs of compatible classes to unused_kwargs\n unused_kwargs = {**unused_kwargs, **hidden_dict}\n\n if return_unused_kwargs:\n return (model, unused_kwargs)\n else:\n return model\n\n @classmethod\n def get_config_dict(cls, *args, **kwargs):\n deprecation_message = (\n f\" The function get_config_dict is deprecated. Please use {cls}.load_config instead. 
This function will be\"\n \" removed in version v1.0.0\"\n )\n deprecate(\"get_config_dict\", \"1.0.0\", deprecation_message, standard_warn=False)\n return cls.load_config(*args, **kwargs)\n\n @classmethod\n def load_config(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n return_unused_kwargs=False,\n return_commit_hash=False,\n **kwargs,\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n r\"\"\"\n Instantiate a Python class from a config dictionary\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* of a model repo on huggingface.co. Valid model ids should have an\n organization name, like `google/ddpm-celebahq-256`.\n - A path to a *directory* containing model weights saved using [`~ConfigMixin.save_config`], e.g.,\n `./my_model_directory/`.\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to delete incompletely received files. Will attempt to resume the download if such a\n file exists.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether or not to only look at local files (i.e., do not try to download the model).\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated\n when running `transformers-cli login` (stored in `~/.huggingface`).\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n In case the relevant files are located inside a subfolder of the model repo (either remote in\n huggingface.co or downloaded locally), you can specify the folder name here.\n return_unused_kwargs (`bool`, *optional*, defaults to `False):\n Whether unused keyword arguments of the config shall be returned.\n return_commit_hash (`bool`, *optional*, defaults to `False):\n Whether the commit_hash of the loaded configuration shall be returned.\n\n <Tip>\n\n It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated\n models](https://huggingface.co/docs/hub/models-gated#gated-models).\n\n </Tip>\n\n <Tip>\n\n Activate the special [\"offline-mode\"](https://huggingface.co/transformers/installation.html#offline-mode) to\n use this method in a firewalled environment.\n\n </Tip>\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n revision = kwargs.pop(\"revision\", None)\n _ = kwargs.pop(\"mirror\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n user_agent = kwargs.pop(\"user_agent\", {})\n\n user_agent = {**user_agent, \"file_type\": \"config\"}\n user_agent = http_user_agent(user_agent)\n\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n\n if cls.config_name is None:\n raise ValueError(\n \"`self.config_name` is not defined. Note that one should not load a config from \"\n \"`ConfigMixin`. 
Please make sure to define `config_name` in a class inheriting from `ConfigMixin`\"\n )\n\n if os.path.isfile(pretrained_model_name_or_path):\n config_file = pretrained_model_name_or_path\n elif os.path.isdir(pretrained_model_name_or_path):\n if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)):\n # Load from a PyTorch checkpoint\n config_file = os.path.join(pretrained_model_name_or_path, cls.config_name)\n elif subfolder is not None and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n ):\n config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n else:\n raise EnvironmentError(\n f\"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}.\"\n )\n else:\n try:\n # Load from URL or cache if already cached\n config_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=cls.config_name,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision,\n )\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier\"\n \" listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a\"\n \" token having permission to this repo with `use_auth_token` or log in with `huggingface-cli\"\n \" login`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for\"\n \" this model name. Check the model page at\"\n f\" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n \"There was a specific connection error when trying to load\"\n f\" {pretrained_model_name_or_path}:\\n{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it\"\n f\" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a\"\n f\" directory containing a {cls.config_name} file.\\nCheckout your internet connection or see how to\"\n \" run the library in offline mode at\"\n \" 'https://huggingface.co/docs/diffusers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
\"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a {cls.config_name} file\"\n )\n\n try:\n # Load config dict\n config_dict = cls._dict_from_json_file(config_file)\n\n commit_hash = extract_commit_hash(config_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise EnvironmentError(f\"It looks like the config file at '{config_file}' is not a valid JSON file.\")\n\n if not (return_unused_kwargs or return_commit_hash):\n return config_dict\n\n outputs = (config_dict,)\n\n if return_unused_kwargs:\n outputs += (kwargs,)\n\n if return_commit_hash:\n outputs += (commit_hash,)\n\n return outputs\n\n @staticmethod\n def _get_init_keys(cls):\n return set(dict(inspect.signature(cls.__init__).parameters).keys())\n\n @classmethod\n def extract_init_dict(cls, config_dict, **kwargs):\n # 0. Copy origin config dict\n original_dict = dict(config_dict.items())\n\n # 1. Retrieve expected config attributes from __init__ signature\n expected_keys = cls._get_init_keys(cls)\n expected_keys.remove(\"self\")\n # remove general kwargs if present in dict\n if \"kwargs\" in expected_keys:\n expected_keys.remove(\"kwargs\")\n # remove flax internal keys\n if hasattr(cls, \"_flax_internal_args\"):\n for arg in cls._flax_internal_args:\n expected_keys.remove(arg)\n\n # 2. Remove attributes that cannot be expected from expected config attributes\n # remove keys to be ignored\n if len(cls.ignore_for_config) > 0:\n expected_keys = expected_keys - set(cls.ignore_for_config)\n\n # load diffusers library to import compatible and original scheduler\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n\n if cls.has_compatibles:\n compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)]\n else:\n compatible_classes = []\n\n expected_keys_comp_cls = set()\n for c in compatible_classes:\n expected_keys_c = cls._get_init_keys(c)\n expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c)\n expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls)\n config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls}\n\n # remove attributes from orig class that cannot be expected\n orig_cls_name = config_dict.pop(\"_class_name\", cls.__name__)\n if orig_cls_name != cls.__name__ and hasattr(diffusers_library, orig_cls_name):\n orig_cls = getattr(diffusers_library, orig_cls_name)\n unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys\n config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig}\n\n # remove private attributes\n config_dict = {k: v for k, v in config_dict.items() if not k.startswith(\"_\")}\n\n # 3. Create keyword arguments that will be passed to __init__ from expected keyword arguments\n init_dict = {}\n for key in expected_keys:\n # if config param is passed to kwarg and is present in config dict\n # it should overwrite existing config dict key\n if key in kwargs and key in config_dict:\n config_dict[key] = kwargs.pop(key)\n\n if key in kwargs:\n # overwrite key\n init_dict[key] = kwargs.pop(key)\n elif key in config_dict:\n # use value from config dict\n init_dict[key] = config_dict.pop(key)\n\n # 4. Give nice warning if unexpected values have been passed\n if len(config_dict) > 0:\n logger.warning(\n f\"The config attributes {config_dict} were passed to {cls.__name__}, \"\n \"but are not expected and will be ignored. 
Please verify your \"\n f\"{cls.config_name} configuration file.\"\n )\n\n # 5. Give nice info if config attributes are initiliazed to default because they have not been passed\n passed_keys = set(init_dict.keys())\n if len(expected_keys - passed_keys) > 0:\n logger.info(\n f\"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values.\"\n )\n\n # 6. Define unused keyword arguments\n unused_kwargs = {**config_dict, **kwargs}\n\n # 7. Define \"hidden\" config parameters that were saved for compatible classes\n hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict}\n\n return init_dict, unused_kwargs, hidden_config_dict\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __repr__(self):\n return f\"{self.__class__.__name__} {self.to_json_string()}\"\n\n @property\n def config(self) -> Dict[str, Any]:\n \"\"\"\n Returns the config of the class as a frozen dictionary\n\n Returns:\n `Dict[str, Any]`: Config of the class.\n \"\"\"\n return self._internal_dict\n\n def to_json_string(self) -> str:\n \"\"\"\n Serializes this instance to a JSON string.\n\n Returns:\n `str`: String containing all the attributes that make up this configuration instance in JSON format.\n \"\"\"\n config_dict = self._internal_dict if hasattr(self, \"_internal_dict\") else {}\n config_dict[\"_class_name\"] = self.__class__.__name__\n config_dict[\"_diffusers_version\"] = __version__\n\n def to_json_saveable(value):\n if isinstance(value, np.ndarray):\n value = value.tolist()\n elif isinstance(value, PosixPath):\n value = str(value)\n return value\n\n config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()}\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n \"\"\"\n Save this instance to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file in which this configuration instance's parameters will be saved.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())"
},
{
"identifier": "register_to_config",
"path": "diffusers/src/diffusers/configuration_utils.py",
"snippet": "def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n for key, value in kwargs.items():\n try:\n setattr(self, key, value)\n except AttributeError as err:\n logger.error(f\"Can't set {key} with value {value} for {self}\")\n raise err\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)"
},
{
"identifier": "KarrasDiffusionSchedulers",
"path": "diffusers/src/diffusers/schedulers/scheduling_utils.py",
"snippet": "class KarrasDiffusionSchedulers(Enum):\n DDIMScheduler = 1\n DDPMScheduler = 2\n PNDMScheduler = 3\n LMSDiscreteScheduler = 4\n EulerDiscreteScheduler = 5\n HeunDiscreteScheduler = 6\n EulerAncestralDiscreteScheduler = 7\n DPMSolverMultistepScheduler = 8\n DPMSolverSinglestepScheduler = 9\n KDPM2DiscreteScheduler = 10\n KDPM2AncestralDiscreteScheduler = 11\n DEISMultistepScheduler = 12\n UniPCMultistepScheduler = 13"
},
{
"identifier": "SchedulerMixin",
"path": "diffusers/src/diffusers/schedulers/scheduling_utils.py",
"snippet": "class SchedulerMixin:\n \"\"\"\n Mixin containing common functions for the schedulers.\n\n Class attributes:\n - **_compatibles** (`List[str]`) -- A list of classes that are compatible with the parent class, so that\n `from_config` can be used from a class different than the one used to save the config (should be overridden\n by parent class).\n \"\"\"\n\n config_name = SCHEDULER_CONFIG_NAME\n _compatibles = []\n has_compatibles = True\n\n @classmethod\n def from_pretrained(\n cls,\n pretrained_model_name_or_path: Dict[str, Any] = None,\n subfolder: Optional[str] = None,\n return_unused_kwargs=False,\n **kwargs,\n ):\n r\"\"\"\n Instantiate a Scheduler class from a pre-defined JSON configuration file inside a directory or Hub repo.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* of a model repo on huggingface.co. Valid model ids should have an\n organization name, like `google/ddpm-celebahq-256`.\n - A path to a *directory* containing the schedluer configurations saved using\n [`~SchedulerMixin.save_pretrained`], e.g., `./my_model_directory/`.\n subfolder (`str`, *optional*):\n In case the relevant files are located inside a subfolder of the model repo (either remote in\n huggingface.co or downloaded locally), you can specify the folder name here.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to delete incompletely received files. Will attempt to resume the download if such a\n file exists.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether or not to only look at local files (i.e., do not try to download the model).\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated\n when running `transformers-cli login` (stored in `~/.huggingface`).\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n\n <Tip>\n\n It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated\n models](https://huggingface.co/docs/hub/models-gated#gated-models).\n\n </Tip>\n\n <Tip>\n\n Activate the special [\"offline-mode\"](https://huggingface.co/transformers/installation.html#offline-mode) to\n use this method in a firewalled environment.\n\n </Tip>\n\n \"\"\"\n config, kwargs, commit_hash = cls.load_config(\n pretrained_model_name_or_path=pretrained_model_name_or_path,\n subfolder=subfolder,\n return_unused_kwargs=True,\n return_commit_hash=True,\n **kwargs,\n )\n return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)\n\n def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a scheduler configuration object to the directory `save_directory`, so that it can be re-loaded using the\n [`~SchedulerMixin.from_pretrained`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file will be saved (will be created if it does not exist).\n \"\"\"\n self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)\n\n @property\n def compatibles(self):\n \"\"\"\n Returns all schedulers that are compatible with this scheduler\n\n Returns:\n `List[SchedulerMixin]`: List of compatible schedulers\n \"\"\"\n return self._get_compatibles()\n\n @classmethod\n def _get_compatibles(cls):\n compatible_classes_str = list(set([cls.__name__] + cls._compatibles))\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n compatible_classes = [\n getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)\n ]\n return compatible_classes"
},
{
"identifier": "SchedulerOutput",
"path": "diffusers/src/diffusers/schedulers/scheduling_utils.py",
"snippet": "class SchedulerOutput(BaseOutput):\n \"\"\"\n Base class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n \"\"\"\n\n prev_sample: torch.FloatTensor"
}
] | import math
import numpy as np
import torch
from typing import List, Optional, Tuple, Union
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput | 7,637 | # Copyright 2023 TSAIL Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver
# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time for t in [0, 1].
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
to that part of the diffusion process.
Args:
num_diffusion_timesteps (`int`): the number of betas to produce.
max_beta (`float`): the maximum beta to use; use values lower than 1 to
prevent singularities.
Returns:
        betas (`torch.Tensor`): the betas used by the scheduler to step the model outputs
"""
def alpha_bar(time_step):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return torch.tensor(betas, dtype=torch.float32)
| # Copyright 2023 TSAIL Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver
# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time for t in [0, 1].
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
to that part of the diffusion process.
Args:
num_diffusion_timesteps (`int`): the number of betas to produce.
max_beta (`float`): the maximum beta to use; use values lower than 1 to
prevent singularities.
Returns:
        betas (`torch.Tensor`): the betas used by the scheduler to step the model outputs
"""
def alpha_bar(time_step):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return torch.tensor(betas, dtype=torch.float32)
| class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin): | 0 | 2023-11-14 23:29:31+00:00 | 12k |
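For readers of the record above: `betas_for_alpha_bar` implements the cosine noise schedule, where alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2 and each beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at `max_beta`. The sketch below duplicates the function so it runs stand-alone and shows how the betas relate back to the cos^2 curve through a cumulative product; the qualitative behaviour in the comments (small betas near t = 0, `max_beta` at the final step) follows from the formula, while the exact printed numbers are left to the run.

import math

import torch


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
    # Same cosine alpha_bar construction as in the record above.
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


if __name__ == "__main__":
    betas = betas_for_alpha_bar(10)
    # The product of (1 - beta_i) telescopes to roughly alpha_bar(t_{i+1}) / alpha_bar(0),
    # up to the max_beta clip applied on the last step.
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    print(betas)           # grows from ~0.03 up to max_beta at the final step
    print(alphas_cumprod)  # falls from ~0.97 toward ~0, tracing the cos^2 curve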
BraveGroup/Drive-WM | tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py | [
{
"identifier": "TEXT_TO_IMAGE_BATCH_PARAMS",
"path": "tests/pipelines/pipeline_params.py",
"snippet": "TEXT_TO_IMAGE_BATCH_PARAMS = frozenset([\"prompt\", \"negative_prompt\"])"
},
{
"identifier": "TEXT_TO_IMAGE_IMAGE_PARAMS",
"path": "tests/pipelines/pipeline_params.py",
"snippet": "TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])"
},
{
"identifier": "TEXT_TO_IMAGE_PARAMS",
"path": "tests/pipelines/pipeline_params.py",
"snippet": "TEXT_TO_IMAGE_PARAMS = frozenset(\n [\n \"prompt\",\n \"height\",\n \"width\",\n \"guidance_scale\",\n \"negative_prompt\",\n \"prompt_embeds\",\n \"negative_prompt_embeds\",\n \"cross_attention_kwargs\",\n ]\n)"
},
{
"identifier": "PipelineKarrasSchedulerTesterMixin",
"path": "tests/pipelines/test_pipelines_common.py",
"snippet": "class PipelineKarrasSchedulerTesterMixin:\n \"\"\"\n This mixin is designed to be used with unittest.TestCase classes.\n It provides a set of common tests for each PyTorch pipeline that makes use of KarrasDiffusionSchedulers\n equivalence of dict and tuple outputs, etc.\n \"\"\"\n\n def test_karras_schedulers_shape(self):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n\n # make sure that PNDM does not need warm-up\n pipe.scheduler.register_to_config(skip_prk_steps=True)\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n inputs = self.get_dummy_inputs(torch_device)\n inputs[\"num_inference_steps\"] = 2\n\n if \"strength\" in inputs:\n inputs[\"num_inference_steps\"] = 4\n inputs[\"strength\"] = 0.5\n\n outputs = []\n for scheduler_enum in KarrasDiffusionSchedulers:\n if \"KDPM2\" in scheduler_enum.name:\n inputs[\"num_inference_steps\"] = 5\n\n scheduler_cls = getattr(diffusers, scheduler_enum.name)\n pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)\n output = pipe(**inputs)[0]\n outputs.append(output)\n\n if \"KDPM2\" in scheduler_enum.name:\n inputs[\"num_inference_steps\"] = 2\n\n assert check_same_shape(outputs)"
},
{
"identifier": "PipelineLatentTesterMixin",
"path": "tests/pipelines/test_pipelines_common.py",
"snippet": "class PipelineLatentTesterMixin:\n \"\"\"\n This mixin is designed to be used with PipelineTesterMixin and unittest.TestCase classes.\n It provides a set of common tests for PyTorch pipeline that has vae, e.g.\n equivalence of different input and output types, etc.\n \"\"\"\n\n @property\n def image_params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `image_params` in the child test class. \"\n \"`image_params` are tested for if all accepted input image types (i.e. `pt`,`pil`,`np`) are producing same results\"\n )\n\n @property\n def image_latents_params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `image_latents_params` in the child test class. \"\n \"`image_latents_params` are tested for if passing latents directly are producing same results\"\n )\n\n def get_dummy_inputs_by_type(self, device, seed=0, input_image_type=\"pt\", output_type=\"np\"):\n inputs = self.get_dummy_inputs(device, seed)\n\n def convert_to_pt(image):\n if isinstance(image, torch.Tensor):\n input_image = image\n elif isinstance(image, np.ndarray):\n input_image = VaeImageProcessor.numpy_to_pt(image)\n elif isinstance(image, PIL.Image.Image):\n input_image = VaeImageProcessor.pil_to_numpy(image)\n input_image = VaeImageProcessor.numpy_to_pt(input_image)\n else:\n raise ValueError(f\"unsupported input_image_type {type(image)}\")\n return input_image\n\n def convert_pt_to_type(image, input_image_type):\n if input_image_type == \"pt\":\n input_image = image\n elif input_image_type == \"np\":\n input_image = VaeImageProcessor.pt_to_numpy(image)\n elif input_image_type == \"pil\":\n input_image = VaeImageProcessor.pt_to_numpy(image)\n input_image = VaeImageProcessor.numpy_to_pil(input_image)\n else:\n raise ValueError(f\"unsupported input_image_type {input_image_type}.\")\n return input_image\n\n for image_param in self.image_params:\n if image_param in inputs.keys():\n inputs[image_param] = convert_pt_to_type(\n convert_to_pt(inputs[image_param]).to(device), input_image_type\n )\n\n inputs[\"output_type\"] = output_type\n\n return inputs\n\n def test_pt_np_pil_outputs_equivalent(self, expected_max_diff=1e-4):\n self._test_pt_np_pil_outputs_equivalent(expected_max_diff=expected_max_diff)\n\n def _test_pt_np_pil_outputs_equivalent(self, expected_max_diff=1e-4, input_image_type=\"pt\"):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n output_pt = pipe(\n **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type=\"pt\")\n )[0]\n output_np = pipe(\n **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type=\"np\")\n )[0]\n output_pil = pipe(\n **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type=\"pil\")\n )[0]\n\n max_diff = np.abs(output_pt.cpu().numpy().transpose(0, 2, 3, 1) - output_np).max()\n self.assertLess(\n max_diff, expected_max_diff, \"`output_type=='pt'` generate different results from `output_type=='np'`\"\n )\n\n max_diff = np.abs(np.array(output_pil[0]) - (output_np * 255).round()).max()\n self.assertLess(max_diff, 2.0, \"`output_type=='pil'` generate different results from `output_type=='np'`\")\n\n def test_pt_np_pil_inputs_equivalent(self):\n if len(self.image_params) == 0:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = 
pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n out_input_pt = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type=\"pt\"))[0]\n out_input_np = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type=\"np\"))[0]\n out_input_pil = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type=\"pil\"))[0]\n\n max_diff = np.abs(out_input_pt - out_input_np).max()\n self.assertLess(max_diff, 1e-4, \"`input_type=='pt'` generate different result from `input_type=='np'`\")\n max_diff = np.abs(out_input_pil - out_input_np).max()\n self.assertLess(max_diff, 1e-2, \"`input_type=='pt'` generate different result from `input_type=='np'`\")\n\n def test_latents_input(self):\n if len(self.image_latents_params) == 0:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type=\"pt\"))[0]\n\n vae = components[\"vae\"]\n inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type=\"pt\")\n generator = inputs[\"generator\"]\n for image_param in self.image_latents_params:\n if image_param in inputs.keys():\n inputs[image_param] = (\n vae.encode(inputs[image_param]).latent_dist.sample(generator) * vae.config.scaling_factor\n )\n out_latents_inputs = pipe(**inputs)[0]\n\n max_diff = np.abs(out - out_latents_inputs).max()\n self.assertLess(max_diff, 1e-4, \"passing latents as image input generate different result from passing image\")"
},
{
"identifier": "PipelineTesterMixin",
"path": "tests/pipelines/test_pipelines_common.py",
"snippet": "class PipelineTesterMixin:\n \"\"\"\n This mixin is designed to be used with unittest.TestCase classes.\n It provides a set of common tests for each PyTorch pipeline, e.g. saving and loading the pipeline,\n equivalence of dict and tuple outputs, etc.\n \"\"\"\n\n # Canonical parameters that are passed to `__call__` regardless\n # of the type of pipeline. They are always optional and have common\n # sense default values.\n required_optional_params = frozenset(\n [\n \"num_inference_steps\",\n \"num_images_per_prompt\",\n \"generator\",\n \"latents\",\n \"output_type\",\n \"return_dict\",\n ]\n )\n\n # set these parameters to False in the child class if the pipeline does not support the corresponding functionality\n test_attention_slicing = True\n\n test_xformers_attention = True\n\n def get_generator(self, seed):\n device = torch_device if torch_device != \"mps\" else \"cpu\"\n generator = torch.Generator(device).manual_seed(seed)\n return generator\n\n @property\n def pipeline_class(self) -> Union[Callable, DiffusionPipeline]:\n raise NotImplementedError(\n \"You need to set the attribute `pipeline_class = ClassNameOfPipeline` in the child test class. \"\n \"See existing pipeline tests for reference.\"\n )\n\n def get_dummy_components(self):\n raise NotImplementedError(\n \"You need to implement `get_dummy_components(self)` in the child test class. \"\n \"See existing pipeline tests for reference.\"\n )\n\n def get_dummy_inputs(self, device, seed=0):\n raise NotImplementedError(\n \"You need to implement `get_dummy_inputs(self, device, seed)` in the child test class. \"\n \"See existing pipeline tests for reference.\"\n )\n\n @property\n def params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `params` in the child test class. \"\n \"`params` are checked for if all values are present in `__call__`'s signature.\"\n \" You can set `params` using one of the common set of parameters defined in `pipeline_params.py`\"\n \" e.g., `TEXT_TO_IMAGE_PARAMS` defines the common parameters used in text to \"\n \"image pipelines, including prompts and prompt embedding overrides.\"\n \"If your pipeline's set of arguments has minor changes from one of the common sets of arguments, \"\n \"do not make modifications to the existing common sets of arguments. I.e. a text to image pipeline \"\n \"with non-configurable height and width arguments should set the attribute as \"\n \"`params = TEXT_TO_IMAGE_PARAMS - {'height', 'width'}`. \"\n \"See existing pipeline tests for reference.\"\n )\n\n @property\n def batch_params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `batch_params` in the child test class. \"\n \"`batch_params` are the parameters required to be batched when passed to the pipeline's \"\n \"`__call__` method. `pipeline_params.py` provides some common sets of parameters such as \"\n \"`TEXT_TO_IMAGE_BATCH_PARAMS`, `IMAGE_VARIATION_BATCH_PARAMS`, etc... If your pipeline's \"\n \"set of batch arguments has minor changes from one of the common sets of batch arguments, \"\n \"do not make modifications to the existing common sets of batch arguments. I.e. a text to \"\n \"image pipeline `negative_prompt` is not batched should set the attribute as \"\n \"`batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {'negative_prompt'}`. 
\"\n \"See existing pipeline tests for reference.\"\n )\n\n @property\n def callback_cfg_params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `callback_cfg_params` in the child test class that requires to run test_callback_cfg. \"\n \"`callback_cfg_params` are the parameters that needs to be passed to the pipeline's callback \"\n \"function when dynamically adjusting `guidance_scale`. They are variables that require special\"\n \"treatment when `do_classifier_free_guidance` is `True`. `pipeline_params.py` provides some common\"\n \" sets of parameters such as `TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS`. If your pipeline's \"\n \"set of cfg arguments has minor changes from one of the common sets of cfg arguments, \"\n \"do not make modifications to the existing common sets of cfg arguments. I.e. for inpaint pipeine, you \"\n \" need to adjust batch size of `mask` and `masked_image_latents` so should set the attribute as\"\n \"`callback_cfg_params = TEXT_TO_IMAGE_CFG_PARAMS.union({'mask', 'masked_image_latents'})`\"\n )\n\n def tearDown(self):\n # clean up the VRAM after each test in case of CUDA runtime errors\n super().tearDown()\n gc.collect()\n torch.cuda.empty_cache()\n\n def test_save_load_local(self, expected_max_difference=5e-4):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n output = pipe(**inputs)[0]\n\n logger = logging.get_logger(\"diffusers.pipelines.pipeline_utils\")\n logger.setLevel(diffusers.logging.INFO)\n\n with tempfile.TemporaryDirectory() as tmpdir:\n pipe.save_pretrained(tmpdir, safe_serialization=False)\n\n with CaptureLogger(logger) as cap_logger:\n pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)\n\n for name in pipe_loaded.components.keys():\n if name not in pipe_loaded._optional_components:\n assert name in str(cap_logger)\n\n pipe_loaded.to(torch_device)\n pipe_loaded.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n output_loaded = pipe_loaded(**inputs)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()\n self.assertLess(max_diff, expected_max_difference)\n\n def test_pipeline_call_signature(self):\n self.assertTrue(\n hasattr(self.pipeline_class, \"__call__\"), f\"{self.pipeline_class} should have a `__call__` method\"\n )\n\n parameters = inspect.signature(self.pipeline_class.__call__).parameters\n\n optional_parameters = set()\n\n for k, v in parameters.items():\n if v.default != inspect._empty:\n optional_parameters.add(k)\n\n parameters = set(parameters.keys())\n parameters.remove(\"self\")\n parameters.discard(\"kwargs\") # kwargs can be added if arguments of pipeline call function are deprecated\n\n remaining_required_parameters = set()\n\n for param in self.params:\n if param not in parameters:\n remaining_required_parameters.add(param)\n\n self.assertTrue(\n len(remaining_required_parameters) == 0,\n f\"Required parameters not present: {remaining_required_parameters}\",\n )\n\n remaining_required_optional_parameters = set()\n\n for param in self.required_optional_params:\n if param not in optional_parameters:\n remaining_required_optional_parameters.add(param)\n\n self.assertTrue(\n len(remaining_required_optional_parameters) == 0,\n f\"Required optional 
parameters not present: {remaining_required_optional_parameters}\",\n )\n\n def test_inference_batch_consistent(self, batch_sizes=[2]):\n self._test_inference_batch_consistent(batch_sizes=batch_sizes)\n\n def _test_inference_batch_consistent(\n self, batch_sizes=[2], additional_params_copy_to_batched_inputs=[\"num_inference_steps\"]\n ):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n inputs[\"generator\"] = self.get_generator(0)\n\n logger = logging.get_logger(pipe.__module__)\n logger.setLevel(level=diffusers.logging.FATAL)\n\n # prepare batched inputs\n batched_inputs = []\n for batch_size in batch_sizes:\n batched_input = {}\n batched_input.update(inputs)\n\n for name in self.batch_params:\n if name not in inputs:\n continue\n\n value = inputs[name]\n if name == \"prompt\":\n len_prompt = len(value)\n # make unequal batch sizes\n batched_input[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)]\n\n # make last batch super long\n batched_input[name][-1] = 100 * \"very long\"\n\n else:\n batched_input[name] = batch_size * [value]\n\n if \"generator\" in inputs:\n batched_input[\"generator\"] = [self.get_generator(i) for i in range(batch_size)]\n\n if \"batch_size\" in inputs:\n batched_input[\"batch_size\"] = batch_size\n\n batched_inputs.append(batched_input)\n\n logger.setLevel(level=diffusers.logging.WARNING)\n for batch_size, batched_input in zip(batch_sizes, batched_inputs):\n output = pipe(**batched_input)\n assert len(output[0]) == batch_size\n\n def test_inference_batch_single_identical(self, batch_size=3, expected_max_diff=1e-4):\n self._test_inference_batch_single_identical(batch_size=batch_size, expected_max_diff=expected_max_diff)\n\n def _test_inference_batch_single_identical(\n self,\n batch_size=2,\n expected_max_diff=1e-4,\n additional_params_copy_to_batched_inputs=[\"num_inference_steps\"],\n ):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for components in pipe.components.values():\n if hasattr(components, \"set_default_attn_processor\"):\n components.set_default_attn_processor()\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n inputs = self.get_dummy_inputs(torch_device)\n # Reset generator in case it is has been used in self.get_dummy_inputs\n inputs[\"generator\"] = self.get_generator(0)\n\n logger = logging.get_logger(pipe.__module__)\n logger.setLevel(level=diffusers.logging.FATAL)\n\n # batchify inputs\n batched_inputs = {}\n batched_inputs.update(inputs)\n\n for name in self.batch_params:\n if name not in inputs:\n continue\n\n value = inputs[name]\n if name == \"prompt\":\n len_prompt = len(value)\n batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)]\n batched_inputs[name][-1] = 100 * \"very long\"\n\n else:\n batched_inputs[name] = batch_size * [value]\n\n if \"generator\" in inputs:\n batched_inputs[\"generator\"] = [self.get_generator(i) for i in range(batch_size)]\n\n if \"batch_size\" in inputs:\n batched_inputs[\"batch_size\"] = batch_size\n\n for arg in additional_params_copy_to_batched_inputs:\n batched_inputs[arg] = inputs[arg]\n\n output = pipe(**inputs)\n output_batch = pipe(**batched_inputs)\n\n assert output_batch[0].shape[0] == batch_size\n\n max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()\n assert max_diff < expected_max_diff\n\n def 
test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n generator_device = \"cpu\"\n output = pipe(**self.get_dummy_inputs(generator_device))[0]\n output_tuple = pipe(**self.get_dummy_inputs(generator_device), return_dict=False)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_tuple)).max()\n self.assertLess(max_diff, expected_max_difference)\n\n def test_components_function(self):\n init_components = self.get_dummy_components()\n init_components = {k: v for k, v in init_components.items() if not isinstance(v, (str, int, float))}\n\n pipe = self.pipeline_class(**init_components)\n\n self.assertTrue(hasattr(pipe, \"components\"))\n self.assertTrue(set(pipe.components.keys()) == set(init_components.keys()))\n\n @unittest.skipIf(torch_device != \"cuda\", reason=\"float16 requires CUDA\")\n def test_float16_inference(self, expected_max_diff=5e-2):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n components = self.get_dummy_components()\n pipe_fp16 = self.pipeline_class(**components)\n for component in pipe_fp16.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe_fp16.to(torch_device, torch.float16)\n pipe_fp16.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n # Reset generator in case it is used inside dummy inputs\n if \"generator\" in inputs:\n inputs[\"generator\"] = self.get_generator(0)\n\n output = pipe(**inputs)[0]\n\n fp16_inputs = self.get_dummy_inputs(torch_device)\n # Reset generator in case it is used inside dummy inputs\n if \"generator\" in fp16_inputs:\n fp16_inputs[\"generator\"] = self.get_generator(0)\n\n output_fp16 = pipe_fp16(**fp16_inputs)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_fp16)).max()\n self.assertLess(max_diff, expected_max_diff, \"The outputs of the fp16 and fp32 pipelines are too different.\")\n\n @unittest.skipIf(torch_device != \"cuda\", reason=\"float16 requires CUDA\")\n def test_save_load_float16(self, expected_max_diff=1e-2):\n components = self.get_dummy_components()\n for name, module in components.items():\n if hasattr(module, \"half\"):\n components[name] = module.to(torch_device).half()\n\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n output = pipe(**inputs)[0]\n\n with tempfile.TemporaryDirectory() as tmpdir:\n pipe.save_pretrained(tmpdir)\n pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16)\n for component in pipe_loaded.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe_loaded.to(torch_device)\n pipe_loaded.set_progress_bar_config(disable=None)\n\n for name, component in 
pipe_loaded.components.items():\n if hasattr(component, \"dtype\"):\n self.assertTrue(\n component.dtype == torch.float16,\n f\"`{name}.dtype` switched from `float16` to {component.dtype} after loading.\",\n )\n\n inputs = self.get_dummy_inputs(torch_device)\n output_loaded = pipe_loaded(**inputs)[0]\n max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()\n self.assertLess(\n max_diff, expected_max_diff, \"The output of the fp16 pipeline changed after saving and loading.\"\n )\n\n def test_save_load_optional_components(self, expected_max_difference=1e-4):\n if not hasattr(self.pipeline_class, \"_optional_components\"):\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n # set all optional components to None\n for optional_component in pipe._optional_components:\n setattr(pipe, optional_component, None)\n\n generator_device = \"cpu\"\n inputs = self.get_dummy_inputs(generator_device)\n output = pipe(**inputs)[0]\n\n with tempfile.TemporaryDirectory() as tmpdir:\n pipe.save_pretrained(tmpdir, safe_serialization=False)\n pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)\n for component in pipe_loaded.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe_loaded.to(torch_device)\n pipe_loaded.set_progress_bar_config(disable=None)\n\n for optional_component in pipe._optional_components:\n self.assertTrue(\n getattr(pipe_loaded, optional_component) is None,\n f\"`{optional_component}` did not stay set to None after loading.\",\n )\n\n inputs = self.get_dummy_inputs(generator_device)\n output_loaded = pipe_loaded(**inputs)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()\n self.assertLess(max_diff, expected_max_difference)\n\n @unittest.skipIf(torch_device != \"cuda\", reason=\"CUDA and CPU are required to switch devices\")\n def test_to_device(self):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.set_progress_bar_config(disable=None)\n\n pipe.to(\"cpu\")\n model_devices = [component.device.type for component in components.values() if hasattr(component, \"device\")]\n self.assertTrue(all(device == \"cpu\" for device in model_devices))\n\n output_cpu = pipe(**self.get_dummy_inputs(\"cpu\"))[0]\n self.assertTrue(np.isnan(output_cpu).sum() == 0)\n\n pipe.to(\"cuda\")\n model_devices = [component.device.type for component in components.values() if hasattr(component, \"device\")]\n self.assertTrue(all(device == \"cuda\" for device in model_devices))\n\n output_cuda = pipe(**self.get_dummy_inputs(\"cuda\"))[0]\n self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)\n\n def test_to_dtype(self):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.set_progress_bar_config(disable=None)\n\n model_dtypes = [component.dtype for component in components.values() if hasattr(component, \"dtype\")]\n self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes))\n\n pipe.to(torch_dtype=torch.float16)\n model_dtypes = [component.dtype for component in components.values() if hasattr(component, \"dtype\")]\n self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes))\n\n def test_attention_slicing_forward_pass(self, expected_max_diff=1e-3):\n 
self._test_attention_slicing_forward_pass(expected_max_diff=expected_max_diff)\n\n def _test_attention_slicing_forward_pass(\n self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3\n ):\n if not self.test_attention_slicing:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n generator_device = \"cpu\"\n inputs = self.get_dummy_inputs(generator_device)\n output_without_slicing = pipe(**inputs)[0]\n\n pipe.enable_attention_slicing(slice_size=1)\n inputs = self.get_dummy_inputs(generator_device)\n output_with_slicing = pipe(**inputs)[0]\n\n if test_max_difference:\n max_diff = np.abs(to_np(output_with_slicing) - to_np(output_without_slicing)).max()\n self.assertLess(max_diff, expected_max_diff, \"Attention slicing should not affect the inference results\")\n\n if test_mean_pixel_difference:\n assert_mean_pixel_difference(to_np(output_with_slicing[0]), to_np(output_without_slicing[0]))\n\n @unittest.skipIf(\n torch_device != \"cuda\" or not is_accelerate_available() or is_accelerate_version(\"<\", \"0.14.0\"),\n reason=\"CPU offload is only available with CUDA and `accelerate v0.14.0` or higher\",\n )\n def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n generator_device = \"cpu\"\n inputs = self.get_dummy_inputs(generator_device)\n output_without_offload = pipe(**inputs)[0]\n\n pipe.enable_sequential_cpu_offload()\n\n inputs = self.get_dummy_inputs(generator_device)\n output_with_offload = pipe(**inputs)[0]\n\n max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()\n self.assertLess(max_diff, expected_max_diff, \"CPU offloading should not affect the inference results\")\n\n @unittest.skipIf(\n torch_device != \"cuda\" or not is_accelerate_available() or is_accelerate_version(\"<\", \"0.17.0\"),\n reason=\"CPU offload is only available with CUDA and `accelerate v0.17.0` or higher\",\n )\n def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):\n generator_device = \"cpu\"\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(generator_device)\n output_without_offload = pipe(**inputs)[0]\n\n pipe.enable_model_cpu_offload()\n inputs = self.get_dummy_inputs(generator_device)\n output_with_offload = pipe(**inputs)[0]\n\n max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()\n self.assertLess(max_diff, expected_max_diff, \"CPU offloading should not affect the inference results\")\n offloaded_modules = [\n v\n for k, v in pipe.components.items()\n if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload\n ]\n (\n self.assertTrue(all(v.device.type == \"cpu\" for v in offloaded_modules)),\n f\"Not offloaded: {[v 
for v in offloaded_modules if v.device.type != 'cpu']}\",\n )\n\n @unittest.skipIf(\n torch_device != \"cuda\" or not is_xformers_available(),\n reason=\"XFormers attention is only available with CUDA and `xformers` installed\",\n )\n def test_xformers_attention_forwardGenerator_pass(self):\n self._test_xformers_attention_forwardGenerator_pass()\n\n def _test_xformers_attention_forwardGenerator_pass(\n self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-4\n ):\n if not self.test_xformers_attention:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n output_without_offload = pipe(**inputs)[0]\n output_without_offload = (\n output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload\n )\n\n pipe.enable_xformers_memory_efficient_attention()\n inputs = self.get_dummy_inputs(torch_device)\n output_with_offload = pipe(**inputs)[0]\n output_with_offload = (\n output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload\n )\n\n if test_max_difference:\n max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()\n self.assertLess(max_diff, expected_max_diff, \"XFormers attention should not affect the inference results\")\n\n if test_mean_pixel_difference:\n assert_mean_pixel_difference(output_with_offload[0], output_without_offload[0])\n\n def test_progress_bar(self):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.to(torch_device)\n\n inputs = self.get_dummy_inputs(torch_device)\n with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):\n _ = pipe(**inputs)\n stderr = stderr.getvalue()\n # we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img,\n # so we just match \"5\" in \"#####| 1/5 [00:01<00:00]\"\n max_steps = re.search(\"/(.*?) 
\", stderr).group(1)\n self.assertTrue(max_steps is not None and len(max_steps) > 0)\n self.assertTrue(\n f\"{max_steps}/{max_steps}\" in stderr, \"Progress bar should be enabled and stopped at the max step\"\n )\n\n pipe.set_progress_bar_config(disable=True)\n with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):\n _ = pipe(**inputs)\n self.assertTrue(stderr.getvalue() == \"\", \"Progress bar should be disabled\")\n\n def test_num_images_per_prompt(self):\n sig = inspect.signature(self.pipeline_class.__call__)\n\n if \"num_images_per_prompt\" not in sig.parameters:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n batch_sizes = [1, 2]\n num_images_per_prompts = [1, 2]\n\n for batch_size in batch_sizes:\n for num_images_per_prompt in num_images_per_prompts:\n inputs = self.get_dummy_inputs(torch_device)\n\n for key in inputs.keys():\n if key in self.batch_params:\n inputs[key] = batch_size * [inputs[key]]\n\n images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]\n\n assert images.shape[0] == batch_size * num_images_per_prompt\n\n def test_cfg(self):\n sig = inspect.signature(self.pipeline_class.__call__)\n\n if \"guidance_scale\" not in sig.parameters:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n\n inputs[\"guidance_scale\"] = 1.0\n out_no_cfg = pipe(**inputs)[0]\n\n inputs[\"guidance_scale\"] = 7.5\n out_cfg = pipe(**inputs)[0]\n\n assert out_cfg.shape == out_no_cfg.shape\n\n def test_callback_inputs(self):\n sig = inspect.signature(self.pipeline_class.__call__)\n has_callback_tensor_inputs = \"callback_on_step_end_tensor_inputs\" in sig.parameters\n has_callback_step_end = \"callback_on_step_end\" in sig.parameters\n\n if not (has_callback_tensor_inputs and has_callback_step_end):\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n self.assertTrue(\n hasattr(pipe, \"_callback_tensor_inputs\"),\n f\" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs\",\n )\n\n def callback_inputs_subset(pipe, i, t, callback_kwargs):\n # interate over callback args\n for tensor_name, tensor_value in callback_kwargs.items():\n # check that we're only passing in allowed tensor inputs\n assert tensor_name in pipe._callback_tensor_inputs\n\n return callback_kwargs\n\n def callback_inputs_all(pipe, i, t, callback_kwargs):\n for tensor_name in pipe._callback_tensor_inputs:\n assert tensor_name in callback_kwargs\n\n # interate over callback args\n for tensor_name, tensor_value in callback_kwargs.items():\n # check that we're only passing in allowed tensor inputs\n assert tensor_name in pipe._callback_tensor_inputs\n\n return callback_kwargs\n\n inputs = self.get_dummy_inputs(torch_device)\n\n # Test passing in a subset\n inputs[\"callback_on_step_end\"] = callback_inputs_subset\n inputs[\"callback_on_step_end_tensor_inputs\"] = [\"latents\"]\n inputs[\"output_type\"] = \"latent\"\n output = pipe(**inputs)[0]\n\n # Test passing in a everything\n inputs[\"callback_on_step_end\"] = callback_inputs_all\n inputs[\"callback_on_step_end_tensor_inputs\"] = 
pipe._callback_tensor_inputs\n inputs[\"output_type\"] = \"latent\"\n output = pipe(**inputs)[0]\n\n def callback_inputs_change_tensor(pipe, i, t, callback_kwargs):\n is_last = i == (pipe.num_timesteps - 1)\n if is_last:\n callback_kwargs[\"latents\"] = torch.zeros_like(callback_kwargs[\"latents\"])\n return callback_kwargs\n\n inputs[\"callback_on_step_end\"] = callback_inputs_change_tensor\n inputs[\"callback_on_step_end_tensor_inputs\"] = pipe._callback_tensor_inputs\n inputs[\"output_type\"] = \"latent\"\n output = pipe(**inputs)[0]\n assert output.abs().sum() == 0\n\n def test_callback_cfg(self):\n sig = inspect.signature(self.pipeline_class.__call__)\n has_callback_tensor_inputs = \"callback_on_step_end_tensor_inputs\" in sig.parameters\n has_callback_step_end = \"callback_on_step_end\" in sig.parameters\n\n if not (has_callback_tensor_inputs and has_callback_step_end):\n return\n\n if \"guidance_scale\" not in sig.parameters:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n self.assertTrue(\n hasattr(pipe, \"_callback_tensor_inputs\"),\n f\" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs\",\n )\n\n def callback_increase_guidance(pipe, i, t, callback_kwargs):\n pipe._guidance_scale += 1.0\n\n return callback_kwargs\n\n inputs = self.get_dummy_inputs(torch_device)\n\n # use cfg guidance because some pipelines modify the shape of the latents\n # outside of the denoising loop\n inputs[\"guidance_scale\"] = 2.0\n inputs[\"callback_on_step_end\"] = callback_increase_guidance\n inputs[\"callback_on_step_end_tensor_inputs\"] = pipe._callback_tensor_inputs\n _ = pipe(**inputs)[0]\n\n # we increase the guidance scale by 1.0 at every step\n # check that the guidance scale is increased by the number of scheduler timesteps\n # accounts for models that modify the number of inference steps based on strength\n assert pipe.guidance_scale == (inputs[\"guidance_scale\"] + pipe.num_timesteps)"
}
] | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNet2DConditionModel,
)
from diffusers.utils.testing_utils import (
load_numpy,
nightly,
numpy_cosine_similarity_distance,
require_torch_gpu,
skip_mps,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin | 9,835 | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
| # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests( | PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase | 4 | 2023-11-18 01:40:55+00:00 | 12k |
basnijholt/unidep | unidep/_cli.py | [
{
"identifier": "create_conda_env_specification",
"path": "unidep/_conda_env.py",
"snippet": "def create_conda_env_specification( # noqa: PLR0912\n resolved: dict[str, dict[Platform | None, dict[CondaPip, Spec]]],\n channels: list[str],\n platforms: list[Platform],\n selector: Literal[\"sel\", \"comment\"] = \"sel\",\n) -> CondaEnvironmentSpec:\n \"\"\"Create a conda environment specification from resolved requirements.\"\"\"\n if selector not in (\"sel\", \"comment\"): # pragma: no cover\n msg = f\"Invalid selector: {selector}, must be one of ['sel', 'comment']\"\n raise ValueError(msg)\n\n # Split in conda and pip dependencies and prefer conda over pip\n conda, pip = _extract_conda_pip_dependencies(resolved)\n\n conda_deps: list[str | dict[str, str]] = CommentedSeq()\n pip_deps: list[str] = CommentedSeq()\n seen_identifiers: set[str] = set()\n for platform_to_spec in conda.values():\n if len(platform_to_spec) > 1 and selector == \"sel\":\n # None has been expanded already if len>1\n _resolve_multiple_platform_conflicts(platform_to_spec)\n for _platform, spec in sorted(platform_to_spec.items()):\n dep_str = spec.name_with_pin()\n if len(platforms) != 1 and _platform is not None:\n if selector == \"sel\":\n sel = _conda_sel(_platform)\n dep_str = {f\"sel({sel})\": dep_str} # type: ignore[assignment]\n conda_deps.append(dep_str)\n if selector == \"comment\":\n _add_comment(conda_deps, _platform)\n else:\n conda_deps.append(dep_str)\n assert isinstance(spec.identifier, str)\n seen_identifiers.add(spec.identifier)\n\n for platform_to_spec in pip.values():\n spec_to_platforms: dict[Spec, list[Platform | None]] = {}\n for _platform, spec in platform_to_spec.items():\n spec_to_platforms.setdefault(spec, []).append(_platform)\n\n for spec, _platforms in spec_to_platforms.items():\n if spec.identifier in seen_identifiers:\n continue\n\n dep_str = spec.name_with_pin(is_pip=True)\n if _platforms != [None] and len(platforms) != 1:\n if selector == \"sel\":\n marker = build_pep508_environment_marker(_platforms) # type: ignore[arg-type]\n dep_str = f\"{dep_str}; {marker}\"\n pip_deps.append(dep_str)\n else:\n assert selector == \"comment\"\n # We can only add comments with a single platform because\n # `conda-lock` doesn't implement logic, e.g., [linux or win]\n # should be spread into two lines, one with [linux] and the\n # other with [win].\n for _platform in _platforms:\n pip_deps.append(dep_str)\n _add_comment(pip_deps, cast(Platform, _platform))\n else:\n pip_deps.append(dep_str)\n\n return CondaEnvironmentSpec(channels, platforms, conda_deps, pip_deps)"
},
{
"identifier": "write_conda_environment_file",
"path": "unidep/_conda_env.py",
"snippet": "def write_conda_environment_file(\n env_spec: CondaEnvironmentSpec,\n output_file: str | Path | None = \"environment.yaml\",\n name: str = \"myenv\",\n *,\n verbose: bool = False,\n) -> None:\n \"\"\"Generate a conda environment.yaml file or print to stdout.\"\"\"\n resolved_dependencies = deepcopy(env_spec.conda)\n if env_spec.pip:\n resolved_dependencies.append({\"pip\": env_spec.pip}) # type: ignore[arg-type, dict-item]\n env_data = CommentedMap({\"name\": name})\n if env_spec.channels:\n env_data[\"channels\"] = env_spec.channels\n if resolved_dependencies:\n env_data[\"dependencies\"] = resolved_dependencies\n if env_spec.platforms:\n env_data[\"platforms\"] = env_spec.platforms\n yaml = YAML(typ=\"rt\")\n yaml.default_flow_style = False\n yaml.width = 4096\n yaml.indent(mapping=2, sequence=2, offset=2)\n if output_file:\n if verbose:\n print(f\"📝 Generating environment file at `{output_file}`\")\n with open(output_file, \"w\") as f: # noqa: PTH123\n yaml.dump(env_data, f)\n if verbose:\n print(\"📝 Environment file generated successfully.\")\n add_comment_to_file(output_file)\n else:\n yaml.dump(env_data, sys.stdout)"
},
{
"identifier": "conda_lock_command",
"path": "unidep/_conda_lock.py",
"snippet": "def conda_lock_command(\n *,\n depth: int,\n directory: Path,\n platform: list[Platform],\n verbose: bool,\n only_global: bool,\n check_input_hash: bool,\n ignore_pins: list[str],\n skip_dependencies: list[str],\n overwrite_pins: list[str],\n lockfile: str = \"conda-lock.yml\",\n) -> None:\n \"\"\"Generate a conda-lock file a collection of `requirements.yaml` and/or `pyproject.toml` files.\"\"\" # noqa: E501\n conda_lock_output = _conda_lock_global(\n depth=depth,\n directory=directory,\n platform=platform,\n verbose=verbose,\n check_input_hash=check_input_hash,\n ignore_pins=ignore_pins,\n overwrite_pins=overwrite_pins,\n skip_dependencies=skip_dependencies,\n lockfile=lockfile,\n )\n if only_global:\n return\n sub_lock_files = _conda_lock_subpackages(\n directory=directory,\n depth=depth,\n conda_lock_file=conda_lock_output,\n )\n mismatches = _check_consistent_lock_files(\n global_lock_file=conda_lock_output,\n sub_lock_files=sub_lock_files,\n )\n if not mismatches:\n print(\"✅ Analyzed all lock files and found no inconsistencies.\")\n elif len(mismatches) > 1: # pragma: no cover\n print(\"❌ Complete table of package version mismatches:\")\n _mismatch_report(mismatches, raises=False)"
},
{
"identifier": "resolve_conflicts",
"path": "unidep/_conflicts.py",
"snippet": "def resolve_conflicts(\n requirements: dict[str, list[Spec]],\n platforms: list[Platform] | None = None,\n) -> dict[str, dict[Platform | None, dict[CondaPip, Spec]]]:\n \"\"\"Resolve conflicts in a dictionary of requirements.\n\n Uses the ``ParsedRequirements.requirements`` dict returned by\n `parse_requirements`.\n \"\"\"\n if platforms and not set(platforms).issubset(get_args(Platform)):\n msg = f\"Invalid platform: {platforms}, must contain only {get_args(Platform)}\"\n raise VersionConflictError(msg)\n\n prepared = _prepare_specs_for_conflict_resolution(requirements)\n for data in prepared.values():\n _pop_unused_platforms_and_maybe_expand_none(data, platforms)\n resolved = {\n pkg: _combine_pinning_within_platform(data) for pkg, data in prepared.items()\n }\n\n for _platforms in resolved.values():\n for _platform, sources in _platforms.items():\n _platforms[_platform] = _resolve_conda_pip_conflicts(sources)\n return resolved"
},
{
"identifier": "find_requirements_files",
"path": "unidep/_dependencies_parsing.py",
"snippet": "def find_requirements_files(\n base_dir: str | Path = \".\",\n depth: int = 1,\n *,\n verbose: bool = False,\n) -> list[Path]:\n \"\"\"Scan a directory for `requirements.yaml` and `pyproject.toml` files.\"\"\"\n base_path = Path(base_dir)\n found_files = []\n\n # Define a helper function to recursively scan directories\n def _scan_dir(path: Path, current_depth: int) -> None:\n if verbose:\n print(f\"🔍 Scanning in `{path}` at depth {current_depth}\")\n if current_depth > depth:\n return\n for child in path.iterdir():\n if child.is_dir():\n _scan_dir(child, current_depth + 1)\n elif child.name == \"requirements.yaml\":\n found_files.append(child)\n if verbose:\n print(f'🔍 Found `\"requirements.yaml\"` at `{child}`')\n elif child.name == \"pyproject.toml\" and unidep_configured_in_toml(child):\n if verbose:\n print(f'🔍 Found `\"pyproject.toml\"` with dependencies at `{child}`')\n found_files.append(child)\n\n _scan_dir(base_path, 0)\n return sorted(found_files)"
},
{
"identifier": "parse_local_dependencies",
"path": "unidep/_dependencies_parsing.py",
"snippet": "def parse_local_dependencies(\n *paths: Path,\n check_pip_installable: bool = True,\n verbose: bool = False,\n) -> dict[Path, list[Path]]:\n \"\"\"Extract local project dependencies from a list of `requirements.yaml` or `pyproject.toml` files.\n\n Works by loading the specified `local_dependencies` list.\n \"\"\" # noqa: E501\n dependencies: dict[str, set[str]] = defaultdict(set)\n\n for p in paths:\n if verbose:\n print(f\"🔗 Analyzing dependencies in `{p}`\")\n base_path = p.resolve().parent\n _extract_local_dependencies(\n path=p,\n base_path=base_path,\n processed=set(),\n dependencies=dependencies,\n check_pip_installable=check_pip_installable,\n verbose=verbose,\n )\n\n return {\n Path(k): sorted({Path(v) for v in v_set})\n for k, v_set in sorted(dependencies.items())\n }"
},
{
"identifier": "parse_requirements",
"path": "unidep/_dependencies_parsing.py",
"snippet": "def parse_requirements( # noqa: PLR0912\n *paths: Path,\n ignore_pins: list[str] | None = None,\n overwrite_pins: list[str] | None = None,\n skip_dependencies: list[str] | None = None,\n verbose: bool = False,\n) -> ParsedRequirements:\n \"\"\"Parse a list of `requirements.yaml` or `pyproject.toml` files.\"\"\"\n ignore_pins = ignore_pins or []\n skip_dependencies = skip_dependencies or []\n overwrite_pins_map = _parse_overwrite_pins(overwrite_pins or [])\n requirements: dict[str, list[Spec]] = defaultdict(list)\n channels: set[str] = set()\n platforms: set[Platform] = set()\n datas = []\n seen: set[Path] = set()\n yaml = YAML(typ=\"rt\")\n for p in paths:\n if verbose:\n print(f\"📄 Parsing `{p}`\")\n data = _load(p, yaml)\n datas.append(data)\n seen.add(p.resolve())\n\n # Handle \"local_dependencies\" (or old name \"includes\", changed in 0.42.0)\n for include in _get_local_dependencies(data):\n try:\n requirements_path = dependencies_filename(p.parent / include).resolve()\n except FileNotFoundError:\n # Means that this is a local package that is not managed by unidep.\n # We do not need to do anything here, just in `unidep install`.\n continue\n if requirements_path in seen:\n continue # Avoids circular local_dependencies\n if verbose:\n print(f\"📄 Parsing `{include}` from `local_dependencies`\")\n datas.append(_load(requirements_path, yaml))\n seen.add(requirements_path)\n\n identifier = -1\n for data in datas:\n for channel in data.get(\"channels\", []):\n channels.add(channel)\n for _platform in data.get(\"platforms\", []):\n platforms.add(_platform)\n if \"dependencies\" not in data:\n continue\n dependencies = data[\"dependencies\"]\n for i, dep in enumerate(data[\"dependencies\"]):\n identifier += 1\n if isinstance(dep, str):\n specs = _parse_dependency(\n dep,\n dependencies,\n i,\n \"both\",\n identifier,\n ignore_pins,\n overwrite_pins_map,\n skip_dependencies,\n )\n for spec in specs:\n requirements[spec.name].append(spec)\n continue\n assert isinstance(dep, dict)\n for which in [\"conda\", \"pip\"]:\n if which in dep:\n specs = _parse_dependency(\n dep[which],\n dep,\n which,\n which, # type: ignore[arg-type]\n identifier,\n ignore_pins,\n overwrite_pins_map,\n skip_dependencies,\n )\n for spec in specs:\n requirements[spec.name].append(spec)\n\n return ParsedRequirements(sorted(channels), sorted(platforms), dict(requirements))"
},
{
"identifier": "filter_python_dependencies",
"path": "unidep/_setuptools_integration.py",
"snippet": "def filter_python_dependencies(\n resolved: dict[str, dict[Platform | None, dict[CondaPip, Spec]]],\n) -> list[str]:\n \"\"\"Filter out conda dependencies and return only pip dependencies.\n\n Examples\n --------\n >>> requirements = parse_requirements(\"requirements.yaml\")\n >>> resolved = resolve_conflicts(\n ... requirements.requirements, requirements.platforms\n ... )\n >>> python_deps = filter_python_dependencies(resolved)\n \"\"\"\n pip_deps = []\n for platform_data in resolved.values():\n to_process: dict[Platform | None, Spec] = {} # platform -> Spec\n for _platform, sources in platform_data.items():\n pip_spec = sources.get(\"pip\")\n if pip_spec:\n to_process[_platform] = pip_spec\n if not to_process:\n continue\n\n # Check if all Spec objects are identical\n first_spec = next(iter(to_process.values()))\n if all(spec == first_spec for spec in to_process.values()):\n # Build a single combined environment marker\n dep_str = first_spec.name_with_pin(is_pip=True)\n if _platform is not None:\n selector = build_pep508_environment_marker(list(to_process.keys())) # type: ignore[arg-type]\n dep_str = f\"{dep_str}; {selector}\"\n pip_deps.append(dep_str)\n continue\n\n for _platform, pip_spec in to_process.items():\n dep_str = pip_spec.name_with_pin(is_pip=True)\n if _platform is not None:\n selector = build_pep508_environment_marker([_platform])\n dep_str = f\"{dep_str}; {selector}\"\n pip_deps.append(dep_str)\n return sorted(pip_deps)"
},
{
"identifier": "get_python_dependencies",
"path": "unidep/_setuptools_integration.py",
"snippet": "def get_python_dependencies(\n filename: str\n | Path\n | Literal[\"requirements.yaml\", \"pyproject.toml\"] = \"requirements.yaml\", # noqa: PYI051\n *,\n verbose: bool = False,\n ignore_pins: list[str] | None = None,\n overwrite_pins: list[str] | None = None,\n skip_dependencies: list[str] | None = None,\n platforms: list[Platform] | None = None,\n raises_if_missing: bool = True,\n) -> list[str]:\n \"\"\"Extract Python (pip) requirements from a `requirements.yaml` or `pyproject.toml` file.\"\"\" # noqa: E501\n p = Path(filename)\n if not p.exists():\n if raises_if_missing:\n msg = f\"File {filename} not found.\"\n raise FileNotFoundError(msg)\n return []\n\n requirements = parse_requirements(\n p,\n ignore_pins=ignore_pins,\n overwrite_pins=overwrite_pins,\n skip_dependencies=skip_dependencies,\n verbose=verbose,\n )\n resolved = resolve_conflicts(\n requirements.requirements,\n platforms or list(requirements.platforms),\n )\n return filter_python_dependencies(resolved)"
},
{
"identifier": "__version__",
"path": "unidep/_version.py",
"snippet": ""
},
{
"identifier": "Platform",
"path": "unidep/platform_definitions.py",
"snippet": "VALID_SELECTORS = get_args(Selector)\nPEP508_MARKERS = {\n \"linux-64\": \"sys_platform == 'linux' and platform_machine == 'x86_64'\",\n \"linux-aarch64\": \"sys_platform == 'linux' and platform_machine == 'aarch64'\",\n \"linux-ppc64le\": \"sys_platform == 'linux' and platform_machine == 'ppc64le'\",\n \"osx-64\": \"sys_platform == 'darwin' and platform_machine == 'x86_64'\",\n \"osx-arm64\": \"sys_platform == 'darwin' and platform_machine == 'arm64'\",\n \"win-64\": \"sys_platform == 'win32' and platform_machine == 'AMD64'\",\n (\"linux-64\", \"linux-aarch64\", \"linux-ppc64le\"): \"sys_platform == 'linux'\",\n (\"osx-64\", \"osx-arm64\"): \"sys_platform == 'darwin'\",\n (\n \"linux-64\",\n \"linux-aarch64\",\n \"linux-ppc64le\",\n \"osx-64\",\n \"osx-arm64\",\n ): \"sys_platform == 'linux' or sys_platform == 'darwin'\",\n}\nPLATFORM_SELECTOR_MAP: dict[Platform, list[Selector]] = {\n \"linux-64\": [\"linux64\", \"unix\", \"linux\"],\n \"linux-aarch64\": [\"aarch64\", \"unix\", \"linux\"],\n \"linux-ppc64le\": [\"ppc64le\", \"unix\", \"linux\"],\n # \"osx64\" is a selector unique to conda-build referring to\n # platforms on macOS and the Python architecture is x86-64\n \"osx-64\": [\"osx64\", \"osx\", \"macos\", \"unix\"],\n \"osx-arm64\": [\"arm64\", \"osx\", \"macos\", \"unix\"],\n \"win-64\": [\"win64\", \"win\"],\n}\nPLATFORM_SELECTOR_MAP_REVERSE: dict[Selector, set[Platform]] = {}\ndef validate_selector(selector: Selector) -> None:\ndef platforms_from_selector(selector: str) -> list[Platform]:\n def platforms(self) -> list[Platform] | None:\n def pprint(self) -> str:\n def name_with_pin(self, *, is_pip: bool = False) -> str:\nclass Spec(NamedTuple):"
},
{
"identifier": "add_comment_to_file",
"path": "unidep/utils.py",
"snippet": "def add_comment_to_file(\n filename: str | Path,\n extra_lines: list[str] | None = None,\n) -> None:\n \"\"\"Add a comment to the top of a file.\"\"\"\n if extra_lines is None:\n extra_lines = []\n with open(filename, \"r+\") as f: # noqa: PTH123\n content = f.read()\n f.seek(0, 0)\n command_line_args = \" \".join(sys.argv[1:])\n txt = [\n f\"# This file is created and managed by `unidep` {__version__}.\",\n \"# For details see https://github.com/basnijholt/unidep\",\n f\"# File generated with: `unidep {command_line_args}`\",\n *extra_lines,\n ]\n content = \"\\n\".join(txt) + \"\\n\\n\" + content\n f.write(content)"
},
{
"identifier": "dependencies_filename",
"path": "unidep/utils.py",
"snippet": "def dependencies_filename(folder_or_path: str | Path) -> Path:\n \"\"\"Get the path to `requirements.yaml` or `pyproject.toml` file.\"\"\"\n path = Path(folder_or_path)\n if path.is_dir():\n fname_yaml = path / \"requirements.yaml\"\n if fname_yaml.exists():\n return fname_yaml\n fname_toml = path / \"pyproject.toml\"\n if fname_toml.exists() and unidep_configured_in_toml(fname_toml):\n return fname_toml\n msg = (\n f\"File `{fname_yaml}` or `{fname_toml}` (with unidep configuration)\"\n f\" not found in `{folder_or_path}`.\"\n )\n raise FileNotFoundError(msg)\n if not path.exists():\n msg = f\"File `{path}` not found.\"\n raise FileNotFoundError(msg)\n return path"
},
{
"identifier": "escape_unicode",
"path": "unidep/utils.py",
"snippet": "def escape_unicode(string: str) -> str:\n \"\"\"Escape unicode characters.\"\"\"\n return codecs.decode(string, \"unicode_escape\")"
},
{
"identifier": "identify_current_platform",
"path": "unidep/utils.py",
"snippet": "def identify_current_platform() -> Platform:\n \"\"\"Detect the current platform.\"\"\"\n system = platform.system().lower()\n architecture = platform.machine().lower()\n\n if system == \"linux\":\n if architecture == \"x86_64\":\n return \"linux-64\"\n if architecture == \"aarch64\":\n return \"linux-aarch64\"\n if architecture == \"ppc64le\":\n return \"linux-ppc64le\"\n msg = f\"Unsupported Linux architecture `{architecture}`\"\n raise UnsupportedPlatformError(msg)\n if system == \"darwin\":\n if architecture == \"x86_64\":\n return \"osx-64\"\n if architecture == \"arm64\":\n return \"osx-arm64\"\n msg = f\"Unsupported macOS architecture `{architecture}`\"\n raise UnsupportedPlatformError(msg)\n if system == \"windows\":\n if \"64\" in architecture:\n return \"win-64\"\n msg = f\"Unsupported Windows architecture `{architecture}`\"\n raise UnsupportedPlatformError(msg)\n msg = f\"Unsupported operating system `{system}` with architecture `{architecture}`\"\n raise UnsupportedPlatformError(msg)"
},
{
"identifier": "is_pip_installable",
"path": "unidep/utils.py",
"snippet": "def is_pip_installable(folder: str | Path) -> bool: # pragma: no cover\n \"\"\"Determine if the project is pip installable.\n\n Checks for existence of setup.py or [build-system] in pyproject.toml.\n \"\"\"\n path = Path(folder)\n if (path / \"setup.py\").exists():\n return True\n\n # When toml makes it into the standard library, we can use that instead\n # For now this is good enough, except it doesn't handle the case where\n # [build-system] is inside of a multi-line literal string.\n pyproject_path = path / \"pyproject.toml\"\n if pyproject_path.exists():\n with pyproject_path.open(\"r\") as file:\n for line in file:\n if line.strip().startswith(\"[build-system]\"):\n return True\n return False"
},
{
"identifier": "parse_package_str",
"path": "unidep/utils.py",
"snippet": "def parse_package_str(package_str: str) -> ParsedPackageStr:\n \"\"\"Splits a string into package name, version pinning, and platform selector.\"\"\"\n # Regex to match package name, version pinning, and optionally platform selector\n name_pattern = r\"[a-zA-Z0-9_-]+\"\n version_pin_pattern = r\".*?\"\n selector_pattern = r\"[a-z0-9\\s]+\"\n pattern = rf\"({name_pattern})\\s*({version_pin_pattern})?(:({selector_pattern}))?$\"\n match = re.match(pattern, package_str)\n\n if match:\n package_name = match.group(1).strip()\n version_pin = match.group(2).strip() if match.group(2) else None\n selector = match.group(4).strip() if match.group(4) else None\n\n if selector is not None:\n for s in selector.split():\n validate_selector(cast(Selector, s))\n\n return ParsedPackageStr(\n package_name,\n version_pin,\n selector,\n )\n\n msg = f\"Invalid package string: '{package_str}'\"\n raise ValueError(msg)"
},
{
"identifier": "warn",
"path": "unidep/utils.py",
"snippet": "def warn(\n message: str | Warning,\n category: type[Warning] = UserWarning,\n stacklevel: int = 1,\n) -> None:\n \"\"\"Emit a warning with a custom format specific to this package.\"\"\"\n original_format = warnings.formatwarning\n warnings.formatwarning = _simple_warning_format\n try:\n warnings.warn(message, category, stacklevel=stacklevel + 1)\n finally:\n warnings.formatwarning = original_format"
}
] | import argparse
import importlib.util
import os
import shutil
import subprocess
import sys
from pathlib import Path
from unidep._conda_env import (
create_conda_env_specification,
write_conda_environment_file,
)
from unidep._conda_lock import conda_lock_command
from unidep._conflicts import resolve_conflicts
from unidep._dependencies_parsing import (
find_requirements_files,
parse_local_dependencies,
parse_requirements,
)
from unidep._setuptools_integration import (
filter_python_dependencies,
get_python_dependencies,
)
from unidep._version import __version__
from unidep.platform_definitions import Platform
from unidep.utils import (
add_comment_to_file,
dependencies_filename,
escape_unicode,
identify_current_platform,
is_pip_installable,
parse_package_str,
warn,
)
from typing import Literal, get_args
from typing_extensions import Literal, get_args
from rich_argparse import RichHelpFormatter
from argparse import HelpFormatter as _HelpFormatter # type: ignore[assignment] | 8,069 | parser_lock,
{
"directory",
"verbose",
"platform",
"depth",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
},
)
# Subparser for the 'pip-compile' command
pip_compile_help = (
"Generate a fully pinned `requirements.txt` file from one or more"
f" {_DEP_FILES}"
" files using `pip-compile` from `pip-tools`. This"
f" command consolidates all pip dependencies defined in the {_DEP_FILES}"
" files and compiles them into a single `requirements.txt` file, taking"
" into account the specific versions and dependencies of each package."
)
pip_compile_example = (
" Example usage: `unidep pip-compile --directory ./projects` to generate"
f" a `requirements.txt` file for all {_DEP_FILES}"
" files in the"
" `./projects` directory. Use `--output-file requirements.txt` to specify a"
" different output file."
)
parser_pip_compile = subparsers.add_parser(
"pip-compile",
help=pip_compile_help,
description=pip_compile_help + pip_compile_example,
formatter_class=_HelpFormatter,
)
parser_pip_compile.add_argument(
"-o",
"--output-file",
type=Path,
default=None,
help="Output file for the pip requirements, by default `requirements.txt`",
)
_add_common_args(
parser_pip_compile,
{
"directory",
"verbose",
"platform",
"depth",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
},
)
parser_pip_compile.add_argument(
"extra_flags",
nargs=argparse.REMAINDER,
help="Extra flags to pass to `pip-compile`. These flags are passed directly"
" and should be provided in the format expected by `pip-compile`. For example,"
" `unidep pip-compile -- --generate-hashes --allow-unsafe`. Note that the"
" `--` is required to separate the flags for `unidep` from the flags for"
" `pip-compile`.",
)
# Subparser for the 'pip' and 'conda' command
help_str = "Get the {} requirements for the current platform only."
help_example = (
" Example usage: `unidep {which} --file folder1 --file"
" folder2/requirements.yaml --seperator ' ' --platform linux-64` to"
" extract all the {which} dependencies specific to the linux-64 platform. Note"
" that the `--file` argument can be used multiple times to specify multiple"
f" {_DEP_FILES}"
" files and that --file can also be a folder that contains"
f" a {_DEP_FILES} file."
)
parser_pip = subparsers.add_parser(
"pip",
help=help_str.format("pip"),
description=help_str.format("pip") + help_example.format(which="pip"),
formatter_class=_HelpFormatter,
)
parser_conda = subparsers.add_parser(
"conda",
help=help_str.format("conda"),
description=help_str.format("conda") + help_example.format(which="conda"),
formatter_class=_HelpFormatter,
)
for sub_parser in [parser_pip, parser_conda]:
_add_common_args(
sub_parser,
{
"verbose",
"platform",
"file",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
},
)
sub_parser.add_argument(
"--separator",
type=str,
default=" ",
help="The separator between the dependencies, by default ` `",
)
# Subparser for the 'version' command
parser_merge = subparsers.add_parser(
"version",
help="Print version information of unidep.",
formatter_class=_HelpFormatter,
)
args = parser.parse_args()
if args.command is None: # pragma: no cover
parser.print_help()
sys.exit(1)
if "file" in args and args.file.is_dir(): # pragma: no cover
| #!/usr/bin/env python3
"""unidep - Unified Conda and Pip requirements management.
This module provides a command-line tool for managing conda environment.yaml files.
"""
from __future__ import annotations
if sys.version_info >= (3, 8):
else: # pragma: no cover
try: # pragma: no cover
class _HelpFormatter(RichHelpFormatter):
def _get_help_string(self, action: argparse.Action) -> str | None:
# escapes "[" in text, otherwise e.g., [linux] is removed
if action.help is not None:
return action.help.replace("[", r"\[")
return None
except ImportError: # pragma: no cover
_DEP_FILES = "`requirements.yaml` or `pyproject.toml`"
def _add_common_args( # noqa: PLR0912
sub_parser: argparse.ArgumentParser,
options: set[str],
) -> None: # pragma: no cover
if "directory" in options:
sub_parser.add_argument(
"-d",
"--directory",
type=Path,
default=".",
help=f"Base directory to scan for {_DEP_FILES} file(s), by default `.`",
)
if "file" in options:
sub_parser.add_argument(
"-f",
"--file",
type=Path,
default=".",
help=f"The {_DEP_FILES} file to parse, or folder"
" that contains that file, by default `.`",
)
if "verbose" in options:
sub_parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="Print verbose output",
)
if "platform" in options:
current_platform = identify_current_platform()
sub_parser.add_argument(
"--platform",
"-p",
type=str,
action="append", # Allow multiple instances of -p
default=None, # Default is a list with the current platform set in `main`
choices=get_args(Platform),
help="The platform(s) to get the requirements for. "
"Multiple platforms can be specified. "
f"By default, the current platform (`{current_platform}`) is used.",
)
if "editable" in options:
sub_parser.add_argument(
"-e",
"--editable",
action="store_true",
help="Install the project in editable mode",
)
if "depth" in options:
sub_parser.add_argument(
"--depth",
type=int,
default=1,
help=f"Maximum depth to scan for {_DEP_FILES} files, by default 1",
)
if "*files" in options:
sub_parser.add_argument(
"files",
type=Path,
nargs="+",
help=f"The {_DEP_FILES} file(s) to parse"
" or folder(s) that contain"
" those file(s), by default `.`",
default=None, # default is "." set in `main`
)
if "skip-local" in options:
sub_parser.add_argument(
"--skip-local",
action="store_true",
help="Skip installing local dependencies",
)
if "skip-pip" in options:
sub_parser.add_argument(
"--skip-pip",
action="store_true",
help=f"Skip installing pip dependencies from {_DEP_FILES}",
)
if "skip-conda" in options:
sub_parser.add_argument(
"--skip-conda",
action="store_true",
help=f"Skip installing conda dependencies from {_DEP_FILES}",
)
if "skip-dependency" in options:
sub_parser.add_argument(
"--skip-dependency",
type=str,
action="append",
default=[],
help="Skip installing a specific dependency that is in one of the"
f" {_DEP_FILES}"
" files. This option can be used multiple times, each"
" time specifying a different package to skip."
" For example, use `--skip-dependency pandas` to skip installing pandas.",
)
if "no-dependencies" in options:
sub_parser.add_argument(
"--no-dependencies",
action="store_true",
help=f"Skip installing dependencies from {_DEP_FILES}"
" file(s) and only install local package(s). Useful after"
" installing a `conda-lock.yml` file because then all"
" dependencies have already been installed.",
)
if "conda-executable" in options:
sub_parser.add_argument(
"--conda-executable",
type=str,
choices=("conda", "mamba", "micromamba"),
help="The conda executable to use",
default=None,
)
if "dry-run" in options:
sub_parser.add_argument(
"--dry-run",
"--dry",
action="store_true",
help="Only print the commands that would be run",
)
if "ignore-pin" in options:
sub_parser.add_argument(
"--ignore-pin",
type=str,
action="append",
default=[],
help="Ignore the version pin for a specific package,"
" e.g., `--ignore-pin numpy`. This option can be repeated"
" to ignore multiple packages.",
)
if "overwrite-pin" in options:
sub_parser.add_argument(
"--overwrite-pin",
type=str,
action="append",
default=[],
help="Overwrite the version pin for a specific package,"
" e.g., `--overwrite-pin 'numpy==1.19.2'`. This option can be repeated"
" to overwrite the pins of multiple packages.",
)
def _parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Unified Conda and Pip requirements management.",
formatter_class=_HelpFormatter,
)
subparsers = parser.add_subparsers(dest="command", help="Subcommands")
# Subparser for the 'merge' command
merge_help = (
f"Combine multiple (or a single) {_DEP_FILES}"
" files into a"
" single Conda installable `environment.yaml` file."
)
merge_example = (
" Example usage: `unidep merge --directory . --depth 1 --output environment.yaml`" # noqa: E501
f" to search for {_DEP_FILES}"
" files in the current directory and its"
" subdirectories and create `environment.yaml`. These are the defaults, so you"
" can also just run `unidep merge`."
)
parser_merge = subparsers.add_parser(
"merge",
help=merge_help,
description=merge_help + merge_example,
formatter_class=_HelpFormatter,
)
parser_merge.add_argument(
"-o",
"--output",
type=Path,
default="environment.yaml",
help="Output file for the conda environment, by default `environment.yaml`",
)
parser_merge.add_argument(
"-n",
"--name",
type=str,
default="myenv",
help="Name of the conda environment, by default `myenv`",
)
parser_merge.add_argument(
"--stdout",
action="store_true",
help="Output to stdout instead of a file",
)
parser_merge.add_argument(
"--selector",
type=str,
choices=("sel", "comment"),
default="sel",
help="The selector to use for the environment markers, if `sel` then"
" `- numpy # [linux]` becomes `sel(linux): numpy`, if `comment` then"
" it remains `- numpy # [linux]`, by default `sel`",
)
_add_common_args(
parser_merge,
{
"directory",
"verbose",
"platform",
"depth",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
},
)
# Subparser for the 'install' command
install_help = (
f"Automatically install all dependencies from one or more {_DEP_FILES} files."
" This command first installs dependencies"
" with Conda, then with Pip. Finally, it installs local packages"
f" (those containing the {_DEP_FILES} files)"
" using `pip install [-e] ./project`."
)
install_example = (
" Example usage: `unidep install .` for a single project."
" For multiple projects: `unidep install ./project1 ./project2`."
" The command accepts both file paths and directories containing"
f" a {_DEP_FILES} file. Use `--editable` or"
" `-e` to install the local packages in editable mode. See"
f" `unidep install-all` to install all {_DEP_FILES} files in and below the"
" current folder."
)
parser_install = subparsers.add_parser(
"install",
help=install_help,
description=install_help + install_example,
formatter_class=_HelpFormatter,
)
# Add positional argument for the file
_add_common_args(
parser_install,
{
"*files",
"conda-executable",
"dry-run",
"editable",
"skip-local",
"skip-pip",
"skip-conda",
"no-dependencies",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
"verbose",
},
)
install_all_help = (
f"Install dependencies from all {_DEP_FILES}"
" files found in the current"
" directory or specified directory. This command first installs dependencies"
" using Conda, then Pip, and finally the local packages."
)
install_all_example = (
" Example usage: `unidep install-all` to install dependencies from all"
f" {_DEP_FILES}"
" files in the current directory. Use"
" `--directory ./path/to/dir` to specify a different directory. Use"
" `--depth` to control the depth of directory search. Add `--editable`"
" or `-e` for installing local packages in editable mode."
)
parser_install_all = subparsers.add_parser(
"install-all",
help=install_all_help,
description=install_all_help + install_all_example,
formatter_class=_HelpFormatter,
)
# Add positional argument for the file
_add_common_args(
parser_install_all,
{
"conda-executable",
"dry-run",
"editable",
"depth",
"directory",
"skip-local",
"skip-pip",
"skip-conda",
"no-dependencies",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
"verbose",
},
)
# Subparser for the 'conda-lock' command
conda_lock_help = (
"Generate a global `conda-lock.yml` file for a collection of"
f" {_DEP_FILES}"
" files. Additionally, create individual"
f" `conda-lock.yml` files for each {_DEP_FILES} file"
" consistent with the global lock file."
)
conda_lock_example = (
" Example usage: `unidep conda-lock --directory ./projects` to generate"
f" conda-lock files for all {_DEP_FILES}"
" files in the `./projects`"
" directory. Use `--only-global` to generate only the global lock file."
" The `--check-input-hash` option can be used to avoid regenerating lock"
" files if the input hasn't changed."
)
parser_lock = subparsers.add_parser(
"conda-lock",
help=conda_lock_help,
description=conda_lock_help + conda_lock_example,
formatter_class=_HelpFormatter,
)
parser_lock.add_argument(
"--only-global",
action="store_true",
help="Only generate the global lock file",
)
parser_lock.add_argument(
"--lockfile",
type=Path,
default="conda-lock.yml",
help="Specify a path for the global lockfile (default: `conda-lock.yml`"
" in current directory). Path should be relative, e.g.,"
" `--lockfile ./locks/example.conda-lock.yml`.",
)
parser_lock.add_argument(
"--check-input-hash",
action="store_true",
help="Check existing input hashes in lockfiles before regenerating lock files."
" This flag is directly passed to `conda-lock`.",
)
_add_common_args(
parser_lock,
{
"directory",
"verbose",
"platform",
"depth",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
},
)
# Subparser for the 'pip-compile' command
pip_compile_help = (
"Generate a fully pinned `requirements.txt` file from one or more"
f" {_DEP_FILES}"
" files using `pip-compile` from `pip-tools`. This"
f" command consolidates all pip dependencies defined in the {_DEP_FILES}"
" files and compiles them into a single `requirements.txt` file, taking"
" into account the specific versions and dependencies of each package."
)
pip_compile_example = (
" Example usage: `unidep pip-compile --directory ./projects` to generate"
f" a `requirements.txt` file for all {_DEP_FILES}"
" files in the"
" `./projects` directory. Use `--output-file requirements.txt` to specify a"
" different output file."
)
parser_pip_compile = subparsers.add_parser(
"pip-compile",
help=pip_compile_help,
description=pip_compile_help + pip_compile_example,
formatter_class=_HelpFormatter,
)
parser_pip_compile.add_argument(
"-o",
"--output-file",
type=Path,
default=None,
help="Output file for the pip requirements, by default `requirements.txt`",
)
_add_common_args(
parser_pip_compile,
{
"directory",
"verbose",
"platform",
"depth",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
},
)
parser_pip_compile.add_argument(
"extra_flags",
nargs=argparse.REMAINDER,
help="Extra flags to pass to `pip-compile`. These flags are passed directly"
" and should be provided in the format expected by `pip-compile`. For example,"
" `unidep pip-compile -- --generate-hashes --allow-unsafe`. Note that the"
" `--` is required to separate the flags for `unidep` from the flags for"
" `pip-compile`.",
)
# Subparser for the 'pip' and 'conda' command
help_str = "Get the {} requirements for the current platform only."
help_example = (
" Example usage: `unidep {which} --file folder1 --file"
" folder2/requirements.yaml --seperator ' ' --platform linux-64` to"
" extract all the {which} dependencies specific to the linux-64 platform. Note"
" that the `--file` argument can be used multiple times to specify multiple"
f" {_DEP_FILES}"
" files and that --file can also be a folder that contains"
f" a {_DEP_FILES} file."
)
parser_pip = subparsers.add_parser(
"pip",
help=help_str.format("pip"),
description=help_str.format("pip") + help_example.format(which="pip"),
formatter_class=_HelpFormatter,
)
parser_conda = subparsers.add_parser(
"conda",
help=help_str.format("conda"),
description=help_str.format("conda") + help_example.format(which="conda"),
formatter_class=_HelpFormatter,
)
for sub_parser in [parser_pip, parser_conda]:
_add_common_args(
sub_parser,
{
"verbose",
"platform",
"file",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
},
)
sub_parser.add_argument(
"--separator",
type=str,
default=" ",
help="The separator between the dependencies, by default ` `",
)
# Subparser for the 'version' command
parser_merge = subparsers.add_parser(
"version",
help="Print version information of unidep.",
formatter_class=_HelpFormatter,
)
args = parser.parse_args()
if args.command is None: # pragma: no cover
parser.print_help()
sys.exit(1)
if "file" in args and args.file.is_dir(): # pragma: no cover | args.file = dependencies_filename(args.file) | 12 | 2023-11-16 04:23:01+00:00 | 12k |
BAAI-DCAI/SegVol | inference_demo.py | [
{
"identifier": "sam_model_registry",
"path": "segment_anything_volumetric/build_sam.py",
"snippet": "def build_sam_vit_3d(args, checkpoint=None):\ndef _build_sam(\n image_encoder_type,\n embed_dim,\n patch_size,\n checkpoint,\n image_size,\n):"
},
{
"identifier": "SegVol",
"path": "network/model.py",
"snippet": "class SegVol(nn.Module):\n def __init__(self, \n image_encoder, \n mask_decoder,\n prompt_encoder,\n clip_ckpt,\n roi_size,\n patch_size,\n test_mode=False,\n ):\n super().__init__()\n self.image_encoder = image_encoder\n self.mask_decoder = mask_decoder\n self.prompt_encoder = prompt_encoder\n self.text_encoder = TextEncoder(clip_ckpt)\n self.feat_shape = np.array(roi_size)/np.array(patch_size)\n self.test_mode = test_mode\n self.dice_loss = BinaryDiceLoss().cuda()\n self.bce_loss = BCELoss().cuda()\n self.decoder_iter = 6\n\n def forward(self, image, text=None, boxes=None, points=None, **kwargs):\n bs = image.shape[0]\n img_shape = (image.shape[2], image.shape[3], image.shape[4])\n image_embedding, _ = self.image_encoder(image)\n image_embedding = image_embedding.transpose(1, 2).view(bs, -1, \n int(self.feat_shape[0]), int(self.feat_shape[1]), int(self.feat_shape[2]))\n # test mode\n if self.test_mode:\n return self.forward_decoder(image_embedding, img_shape, text, boxes, points)\n \n # train mode\n ## sl\n sl_loss = self.supervised_forward(image, image_embedding, img_shape, kwargs['train_organs'], kwargs['train_labels'])\n ## ssl\n ssl_loss = self.unsupervised_forward(image, image_embedding, kwargs['pseudo_seg_cleaned'], img_shape)\n return sl_loss, ssl_loss\n\n def forward_decoder(self, image_embedding, img_shape, text=None, boxes=None, points=None):\n with torch.no_grad():\n if boxes is not None:\n if len(boxes.shape) == 2:\n boxes = boxes[:, None, :] # (B, 1, 6)\n if text is not None:\n text_embedding = self.text_encoder(text) # (B, 768)\n else:\n text_embedding = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=None,\n text_embedding=text_embedding,\n )\n\n dense_pe = self.prompt_encoder.get_dense_pe()\n low_res_masks, _ = self.mask_decoder(\n image_embeddings=image_embedding,\n text_embedding = text_embedding,\n image_pe=dense_pe,\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=False,\n )\n logits = F.interpolate(low_res_masks, size=img_shape, mode='trilinear', align_corners=False)\n return logits\n\n def supervised_forward(self, image, image_embedding, img_shape, training_organs, train_labels):\n iter_points, iter_bboxes, iter_organs = self.build_prompt_label(image.shape[0], training_organs, train_labels)\n # select prompt\n prompt_options = [[None, iter_points, iter_organs], [iter_bboxes, None, iter_organs], \n [None, None, iter_organs], [iter_bboxes, None, None], [None, iter_points, None],\n [iter_bboxes, iter_points, None]]\n sl_loss = 0\n for prompt in prompt_options:\n bboxes, points, organs = prompt\n logits = self.forward_decoder(image_embedding, img_shape, text=organs, boxes=bboxes, points=points)\n # cal loss\n sl_loss_dice = self.dice_loss.forward(logits.squeeze().float(), train_labels.squeeze().float())\n sl_loss_bce = self.bce_loss.forward(logits.squeeze().float(), train_labels.squeeze().float())\n sl_loss += sl_loss_dice + sl_loss_bce\n return sl_loss\n \n def unsupervised_forward(self, image, image_embedding, pseudo_seg_cleaned, img_shape):\n sll_loss = 0\n for iter in range(self.decoder_iter):\n if iter % 2 == 0:\n pseudo_labels, pseudo_points_prompt = self.build_pseudo_point_prompt_label(image.shape, pseudo_seg_cleaned)\n logits = self.forward_decoder(image_embedding, img_shape, text=None, boxes=None, points=pseudo_points_prompt)\n else:\n pseudo_labels, pseudo_bboxes_prompt = self.build_pseudo_box_prompt_label(image.shape, 
pseudo_seg_cleaned)\n logits = self.forward_decoder(image_embedding, img_shape, text=None, boxes=pseudo_bboxes_prompt, points=None)\n # cal loss\n sll_loss_dice = self.dice_loss.forward(logits.squeeze().float(), pseudo_labels.squeeze().float())\n sll_loss_bce = self.bce_loss.forward(logits.squeeze().float(), pseudo_labels.squeeze().float())\n sll_loss += sll_loss_dice + sll_loss_bce\n return sll_loss\n\n def build_prompt_label(self, bs, training_organs, train_labels):\n # generate prompt & label\n iter_organs = []\n iter_bboxes = []\n iter_points_ax = []\n iter_point_labels = []\n for sample_idx in range(bs):\n # organ prompt\n iter_organs.append(training_organs)\n # box prompt\n box = generate_box(train_labels[sample_idx])\n iter_bboxes.append(box)\n # point prompt\n num_positive_extra_max, num_negative_extra_max = 10, 10\n num_positive_extra = random.randint(0, num_positive_extra_max)\n num_negative_extra = random.randint(0, num_negative_extra_max)\n point, point_label = select_points(\n train_labels[sample_idx],\n num_positive_extra=num_positive_extra,\n num_negative_extra=num_negative_extra,\n fix_extra_point_num=num_positive_extra_max + num_negative_extra_max)\n iter_points_ax.append(point)\n iter_point_labels.append(point_label)\n # batched prompt\n iter_points_ax = torch.stack(iter_points_ax, dim=0).cuda()\n iter_point_labels = torch.stack(iter_point_labels, dim=0).cuda()\n iter_points = (iter_points_ax, iter_point_labels)\n iter_bboxes = torch.stack(iter_bboxes, dim=0).float().cuda()\n return iter_points, iter_bboxes, iter_organs\n \n def build_pseudo_point_prompt_label(self, input_shape, seg_labels):\n pseudo_labels = torch.zeros(input_shape).cuda()\n # generate points\n points = []\n point_labels = []\n for batch_idx in range(input_shape[0]):\n # generate pseudo label\n unique_ids = torch.unique(seg_labels[batch_idx])\n unique_ids = unique_ids[unique_ids != -1]\n region_id = random.choice(unique_ids).item()\n pseudo_labels[batch_idx][seg_labels[batch_idx]==region_id] = 1\n # generate point prompt\n num_positive_extra_max, num_negative_extra_max = 10, 10\n num_positive_extra = random.randint(4, num_positive_extra_max)\n num_negative_extra = random.randint(0, num_negative_extra_max)\n assert len(pseudo_labels[batch_idx][0].shape) == 3\n point, point_label = select_points(\n pseudo_labels[batch_idx][0],\n num_positive_extra=num_positive_extra,\n num_negative_extra=num_negative_extra,\n fix_extra_point_num=num_positive_extra_max + num_negative_extra_max)\n points.append(point)\n point_labels.append(point_label)\n points = torch.stack(points, dim=0).cuda()\n point_labels = torch.stack(point_labels, dim=0).cuda()\n pseudo_points_prompt = (points, point_labels)\n return pseudo_labels, pseudo_points_prompt\n\n def build_pseudo_box_prompt_label(self, input_shape, seg_labels_cleaned):\n pseudo_labels = torch.zeros(input_shape).cuda()\n iter_bboxes = []\n # generate boxes\n for batch_idx in range(input_shape[0]):\n # generate ori pseudo label\n unique_ids = torch.unique(seg_labels_cleaned[batch_idx])\n unique_ids = unique_ids[unique_ids != -1]\n region_id = random.choice(unique_ids).item()\n pseudo_labels[batch_idx][seg_labels_cleaned[batch_idx]==region_id] = 1\n # generate box prompt\n box = generate_box(pseudo_labels[batch_idx][0])\n iter_bboxes.append(box)\n # refine pseudo label\n x_min, y_min, z_min, x_max, y_max, z_max = box\n binary_cube = torch.zeros_like(pseudo_labels[batch_idx][0]).int()\n binary_cube[x_min:x_max+1, y_min:y_max+1, z_min:z_max+1] = 1\n # cal iou\n mask_label = 
seg_labels_cleaned[batch_idx][0]\n assert binary_cube.shape == mask_label.shape, str(binary_cube.shape) + ' ' + str(mask_label.shape)\n mask_values_in_binary_cube = mask_label[binary_cube == 1]\n unique_mask_values = torch.unique(mask_values_in_binary_cube)\n # print('unique_mask_values ', unique_mask_values)\n for value in unique_mask_values:\n if value == -1: continue\n mask_area = (mask_label == value)\n intersection = (binary_cube & mask_area)\n iou = intersection.float().sum() / mask_area.float().sum()\n if iou > 0.90:\n # print(f\"Mask value {value} has IOU > 0.90 in binary cube.\")\n pseudo_labels[batch_idx][seg_labels_cleaned[batch_idx]==value] = 1\n\n bboxes = torch.stack(iter_bboxes, dim=0).float().cuda()\n return pseudo_labels, bboxes"
},
{
"identifier": "process_ct_gt",
"path": "data_process/demo_data_process.py",
"snippet": "def process_ct_gt(case_path, label_path, category, spatial_size):\n print('Data preprocessing...')\n # transform\n img_loader = transforms.LoadImage()\n transform = transforms.Compose(\n [\n transforms.Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n ForegroundNormalization(keys=[\"image\"]),\n DimTranspose(keys=[\"image\", \"label\"]),\n MinMaxNormalization(),\n transforms.SpatialPadd(keys=[\"image\", \"label\"], spatial_size=spatial_size, mode='constant'),\n transforms.CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n transforms.ToTensord(keys=[\"image\", \"label\"]),\n ]\n )\n zoom_out_transform = transforms.Resized(keys=[\"image\", \"label\"], spatial_size=spatial_size, mode='nearest-exact')\n\n ###\n item = {}\n # generate ct_voxel_ndarray\n ct_voxel_ndarray, _ = img_loader(case_path)\n print(type(ct_voxel_ndarray))\n ct_voxel_ndarray = np.array(ct_voxel_ndarray).squeeze()\n ct_shape = ct_voxel_ndarray.shape\n ct_voxel_ndarray = np.expand_dims(ct_voxel_ndarray, axis=0)\n item['image'] = ct_voxel_ndarray\n\n # generate gt_voxel_ndarray\n gt_voxel_ndarray, _ = img_loader(label_path)\n gt_voxel_ndarray = np.array(gt_voxel_ndarray)\n present_categories = np.unique(gt_voxel_ndarray)\n gt_masks = []\n for cls_idx in range(len(category)):\n # ignore background\n cls = cls_idx + 1\n if cls not in present_categories:\n gt_voxel_ndarray_category = np.zeros(ct_shape)\n gt_masks.append(gt_voxel_ndarray_category)\n else:\n gt_voxel_ndarray_category = gt_voxel_ndarray.copy()\n gt_voxel_ndarray_category[gt_voxel_ndarray != cls] = 0\n gt_voxel_ndarray_category[gt_voxel_ndarray == cls] = 1\n gt_masks.append(gt_voxel_ndarray_category)\n gt_voxel_ndarray = np.stack(gt_masks, axis=0)\n assert gt_voxel_ndarray.shape[0] == len(category) and gt_voxel_ndarray.shape[1:] == ct_voxel_ndarray.shape[1:]\n item['label'] = gt_voxel_ndarray.astype(np.int32)\n\n # transform\n item = transform(item)\n item_zoom_out = zoom_out_transform(item)\n item['zoom_out_image'] = item_zoom_out['image']\n item['zoom_out_label'] = item_zoom_out['label']\n print( 'Zoom_in image shape: ', item['image'].shape, \n '\\nZoom_in label shape: ', item['label'].shape,\n '\\nZoom_out image shape: ', item['zoom_out_image'].shape,\n '\\nZoom_out label shape: ', item['zoom_out_label'].shape,\n )\n return item"
},
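`process_ct_gt` above expands one integer label volume into a stack of per-category binary masks, inserting an all-zero mask for any category absent from the scan. The same transformation in a few lines of NumPy (the 3-category toy volume is made up):

```python
import numpy as np

def labels_to_binary_masks(label_vol, num_categories):
    """Stack one binary mask per foreground class; absent classes become all-zero masks."""
    present = np.unique(label_vol)
    masks = []
    for cls_idx in range(num_categories):
        cls = cls_idx + 1                            # class ids start at 1; 0 is background
        if cls not in present:
            masks.append(np.zeros_like(label_vol, dtype=np.int32))
        else:
            masks.append((label_vol == cls).astype(np.int32))
    return np.stack(masks, axis=0)                   # (num_categories, D, H, W)

label_vol = np.zeros((4, 4, 4), dtype=np.int32)
label_vol[0, :2, :2] = 1
label_vol[2, 2:, 2:] = 3                             # class 2 is deliberately absent
masks = labels_to_binary_masks(label_vol, num_categories=3)
print(masks.shape, masks.sum(axis=(1, 2, 3)))        # (3, 4, 4, 4) [4 0 4]
```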
{
"identifier": "sliding_window_inference",
"path": "utils/monai_inferers_utils.py",
"snippet": "def sliding_window_inference(\n inputs: torch.Tensor,\n prompt_reflection: Union[torch.Tensor, Tuple[torch.Tensor, ...]],\n roi_size: Union[Sequence[int], int],\n sw_batch_size: int,\n predictor: Callable[..., Union[torch.Tensor, Sequence[torch.Tensor], Dict[Any, torch.Tensor]]],\n overlap: float = 0.25,\n mode: Union[BlendMode, str] = BlendMode.CONSTANT,\n sigma_scale: Union[Sequence[float], float] = 0.125,\n padding_mode: Union[PytorchPadMode, str] = PytorchPadMode.CONSTANT,\n cval: float = 0.0,\n sw_device: Union[torch.device, str, None] = None,\n device: Union[torch.device, str, None] = None,\n progress: bool = False,\n roi_weight_map: Union[torch.Tensor, None] = None,\n *args: Any,\n **kwargs: Any,\n) -> Union[torch.Tensor, Tuple[torch.Tensor, ...], Dict[Any, torch.Tensor]]:\n \"\"\"\n Sliding window inference on `inputs` with `predictor`.\n\n The outputs of `predictor` could be a tensor, a tuple, or a dictionary of tensors.\n Each output in the tuple or dict value is allowed to have different resolutions with respect to the input.\n e.g., the input patch spatial size is [128,128,128], the output (a tuple of two patches) patch sizes\n could be ([128,64,256], [64,32,128]).\n In this case, the parameter `overlap` and `roi_size` need to be carefully chosen to ensure the output ROI is still\n an integer. If the predictor's input and output spatial sizes are not equal, we recommend choosing the parameters\n so that `overlap*roi_size*output_size/input_size` is an integer (for each spatial dimension).\n\n When roi_size is larger than the inputs' spatial size, the input image are padded during inference.\n To maintain the same spatial sizes, the output image will be cropped to the original input size.\n\n Args:\n inputs: input image to be processed (assuming NCHW[D])\n roi_size: the spatial window size for inferences.\n When its components have None or non-positives, the corresponding inputs dimension will be used.\n if the components of the `roi_size` are non-positive values, the transform will use the\n corresponding components of img size. For example, `roi_size=(32, -1)` will be adapted\n to `(32, 64)` if the second spatial dimension size of img is `64`.\n sw_batch_size: the batch size to run window slices.\n predictor: given input tensor ``patch_data`` in shape NCHW[D],\n The outputs of the function call ``predictor(patch_data)`` should be a tensor, a tuple, or a dictionary\n with Tensor values. Each output in the tuple or dict value should have the same batch_size, i.e. NM'H'W'[D'];\n where H'W'[D'] represents the output patch's spatial size, M is the number of output channels,\n N is `sw_batch_size`, e.g., the input shape is (7, 1, 128,128,128),\n the output could be a tuple of two tensors, with shapes: ((7, 5, 128, 64, 256), (7, 4, 64, 32, 128)).\n In this case, the parameter `overlap` and `roi_size` need to be carefully chosen\n to ensure the scaled output ROI sizes are still integers.\n If the `predictor`'s input and output spatial sizes are different,\n we recommend choosing the parameters so that ``overlap*roi_size*zoom_scale`` is an integer for each dimension.\n overlap: Amount of overlap between scans.\n mode: {``\"constant\"``, ``\"gaussian\"``}\n How to blend output of overlapping windows. 
Defaults to ``\"constant\"``.\n\n - ``\"constant``\": gives equal weight to all predictions.\n - ``\"gaussian``\": gives less weight to predictions on edges of windows.\n\n sigma_scale: the standard deviation coefficient of the Gaussian window when `mode` is ``\"gaussian\"``.\n Default: 0.125. Actual window sigma is ``sigma_scale`` * ``dim_size``.\n When sigma_scale is a sequence of floats, the values denote sigma_scale at the corresponding\n spatial dimensions.\n padding_mode: {``\"constant\"``, ``\"reflect\"``, ``\"replicate\"``, ``\"circular\"``}\n Padding mode for ``inputs``, when ``roi_size`` is larger than inputs. Defaults to ``\"constant\"``\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html\n cval: fill value for 'constant' padding mode. Default: 0\n sw_device: device for the window data.\n By default the device (and accordingly the memory) of the `inputs` is used.\n Normally `sw_device` should be consistent with the device where `predictor` is defined.\n device: device for the stitched output prediction.\n By default the device (and accordingly the memory) of the `inputs` is used. If for example\n set to device=torch.device('cpu') the gpu memory consumption is less and independent of the\n `inputs` and `roi_size`. Output is on the `device`.\n progress: whether to print a `tqdm` progress bar.\n roi_weight_map: pre-computed (non-negative) weight map for each ROI.\n If not given, and ``mode`` is not `constant`, this map will be computed on the fly.\n args: optional args to be passed to ``predictor``.\n kwargs: optional keyword args to be passed to ``predictor``.\n\n Note:\n - input must be channel-first and have a batch dim, supports N-D sliding window.\n\n \"\"\"\n print('sliding window inference for ROI')\n text = kwargs['text']\n use_box = kwargs['use_box']\n use_point = kwargs['use_point']\n assert not (use_box and use_point)\n compute_dtype = inputs.dtype\n num_spatial_dims = len(inputs.shape) - 2\n if overlap < 0 or overlap >= 1:\n raise ValueError(\"overlap must be >= 0 and < 1.\")\n\n # determine image spatial size and batch size\n # Note: all input images must have the same image size and batch size\n batch_size, _, *image_size_ = inputs.shape\n\n if device is None:\n device = inputs.device\n if sw_device is None:\n sw_device = inputs.device\n\n roi_size = fall_back_tuple(roi_size, image_size_)\n # in case that image size is smaller than roi size\n image_size = tuple(max(image_size_[i], roi_size[i]) for i in range(num_spatial_dims))\n pad_size = []\n for k in range(len(inputs.shape) - 1, 1, -1):\n diff = max(roi_size[k - 2] - inputs.shape[k], 0)\n half = diff // 2\n pad_size.extend([half, diff - half])\n inputs = F.pad(inputs, pad=pad_size, mode=look_up_option(padding_mode, PytorchPadMode).value, value=cval)\n #############\n if use_point or use_box:\n binary_prompt_map, global_preds = prompt_reflection\n global_preds = F.pad(global_preds, pad=pad_size, mode=look_up_option(padding_mode, PytorchPadMode).value, value=cval)\n #############\n scan_interval = _get_scan_interval(image_size, roi_size, num_spatial_dims, overlap)\n\n # Store all slices in list\n slices = dense_patch_slices(image_size, roi_size, scan_interval)\n num_win = len(slices) # number of windows per image\n total_slices = num_win * batch_size # total number of windows\n\n # Create window-level importance map\n valid_patch_size = get_valid_patch_size(image_size, roi_size)\n if valid_patch_size == roi_size and (roi_weight_map is not None):\n importance_map = roi_weight_map\n 
else:\n try:\n importance_map = compute_importance_map(valid_patch_size, mode=mode, sigma_scale=sigma_scale, device=device)\n except BaseException as e:\n raise RuntimeError(\n \"Seems to be OOM. Please try smaller patch size or mode='constant' instead of mode='gaussian'.\"\n ) from e\n importance_map = convert_data_type(importance_map, torch.Tensor, device, compute_dtype)[0] # type: ignore\n # handle non-positive weights\n min_non_zero = max(importance_map[importance_map != 0].min().item(), 1e-3)\n importance_map = torch.clamp(importance_map.to(torch.float32), min=min_non_zero).to(compute_dtype)\n\n # Perform predictions\n dict_key, output_image_list, count_map_list = None, [], []\n _initialized_ss = -1\n is_tensor_output = True # whether the predictor's output is a tensor (instead of dict/tuple)\n\n # for each patch\n for slice_g in tqdm(range(0, total_slices, sw_batch_size)) if progress else range(0, total_slices, sw_batch_size):\n slice_range = range(slice_g, min(slice_g + sw_batch_size, total_slices))\n unravel_slice = [\n [slice(int(idx / num_win), int(idx / num_win) + 1), slice(None)] + list(slices[idx % num_win])\n for idx in slice_range\n ]\n window_data = torch.cat([inputs[win_slice] for win_slice in unravel_slice]).to(sw_device)\n #############\n \n boxes = None\n points = None\n if use_point:\n window_binary_prompt_map = torch.cat([binary_prompt_map[win_slice] for win_slice in unravel_slice]).to(sw_device)\n point, point_label = select_points(window_binary_prompt_map.squeeze())\n points = (point.unsqueeze(0).float().cuda(), point_label.unsqueeze(0).float().cuda()) \n pseudo_label = torch.cat([global_preds[win_slice] for win_slice in unravel_slice]).to(sw_device)\n boxes = generate_box(pseudo_label.squeeze()).unsqueeze(0).float().cuda()\n if use_box:\n if num_win == 1:\n window_binary_prompt_map = torch.cat([binary_prompt_map[win_slice] for win_slice in unravel_slice]).to(sw_device)\n boxes = generate_box(window_binary_prompt_map.squeeze()).unsqueeze(0).float().cuda()\n else:\n pseudo_label = torch.cat([global_preds[win_slice] for win_slice in unravel_slice]).to(sw_device)\n boxes = generate_box(pseudo_label.squeeze()).unsqueeze(0).float().cuda()\n seg_prob_out = predictor(window_data, text, boxes, points) # batched patch segmentation\n #############\n # convert seg_prob_out to tuple seg_prob_tuple, this does not allocate new memory.\n seg_prob_tuple: Tuple[torch.Tensor, ...]\n if isinstance(seg_prob_out, torch.Tensor):\n seg_prob_tuple = (seg_prob_out,)\n elif isinstance(seg_prob_out, Mapping):\n if dict_key is None:\n dict_key = sorted(seg_prob_out.keys()) # track predictor's output keys\n seg_prob_tuple = tuple(seg_prob_out[k] for k in dict_key)\n is_tensor_output = False\n else:\n seg_prob_tuple = ensure_tuple(seg_prob_out)\n is_tensor_output = False\n\n # for each output in multi-output list\n for ss, seg_prob in enumerate(seg_prob_tuple):\n seg_prob = seg_prob.to(device) # BxCxMxNxP or BxCxMxN\n\n # compute zoom scale: out_roi_size/in_roi_size\n zoom_scale = []\n for axis, (img_s_i, out_w_i, in_w_i) in enumerate(\n zip(image_size, seg_prob.shape[2:], window_data.shape[2:])\n ):\n _scale = out_w_i / float(in_w_i)\n if not (img_s_i * _scale).is_integer():\n warnings.warn(\n f\"For spatial axis: {axis}, output[{ss}] will have non-integer shape. Spatial \"\n f\"zoom_scale between output[{ss}] and input is {_scale}. Please pad inputs.\"\n )\n zoom_scale.append(_scale)\n\n if _initialized_ss < ss: # init. 
the ss-th buffer at the first iteration\n # construct multi-resolution outputs\n output_classes = seg_prob.shape[1]\n output_shape = [batch_size, output_classes] + [\n int(image_size_d * zoom_scale_d) for image_size_d, zoom_scale_d in zip(image_size, zoom_scale)\n ]\n # allocate memory to store the full output and the count for overlapping parts\n output_image_list.append(torch.zeros(output_shape, dtype=compute_dtype, device=device))\n count_map_list.append(torch.zeros([1, 1] + output_shape[2:], dtype=compute_dtype, device=device))\n _initialized_ss += 1\n\n # resizing the importance_map\n resizer = Resize(spatial_size=seg_prob.shape[2:], mode=\"nearest\", anti_aliasing=False)\n\n # store the result in the proper location of the full output. Apply weights from importance map.\n for idx, original_idx in zip(slice_range, unravel_slice):\n # zoom roi\n original_idx_zoom = list(original_idx) # 4D for 2D image, 5D for 3D image\n for axis in range(2, len(original_idx_zoom)):\n zoomed_start = original_idx[axis].start * zoom_scale[axis - 2]\n zoomed_end = original_idx[axis].stop * zoom_scale[axis - 2]\n if not zoomed_start.is_integer() or (not zoomed_end.is_integer()):\n warnings.warn(\n f\"For axis-{axis-2} of output[{ss}], the output roi range is not int. \"\n f\"Input roi range is ({original_idx[axis].start}, {original_idx[axis].stop}). \"\n f\"Spatial zoom_scale between output[{ss}] and input is {zoom_scale[axis - 2]}. \"\n f\"Corresponding output roi range is ({zoomed_start}, {zoomed_end}).\\n\"\n f\"Please change overlap ({overlap}) or roi_size ({roi_size[axis-2]}) for axis-{axis-2}. \"\n \"Tips: if overlap*roi_size*zoom_scale is an integer, it usually works.\"\n )\n original_idx_zoom[axis] = slice(int(zoomed_start), int(zoomed_end), None)\n importance_map_zoom = resizer(importance_map.unsqueeze(0))[0].to(compute_dtype)\n # store results and weights\n output_image_list[ss][original_idx_zoom] += importance_map_zoom * seg_prob[idx - slice_g]\n count_map_list[ss][original_idx_zoom] += (\n importance_map_zoom.unsqueeze(0).unsqueeze(0).expand(count_map_list[ss][original_idx_zoom].shape)\n )\n\n # account for any overlapping sections\n for ss in range(len(output_image_list)):\n output_image_list[ss] = (output_image_list[ss] / count_map_list.pop(0)).to(compute_dtype)\n\n # remove padding if image_size smaller than roi_size\n for ss, output_i in enumerate(output_image_list):\n if torch.isnan(output_i).any() or torch.isinf(output_i).any():\n warnings.warn(\"Sliding window inference results contain NaN or Inf.\")\n\n zoom_scale = [\n seg_prob_map_shape_d / roi_size_d for seg_prob_map_shape_d, roi_size_d in zip(output_i.shape[2:], roi_size)\n ]\n\n final_slicing: List[slice] = []\n for sp in range(num_spatial_dims):\n slice_dim = slice(pad_size[sp * 2], image_size_[num_spatial_dims - sp - 1] + pad_size[sp * 2])\n slice_dim = slice(\n int(round(slice_dim.start * zoom_scale[num_spatial_dims - sp - 1])),\n int(round(slice_dim.stop * zoom_scale[num_spatial_dims - sp - 1])),\n )\n final_slicing.insert(0, slice_dim)\n while len(final_slicing) < len(output_i.shape):\n final_slicing.insert(0, slice(None))\n output_image_list[ss] = output_i[final_slicing]\n\n if dict_key is not None: # if output of predictor is a dict\n final_output = dict(zip(dict_key, output_image_list))\n else:\n final_output = tuple(output_image_list) # type: ignore\n return final_output[0] if is_tensor_output else final_output # type: ignore"
},
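At its core, `sliding_window_inference` above runs the predictor on overlapping crops, adds each importance-weighted patch prediction into a full-size output buffer together with a count map, and divides the two at the end. A stripped-down 1-D version with the "constant" blend mode — `fake_predictor` is a stand-in for the real segmentation model:

```python
import torch

def sliding_window_1d(signal, roi, stride, predictor):
    """Accumulate overlapping patch predictions, then normalise by per-position coverage."""
    out = torch.zeros_like(signal)
    count = torch.zeros_like(signal)
    importance = torch.ones(roi)                     # constant blending: every position weighted equally
    starts = list(range(0, len(signal) - roi + 1, stride))
    if starts[-1] != len(signal) - roi:              # make sure the tail window is included
        starts.append(len(signal) - roi)
    for s in starts:
        out[s:s + roi] += importance * predictor(signal[s:s + roi])
        count[s:s + roi] += importance
    return out / count

def fake_predictor(patch):
    return patch * 2.0                               # pretend the model doubles its input

x = torch.arange(10, dtype=torch.float32)
print(sliding_window_1d(x, roi=4, stride=2, predictor=fake_predictor))   # exactly 2 * x
```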
{
"identifier": "generate_box",
"path": "utils/monai_inferers_utils.py",
"snippet": "def generate_box(pred_pre, bbox_shift=None):\n meaning_post_label = pred_pre # [h, w, d]\n ones_idx = (meaning_post_label > 0).nonzero(as_tuple=True)\n if all(tensor.nelement() == 0 for tensor in ones_idx):\n bboxes = torch.tensor([-1,-1,-1,-1,-1,-1])\n # print(bboxes, bboxes.shape)\n return bboxes\n min_coords = [dim.min() for dim in ones_idx] # [x_min, y_min, z_min]\n max_coords = [dim.max() for dim in ones_idx] # [x_max, y_max, z_max]\n\n\n if bbox_shift is None:\n corner_min = []\n corner_max = []\n shape = meaning_post_label.shape\n for coor in min_coords:\n coor_ = max(0, coor)\n corner_min.append(coor_)\n for idx, coor in enumerate(max_coords):\n coor_ = min(shape[idx], coor)\n corner_max.append(coor_)\n corner_min = torch.tensor(corner_min)\n corner_max = torch.tensor(corner_max)\n return torch.cat((corner_min, corner_max), dim=0)\n else:\n # add perturbation to bounding box coordinates\n corner_min = []\n corner_max = []\n shape = meaning_post_label.shape\n for coor in min_coords:\n coor_ = max(0, coor + random.randint(-bbox_shift, bbox_shift))\n corner_min.append(coor_)\n for idx, coor in enumerate(max_coords):\n coor_ = min(shape[idx], coor + random.randint(-bbox_shift, bbox_shift))\n corner_max.append(coor_)\n corner_min = torch.tensor(corner_min)\n corner_max = torch.tensor(corner_max)\n return torch.cat((corner_min, corner_max), dim=0)"
},
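`generate_box` above reduces a binary mask to an axis-aligned 3-D box from the min/max of its nonzero coordinates, returning all -1 for an empty mask (the `bbox_shift` branch only adds jitter). The core idea without the jitter:

```python
import torch

def mask_to_box(mask):
    """Return (x_min, y_min, z_min, x_max, y_max, z_max); all -1 if the mask is empty."""
    idx = (mask > 0).nonzero(as_tuple=True)
    if all(dim.nelement() == 0 for dim in idx):
        return torch.tensor([-1, -1, -1, -1, -1, -1])
    mins = torch.stack([dim.min() for dim in idx])
    maxs = torch.stack([dim.max() for dim in idx])
    return torch.cat((mins, maxs))

mask = torch.zeros(8, 8, 8)
mask[2:5, 3:6, 1:4] = 1
print(mask_to_box(mask))                    # tensor([2, 3, 1, 4, 5, 3])
print(mask_to_box(torch.zeros(8, 8, 8)))    # tensor([-1, -1, -1, -1, -1, -1])
```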
{
"identifier": "select_points",
"path": "utils/monai_inferers_utils.py",
"snippet": "def select_points(preds, num_positive_extra=4, num_negative_extra=0, fix_extra_point_num=None):\n spacial_dim = 3\n points = torch.zeros((0, 3))\n labels = torch.zeros((0))\n pos_thred = 0.9\n neg_thred = 0.1\n \n # get pos/net indices\n positive_indices = torch.nonzero(preds > pos_thred, as_tuple=True) # ([pos x], [pos y], [pos z])\n negative_indices = torch.nonzero(preds < neg_thred, as_tuple=True)\n\n ones_idx = (preds > pos_thred).nonzero(as_tuple=True)\n if all(tmp.nelement() == 0 for tmp in ones_idx):\n # all neg\n num_positive_extra = 0\n selected_positive_point = torch.tensor([-1,-1,-1]).unsqueeze(dim=0)\n points = torch.cat((points, selected_positive_point), dim=0)\n labels = torch.cat((labels, torch.tensor([-1]).reshape(1)))\n else:\n # random select a pos point\n random_idx = torch.randint(len(positive_indices[0]), (1,))\n selected_positive_point = torch.tensor([positive_indices[i][random_idx] for i in range(spacial_dim)]).unsqueeze(dim=0)\n points = torch.cat((points, selected_positive_point), dim=0)\n labels = torch.cat((labels, torch.ones((1))))\n\n if num_positive_extra > 0:\n pos_idx_list = torch.randperm(len(positive_indices[0]))[:num_positive_extra]\n extra_positive_points = []\n for pos_idx in pos_idx_list:\n extra_positive_points.append([positive_indices[i][pos_idx] for i in range(spacial_dim)])\n extra_positive_points = torch.tensor(extra_positive_points).reshape(-1, 3)\n points = torch.cat((points, extra_positive_points), dim=0)\n labels = torch.cat((labels, torch.ones((extra_positive_points.shape[0]))))\n\n if num_negative_extra > 0:\n neg_idx_list = torch.randperm(len(negative_indices[0]))[:num_negative_extra]\n extra_negative_points = []\n for neg_idx in neg_idx_list:\n extra_negative_points.append([negative_indices[i][neg_idx] for i in range(spacial_dim)])\n extra_negative_points = torch.tensor(extra_negative_points).reshape(-1, 3)\n points = torch.cat((points, extra_negative_points), dim=0)\n labels = torch.cat((labels, torch.zeros((extra_negative_points.shape[0]))))\n # print('extra_negative_points ', extra_negative_points, extra_negative_points.shape)\n # print('==> points ', points.shape, labels)\n \n if fix_extra_point_num is None:\n left_point_num = num_positive_extra + num_negative_extra + 1 - labels.shape[0]\n else:\n left_point_num = fix_extra_point_num + 1 - labels.shape[0]\n\n for _ in range(left_point_num):\n ignore_point = torch.tensor([-1,-1,-1]).unsqueeze(dim=0)\n points = torch.cat((points, ignore_point), dim=0)\n labels = torch.cat((labels, torch.tensor([-1]).reshape(1)))\n\n return (points, labels)"
},
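`select_points` above draws positive clicks from confident foreground (> 0.9), negative clicks from confident background (< 0.1), and pads the result to a fixed length with coordinate (-1, -1, -1) and label -1. A compact version of that sampling — the thresholds mirror the snippet, while the mask and counts are illustrative:

```python
import torch

def sample_point_prompts(mask, n_pos=2, n_neg=2, pad_to=6):
    """Pick random positive/negative 3-D points from a soft mask and pad to a fixed length."""
    pos = torch.nonzero(mask > 0.9)                  # (N_pos, 3)
    neg = torch.nonzero(mask < 0.1)                  # (N_neg, 3)
    points, labels = [], []
    for pool, label, k in ((pos, 1.0, n_pos), (neg, 0.0, n_neg)):
        take = pool[torch.randperm(len(pool))[:k]]
        points.append(take.float())
        labels.append(torch.full((len(take),), label))
    points, labels = torch.cat(points), torch.cat(labels)
    pad = pad_to - len(labels)                       # unused slots become "ignore" entries
    if pad > 0:
        points = torch.cat([points, torch.full((pad, 3), -1.0)])
        labels = torch.cat([labels, torch.full((pad,), -1.0)])
    return points, labels

mask = torch.zeros(6, 6, 6)
mask[2:4, 2:4, 2:4] = 1.0
pts, lbls = sample_point_prompts(mask)
print(pts.shape, lbls)        # torch.Size([6, 3]) tensor([ 1.,  1.,  0.,  0., -1., -1.])
```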
{
"identifier": "build_binary_cube",
"path": "utils/monai_inferers_utils.py",
"snippet": "def build_binary_cube(bbox, binary_cube_shape):\n min_coord = bbox[0][:3].int().tolist()\n max_coord = bbox[0][3:].int().tolist()\n binary_cube = torch.zeros(binary_cube_shape)\n binary_cube[min_coord[0]:max_coord[0]+1, min_coord[1]:max_coord[1]+1, min_coord[2]:max_coord[2]+1] = 1\n return binary_cube"
},
{
"identifier": "build_binary_points",
"path": "utils/monai_inferers_utils.py",
"snippet": "def build_binary_points(points, labels, shape):\n binary_points = torch.zeros(shape, dtype=torch.int16)\n binary_points[points[labels == 1, 0].long(), points[labels == 1, 1].long(), points[labels == 1, 2].long()] = 1\n return binary_points"
},
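`build_binary_cube` and `build_binary_points` above rasterise a box prompt and the positive clicks back into dense volumes of the image's shape, which is convenient when prompts must be resized together with the image. Example usage on toy inputs:

```python
import torch

shape = (8, 8, 8)

bbox = torch.tensor([[1, 1, 1, 3, 3, 3]])            # one box, batched as (1, 6) like the snippet expects
x0, y0, z0, x1, y1, z1 = bbox[0].int().tolist()
binary_cube = torch.zeros(shape)
binary_cube[x0:x1 + 1, y0:y1 + 1, z0:z1 + 1] = 1

points = torch.tensor([[2, 2, 2], [6, 6, 6]])        # (N, 3) click coordinates
labels = torch.tensor([1, 0])                        # 1 = positive, 0 = negative
binary_points = torch.zeros(shape, dtype=torch.int16)
pos = points[labels == 1]
binary_points[pos[:, 0].long(), pos[:, 1].long(), pos[:, 2].long()] = 1

print(binary_cube.sum().item(), binary_points.sum().item())   # 27.0 1
```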
{
"identifier": "logits2roi_coor",
"path": "utils/monai_inferers_utils.py",
"snippet": "def logits2roi_coor(spatial_size, logits_global_single):\n # crop predict\n pred_global_single = torch.sigmoid(logits_global_single) > 0.5\n ## get all pos idx\n nonzero_indices = torch.nonzero(pred_global_single)\n if nonzero_indices.shape[0] == 0:\n return None, None, None, None, None, None\n ## get boundary\n min_d, max_d = nonzero_indices[:, 0].min(), nonzero_indices[:, 0].max()\n min_h, max_h = nonzero_indices[:, 1].min(), nonzero_indices[:, 1].max()\n min_w, max_w = nonzero_indices[:, 2].min(), nonzero_indices[:, 2].max()\n ## padding\n crop_d, crop_h, crop_w = max_d - min_d + 1, max_h - min_h + 1, max_w - min_w + 1,\n window_d, window_h, window_w = spatial_size\n padding_d, padding_h, padding_w = max(0, window_d-crop_d), max(0, window_h-crop_h), max(0, window_w-crop_w)\n global_d, global_h, global_w = logits_global_single.shape\n min_d = max(0, min_d - int(padding_d)//2)\n min_h = max(0, min_h - int(padding_h)//2)\n min_w = max(0, min_w - int(padding_w)//2)\n max_d = min(global_d, max_d + int(padding_d)//2)\n max_h = min(global_h, max_h + int(padding_h)//2)\n max_w = min(global_w, max_w + int(padding_w)//2)\n return min_d, min_h, min_w, max_d, max_h, max_w"
},
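`logits2roi_coor` above turns a coarse zoom-out prediction into a crop window: take the bounding box of the positive voxels, pad each axis until it spans at least the sliding-window size, and clamp to the volume. The pad-and-clamp step in isolation, with toy sizes:

```python
import torch

def expand_roi(bounds, window, volume_shape):
    """Grow per-axis (min, max) bounds so each axis spans at least `window`, clamped to the volume."""
    out = []
    for (lo, hi), win, size in zip(bounds, window, volume_shape):
        pad = max(0, win - (hi - lo + 1))
        out.append((max(0, lo - pad // 2), min(size, hi + pad // 2)))
    return out

pred = torch.zeros(64, 64, 64)
pred[30:34, 30:34, 30:34] = 1                         # tight 4-voxel-wide foreground blob
nz = pred.nonzero()
bounds = [(nz[:, d].min().item(), nz[:, d].max().item()) for d in range(3)]
print(expand_roi(bounds, window=(32, 32, 32), volume_shape=pred.shape))
# [(16, 47), (16, 47), (16, 47)]
```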
{
"identifier": "draw_result",
"path": "utils/visualize.py",
"snippet": "def draw_result(category, image, bboxes, points, logits, gt3D, spatial_size, work_dir):\n zoom_out_transform = transforms.Compose([\n transforms.AddChanneld(keys=[\"image\", \"label\", \"logits\"]),\n transforms.Resized(keys=[\"image\", \"label\", \"logits\"], spatial_size=spatial_size, mode='nearest-exact')\n ])\n post_item = zoom_out_transform({\n 'image': image,\n 'label': gt3D,\n 'logits': logits\n })\n image, gt3D, logits = post_item['image'][0], post_item['label'][0], post_item['logits'][0]\n preds = torch.sigmoid(logits)\n preds = (preds > 0.5).int()\n\n root_dir=os.path.join(work_dir, f'fig_examples/{category}/') \n\n if not os.path.exists(root_dir):\n os.makedirs(root_dir)\n if bboxes is not None:\n x1, y1, z1, x2, y2, z2 = bboxes[0].cpu().numpy()\n if points is not None:\n points = (points[0].cpu().numpy(), points[1].cpu().numpy())\n points_ax = points[0][0] # [n, 3]\n points_label = points[1][0] # [n]\n\n for j in range(image.shape[0]):\n img_2d = image[j, :, :].detach().cpu().numpy()\n preds_2d = preds[j, :, :].detach().cpu().numpy()\n label_2d = gt3D[j, :, :].detach().cpu().numpy()\n if np.sum(label_2d) == 0 or np.sum(preds_2d) == 0:\n continue\n\n img_2d = img_2d * 255\n # orginal img\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3)\n ax1.imshow(img_2d, cmap='gray')\n ax1.set_title('Image with prompt') \n ax1.axis('off')\n\n # gt\n ax2.imshow(img_2d, cmap='gray')\n show_mask(label_2d, ax2)\n ax2.set_title('Ground truth') \n ax2.axis('off')\n\n # preds\n ax3.imshow(img_2d, cmap='gray')\n show_mask(preds_2d, ax3)\n ax3.set_title('Prediction') \n ax3.axis('off')\n\n # boxes\n if bboxes is not None:\n if j >= x1 and j <= x2:\n show_box((z1, y1, z2, y2), ax1)\n # points\n if points is not None:\n for point_idx in range(points_label.shape[0]):\n point = points_ax[point_idx]\n label = points_label[point_idx] # [1]\n if j == point[0]:\n show_points(point, label, ax1)\n \n fig.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0)\n plt.savefig(os.path.join(root_dir, f'{category}_{j}.png'), bbox_inches='tight')\n plt.close()"
}
] | import argparse
import os
import torch
import torch.nn.functional as F
import json
import monai.transforms as transforms
from segment_anything_volumetric import sam_model_registry
from network.model import SegVol
from data_process.demo_data_process import process_ct_gt
from utils.monai_inferers_utils import sliding_window_inference, generate_box, select_points, build_binary_cube, build_binary_points, logits2roi_coor
from utils.visualize import draw_result | 10,312 |
def set_parse():
# %% set up parser
parser = argparse.ArgumentParser()
parser.add_argument("--test_mode", default=True, type=bool)
parser.add_argument("--resume", type = str, default = '')
parser.add_argument("-infer_overlap", default=0.5, type=float, help="sliding window inference overlap")
parser.add_argument("-spatial_size", default=(32, 256, 256), type=tuple)
parser.add_argument("-patch_size", default=(4, 16, 16), type=tuple)
parser.add_argument('-work_dir', type=str, default='./work_dir')
### demo
parser.add_argument('--demo_config', type=str, required=True)
parser.add_argument("--clip_ckpt", type = str, default = './config/clip')
args = parser.parse_args()
return args
def dice_score(preds, labels): # on GPU
assert preds.shape[0] == labels.shape[0], "predict & target batch size don't match\n" + str(preds.shape) + str(labels.shape)
predict = preds.view(1, -1)
target = labels.view(1, -1)
if target.shape[1] < 1e8:
predict = predict.cuda()
target = target.cuda()
predict = torch.sigmoid(predict)
predict = torch.where(predict > 0.5, 1., 0.)
tp = torch.sum(torch.mul(predict, target))
den = torch.sum(predict) + torch.sum(target) + 1
dice = 2 * tp / den
if target.shape[1] < 1e8:
predict = predict.cpu()
target = target.cpu()
return dice
def zoom_in_zoom_out(args, segvol_model, image, image_resize, gt3D, gt3D_resize, categories=None):
logits_labels_record = {}
image_single_resize = image_resize
image_single = image[0,0]
ori_shape = image_single.shape
for item_idx in range(len(categories)):
# get label to generate prompts
label_single = gt3D[0][item_idx]
label_single_resize = gt3D_resize[0][item_idx]
# skip meaningless categories
if torch.sum(label_single) == 0:
print('No object, skip')
continue
# generate prompts
text_single = categories[item_idx] if args.use_text_prompt else None
if categories is not None: print(f'inference |{categories[item_idx]}| target...')
points_single = None
box_single = None
if args.use_point_prompt:
point, point_label = select_points(label_single_resize, num_positive_extra=3, num_negative_extra=3)
points_single = (point.unsqueeze(0).float().cuda(), point_label.unsqueeze(0).float().cuda())
binary_points_resize = build_binary_points(point, point_label, label_single_resize.shape)
if args.use_box_prompt:
|
def set_parse():
# %% set up parser
parser = argparse.ArgumentParser()
parser.add_argument("--test_mode", default=True, type=bool)
parser.add_argument("--resume", type = str, default = '')
parser.add_argument("-infer_overlap", default=0.5, type=float, help="sliding window inference overlap")
parser.add_argument("-spatial_size", default=(32, 256, 256), type=tuple)
parser.add_argument("-patch_size", default=(4, 16, 16), type=tuple)
parser.add_argument('-work_dir', type=str, default='./work_dir')
### demo
parser.add_argument('--demo_config', type=str, required=True)
parser.add_argument("--clip_ckpt", type = str, default = './config/clip')
args = parser.parse_args()
return args
def dice_score(preds, labels): # on GPU
assert preds.shape[0] == labels.shape[0], "predict & target batch size don't match\n" + str(preds.shape) + str(labels.shape)
predict = preds.view(1, -1)
target = labels.view(1, -1)
if target.shape[1] < 1e8:
predict = predict.cuda()
target = target.cuda()
predict = torch.sigmoid(predict)
predict = torch.where(predict > 0.5, 1., 0.)
tp = torch.sum(torch.mul(predict, target))
den = torch.sum(predict) + torch.sum(target) + 1
dice = 2 * tp / den
if target.shape[1] < 1e8:
predict = predict.cpu()
target = target.cpu()
return dice
def zoom_in_zoom_out(args, segvol_model, image, image_resize, gt3D, gt3D_resize, categories=None):
logits_labels_record = {}
image_single_resize = image_resize
image_single = image[0,0]
ori_shape = image_single.shape
for item_idx in range(len(categories)):
# get label to generate prompts
label_single = gt3D[0][item_idx]
label_single_resize = gt3D_resize[0][item_idx]
# skip meaningless categories
if torch.sum(label_single) == 0:
print('No object, skip')
continue
# generate prompts
text_single = categories[item_idx] if args.use_text_prompt else None
if categories is not None: print(f'inference |{categories[item_idx]}| target...')
points_single = None
box_single = None
if args.use_point_prompt:
point, point_label = select_points(label_single_resize, num_positive_extra=3, num_negative_extra=3)
points_single = (point.unsqueeze(0).float().cuda(), point_label.unsqueeze(0).float().cuda())
binary_points_resize = build_binary_points(point, point_label, label_single_resize.shape)
if args.use_box_prompt: | box_single = generate_box(label_single_resize).unsqueeze(0).float().cuda() | 4 | 2023-11-10 08:25:37+00:00 | 12k |
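The `dice_score` helper in the file above thresholds sigmoid logits at 0.5 and computes 2*TP / (|pred| + |target| + 1), moving tensors to the GPU only when they are small enough. A quick CPU-only check of the formula on toy logits:

```python
import torch

def dice(logits, target):
    pred = (torch.sigmoid(logits) > 0.5).float()
    tp = (pred * target).sum()
    return 2 * tp / (pred.sum() + target.sum() + 1)   # the +1 keeps the all-empty case finite

logits = torch.tensor([4.0, 4.0, -4.0, -4.0])         # thresholds to [1, 1, 0, 0]
target = torch.tensor([1.0, 0.0, 0.0, 0.0])
print(dice(logits, target))                           # 2*1 / (2 + 1 + 1) = tensor(0.5000)
```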
xk-huang/segment-caption-anything | tests/models/sca/test_modeling_sca.py | [
{
"identifier": "ScaConfig",
"path": "src/models/sca/configuration_sca.py",
"snippet": "class ScaConfig(PretrainedConfig):\n model_type = \"sca\"\n is_composition = True\n\n def __init__(\n self,\n vision_config=None,\n prompt_encoder_config=None,\n mask_caption_decoder_config=None,\n text_config=None,\n initializer_range=0.02,\n # NOTE: for recoginition pretrain\n num_task_tokens: int = 6,\n **kwargs,\n ):\n super().__init__(**kwargs)\n vision_config = vision_config if vision_config is not None else {}\n prompt_encoder_config = prompt_encoder_config if prompt_encoder_config is not None else {}\n mask_caption_decoder_config = mask_caption_decoder_config if mask_caption_decoder_config is not None else {}\n text_config = text_config if text_config is not None else {}\n\n if isinstance(vision_config, SamVisionConfig):\n self.vision = vision_config.to_dict()\n if isinstance(prompt_encoder_config, SamPromptEncoderConfig):\n self.prompt_encoder = prompt_encoder_config.to_dict()\n if isinstance(mask_caption_decoder_config, ScaMaskCaptionDecoderConfig):\n self.mask_caption_decoder_config = mask_caption_decoder_config.to_dict()\n\n text_model_type = text_config[\"model_type\"] if \"model_type\" in text_config else \"gpt2\"\n # NOTE(xiaoke): use_decoder_only_language_model only return the model class like GPT2, rather the task model class\n # like GPT2forCausalLM. We need the task model class to load the pretrained weights for the task.\n self.text_config = CONFIG_MAPPING[text_model_type](**text_config)\n\n self.tie_word_embeddings = self.text_config.tie_word_embeddings\n self.is_encoder_decoder = self.text_config.is_encoder_decoder\n self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES\n\n self.vision_config = SamVisionConfig(**vision_config)\n self.prompt_encoder_config = SamPromptEncoderConfig(**prompt_encoder_config)\n self.mask_caption_decoder_config = ScaMaskCaptionDecoderConfig(**mask_caption_decoder_config)\n self.initializer_range = initializer_range\n\n self.num_task_tokens = num_task_tokens\n\n def to_dict(self):\n output = copy.deepcopy(self.__dict__)\n output[\"vision_config\"] = self.vision_config.to_dict()\n output[\"prompt_encoder_config\"] = self.prompt_encoder_config.to_dict()\n output[\"mask_caption_decoder_config\"] = self.mask_caption_decoder_config.to_dict()\n output[\"text_config\"] = self.text_config.to_dict()\n output[\"model_type\"] = self.__class__.model_type\n return output\n\n @classmethod\n def from_sam_text_configs(\n cls,\n sam_config: SamConfig,\n text_config: Optional[PretrainedConfig] = None,\n additional_num_hidden_layers: Optional[int] = None,\n num_caption_tokens: Optional[int] = None,\n num_task_tokens: Optional[int] = None,\n num_caption_heads: Optional[int] = None,\n vl_projector_type: Optional[str] = None,\n vl_projector_norm_type: Optional[str] = None,\n **kwargs,\n ):\n if additional_num_hidden_layers is None:\n logger.warning(\"additional_num_hidden_layers is not set, using default value: 2. Make sure it is correct!\")\n additional_num_hidden_layers = 2\n if num_caption_tokens is None:\n logger.warning(\"num_caption_tokens is not set, using default value: 1. Make sure it is correct!\")\n num_caption_tokens: int = 1\n if num_task_tokens is None:\n logger.warning(\"num_task_tokens is not set, using default value: 6. Make sure it is correct!\")\n num_task_tokens = 6\n if num_caption_heads is None:\n logger.warning(\"num_caption_heads is not set, using default value: 1. 
Make sure it is correct!\")\n num_caption_heads = 1\n if vl_projector_type is None:\n logger.warning(\"vl_projector_type is not set, using default value: linear. Make sure it is correct!\")\n vl_projector_type = \"linear\"\n if vl_projector_norm_type is None:\n logger.warning(\"vl_projector_norm_type is not set, using default value: none. Make sure it is correct!\")\n vl_projector_norm_type = \"none\"\n\n return cls(\n vision_config=sam_config.vision_config.to_dict(),\n prompt_encoder_config=sam_config.prompt_encoder_config.to_dict(),\n mask_caption_decoder_config={\n **sam_config.mask_decoder_config.to_dict(),\n \"additional_num_hidden_layers\": additional_num_hidden_layers,\n \"num_caption_tokens\": num_caption_tokens,\n \"num_caption_heads\": num_caption_heads,\n },\n text_config=text_config.to_dict() if text_config is not None else None,\n num_task_tokens=num_task_tokens,\n vl_projector_type=vl_projector_type,\n vl_projector_norm_type=vl_projector_norm_type,\n **kwargs,\n )"
},
{
"identifier": "ScaProcessor",
"path": "src/models/sca/processing_sca.py",
"snippet": "class ScaProcessor(ProcessorMixin):\n attributes = [\"tokenizer\"]\n tokenizer_class = \"AutoTokenizer\"\n\n def __init__(self, sam_processor, tokenizer):\n super().__init__(tokenizer)\n self.sam_processor: SamProcessor = sam_processor\n\n def __call__(\n self,\n # from ../sam/processing_sam.py\n images=None,\n input_points=None,\n input_labels=None,\n input_boxes=None,\n original_sizes=None,\n # from transformers.models.blip.processing_blip.py\n text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,\n add_special_tokens: bool = True,\n padding: Union[bool, str, PaddingStrategy] = False,\n truncation: Union[bool, str, TruncationStrategy] = None,\n max_length: Optional[int] = None,\n stride: int = 0,\n pad_to_multiple_of: Optional[int] = None,\n return_attention_mask: Optional[bool] = None,\n return_overflowing_tokens: bool = False,\n return_special_tokens_mask: bool = False,\n return_offsets_mapping: bool = False,\n return_token_type_ids: bool = False,\n return_length: bool = False,\n verbose: bool = True,\n return_tensors=None,\n **kwargs,\n ):\n if images is None and original_sizes is None:\n raise ValueError(f\"images and original_sizes cannot both be None.\")\n\n if images is not None:\n input_encoding = self.sam_processor(\n images=images,\n input_points=input_points,\n input_labels=input_labels,\n input_boxes=input_boxes,\n return_tensors=return_tensors,\n **kwargs,\n )\n images = make_list_of_images(images)\n input_encoding[\"images\"] = make_list_of_images(images)\n else:\n input_encoding = self.sam_processor.process_prompts(\n original_sizes=original_sizes,\n input_points=input_points,\n input_labels=input_labels,\n input_boxes=input_boxes,\n return_tensors=return_tensors,\n )\n\n if text is not None:\n text_encoding = self.tokenizer(\n text=text,\n add_special_tokens=add_special_tokens,\n padding=padding,\n truncation=truncation,\n max_length=max_length,\n stride=stride,\n pad_to_multiple_of=pad_to_multiple_of,\n return_attention_mask=return_attention_mask,\n return_overflowing_tokens=return_overflowing_tokens,\n return_special_tokens_mask=return_special_tokens_mask,\n return_offsets_mapping=return_offsets_mapping,\n return_token_type_ids=return_token_type_ids,\n return_length=return_length,\n verbose=verbose,\n return_tensors=return_tensors,\n **kwargs,\n )\n else:\n text_encoding = {}\n input_encoding.update(text_encoding)\n\n return input_encoding\n\n def post_process_masks(self, *args, **kwargs):\n return self.sam_processor.post_process_masks(*args, **kwargs)\n\n @classmethod\n def from_sam_text_pretrained(cls, sam_pretrained_model_name_or_path, text_pretrained_model_name_or_path, **kwargs):\n sam_processor = SamProcessor.from_pretrained(sam_pretrained_model_name_or_path, **kwargs)\n # NOTE: To be compatible with OpenLLAMA which uses the slow tokenizer to avoid a bug.\n # Ref: https://github.com/openlm-research/open_llama#loading-the-weights-with-hugging-face-transformers\n if \"open_llama\" in text_pretrained_model_name_or_path:\n logger.warning(f\"Using slow tokenizer for {text_pretrained_model_name_or_path}.\")\n use_fast = False\n else:\n use_fast = True\n captioner_processor = AutoProcessor.from_pretrained(\n text_pretrained_model_name_or_path, use_fast=use_fast, **kwargs\n )\n return cls(sam_processor, captioner_processor)\n\n @property\n def model_input_names(self):\n tokenizer_input_names = self.tokenizer.model_input_names\n sam_processor_input_names = self.sam_processor.model_input_names\n return 
list(dict.fromkeys(tokenizer_input_names + sam_processor_input_names))"
},
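`ScaProcessor.__call__` above runs the SAM image processor and the text tokenizer independently and then merges the two encodings into a single dict. The merge itself is plain `update` semantics; a sketch with ordinary dicts standing in for the Hugging Face encodings:

```python
def merge_encodings(image_encoding, text_encoding=None):
    """Combine image-processor and tokenizer outputs; later keys win on collision."""
    merged = dict(image_encoding)
    merged.update(text_encoding or {})
    return merged

image_encoding = {"pixel_values": "<tensor 1x3x1024x1024>", "original_sizes": [(768, 1024)]}
text_encoding = {"input_ids": [[101, 2023, 102]], "attention_mask": [[1, 1, 1]]}
print(sorted(merge_encodings(image_encoding, text_encoding)))
# ['attention_mask', 'input_ids', 'original_sizes', 'pixel_values']
```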
{
"identifier": "ScaModel",
"path": "src/models/sca/modeling_sca.py",
"snippet": "class ScaModel(ScaPretrainedModel):\n _keys_to_ignore_on_load_missing = [r\"prompt_encoder.shared_embedding.positional_embedding\"]\n\n def __init__(self, config: ScaConfig, language_model: nn.Module = None):\n super().__init__(config)\n self.shared_image_embedding = SamPositionalEmbedding(config.vision_config)\n\n self.vision_encoder = SamVisionEncoder(config.vision_config)\n self.prompt_encoder = SamPromptEncoder(config.prompt_encoder_config, self.shared_image_embedding)\n # NOTE(xiaoke): Modified. We need to outputs one more tensor: `query_outputs` for captioning\n # Thus its real name is `mask_caption_decoder`, but we keep the name `mask_decoder` for loading SAM weights.\n self.mask_decoder = ScaMaskCaptionDecoder(config.mask_caption_decoder_config)\n\n self.language_project = nn.Linear(\n config.mask_caption_decoder_config.hidden_size, config.text_config.hidden_size\n )\n if language_model is None:\n if config.use_decoder_only_language_model:\n language_model = AutoModelForCausalLM.from_config(config.text_config)\n else:\n raise ValueError(\"Only decoder only language model is supported.\")\n self.language_model = language_model\n\n if config.text_config != self.language_model.config:\n text_config_dict = config.text_config.to_dict()\n language_model_config_dict = self.language_model.config.to_dict()\n all_keys = set(text_config_dict.keys()) | set(language_model_config_dict.keys())\n diff_kv = {}\n for k in all_keys:\n if k not in text_config_dict and k in language_model_config_dict:\n diff_kv[k] = (None, language_model_config_dict[k])\n elif k in text_config_dict and k not in language_model_config_dict:\n diff_kv[k] = (text_config_dict[k], None)\n else:\n if text_config_dict[k] != language_model_config_dict[k]:\n diff_kv[k] = (text_config_dict[k], language_model_config_dict[k])\n logger.warning(\n \"The text config is different from the original config and the language model config. 
The following keys have different \"\n \"values: {}\".format(diff_kv)\n )\n # NOTE: To support gradient checkpoint for LM: https://github.com/huggingface/transformers/pull/19990/files\n self.supports_gradient_checkpointing = True\n\n # Find generation config in language model\n def search_generation_config(obj, parent_key=\"base\"):\n generation_configs = []\n for attr in dir(obj):\n if attr.startswith(\"_\"):\n continue\n elif attr == \"generation_config\" and getattr(obj, attr) is not None:\n generation_configs.append((f\"{parent_key}-{attr}\", getattr(obj, attr)))\n elif isinstance(getattr(obj, attr), (nn.Module, PreTrainedModel)):\n # skip self reference to avoid infinite recursion\n if obj == getattr(obj, attr):\n continue\n generation_configs.extend(\n search_generation_config(getattr(obj, attr), parent_key=f\"{parent_key}-{attr}\")\n )\n return generation_configs\n\n generation_configs = search_generation_config(self.language_model, parent_key=\"captioner\")\n if len(generation_configs) != 1:\n logger.warning(f\"generation_configs: {generation_configs} has to be of length 1, we use the first one\")\n generation_config = generation_configs[0][1]\n if generation_config is not None:\n self.generation_config = generation_config\n logger.info(f\"generation_config: {generation_config} is used for `generate`\")\n\n self.config_parameters()\n self.post_init()\n\n # Copied from ..sam.modeling_sam.SamModel\n def get_input_embeddings(self):\n return self.vision_encoder.get_input_embeddings()\n\n def get_image_wide_positional_embeddings(self):\n size = self.config.prompt_encoder_config.image_embedding_size\n target_device = self.shared_image_embedding.positional_embedding.device\n target_dtype = self.shared_image_embedding.positional_embedding.dtype\n grid = torch.ones((size, size), device=target_device, dtype=target_dtype)\n y_embed = grid.cumsum(dim=0) - 0.5\n x_embed = grid.cumsum(dim=1) - 0.5\n y_embed = y_embed / size\n x_embed = x_embed / size\n\n positional_embedding = self.shared_image_embedding(torch.stack([x_embed, y_embed], dim=-1))\n return positional_embedding.permute(2, 0, 1).unsqueeze(0) # channel x height x width\n\n @torch.no_grad()\n def get_image_embeddings(\n self,\n pixel_values,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ):\n r\"\"\"\n Returns the image embeddings by passing the pixel values through the vision encoder.\n\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Input pixel values\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\n \"\"\"\n vision_output = self.vision_encoder(\n pixel_values,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n image_embeddings = vision_output[0]\n return image_embeddings\n\n @torch.no_grad()\n def get_prompt_embeddings(\n self,\n input_points: Optional[torch.FloatTensor] = None,\n input_labels: Optional[torch.LongTensor] = None,\n input_boxes: Optional[torch.FloatTensor] = None,\n input_masks: Optional[torch.LongTensor] = None,\n ):\n r\"\"\"\n Returns the prompt embeddings by passing the input points, labels, boxes and masks through the prompt 
encoder.\n\n Args:\n input_points (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_points_per_image, 2)`):\n Optional input points for the prompt encoder. The padding of the point is automatically done by the\n processor. `point_batch_size` refers to the number of masks that we want the model to predict per\n point. The model will output `point_batch_size` times 3 masks in total.\n input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points_per_image)`):\n Optional input labels for the prompt encoder. The padding of the labels is automatically done by the\n processor, or can be fed by the user.\n input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes_per_image, 4)`):\n Optional input boxes for the prompt encoder. The padding of the boxes is automatically done by the\n processor. users can also pass manually the input boxes.\n input_masks (`torch.LongTensor` of shape `(batch_size, image_size, image_size)`):\n Optional input masks for the prompt encoder.\n \"\"\"\n prompt_output = self.prompt_encoder(\n input_points=input_points,\n input_labels=input_labels,\n input_boxes=input_boxes,\n input_masks=input_masks,\n )\n return prompt_output\n\n # NOTE(xiaoke). Modified from ..sam.modeling_sam.SamModel\n def forward(\n self,\n mode=\"train\",\n pixel_values: Optional[torch.FloatTensor] = None,\n input_points: Optional[torch.FloatTensor] = None,\n input_labels: Optional[torch.LongTensor] = None,\n input_boxes: Optional[torch.FloatTensor] = None,\n input_masks: Optional[torch.LongTensor] = None,\n image_embeddings: Optional[torch.FloatTensor] = None,\n multimask_output: bool = True,\n attention_similarity: Optional[torch.FloatTensor] = None,\n target_embedding: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict=None,\n # segmentation arguments\n mask_labels: Optional[torch.LongTensor] = None,\n # language model arguments\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n # legacy arguments for catching the inputs for sam captioner\n images=None,\n original_sizes=None,\n reshaped_input_sizes=None,\n **kwargs,\n ) -> List[Dict[str, torch.Tensor]]:\n r\"\"\"\n Example:\n\n ```python\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import AutoModel, AutoProcessor\n\n >>> model = AutoModel.from_pretrained(\"facebook/sam-vit-base\")\n >>> processor = AutoProcessor.from_pretrained(\"facebook/sam-vit-base\")\n\n >>> img_url = \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/sam-car.png\"\n >>> raw_image = Image.open(requests.get(img_url, stream=True).raw).convert(\"RGB\")\n >>> input_points = [[[400, 650]]] # 2D location of a window on the car\n >>> inputs = processor(images=raw_image, input_points=input_points, return_tensors=\"pt\")\n\n >>> # Get segmentation mask\n >>> outputs = model(**inputs)\n\n >>> # Postprocess masks\n >>> masks = processor.post_process_masks(\n ... outputs.pred_masks, inputs[\"original_sizes\"], inputs[\"reshaped_input_sizes\"]\n ... 
)\n ```\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if pixel_values is None and image_embeddings is None:\n raise ValueError(\"Either pixel_values or image_embeddings must be provided.\")\n\n if pixel_values is not None and image_embeddings is not None:\n raise ValueError(\"Only one of pixel_values and image_embeddings can be provided.\")\n\n if input_points is not None and len(input_points.shape) != 4:\n raise ValueError(\n \"The input_points must be a 4D tensor. Of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.\",\n \" got {}.\".format(input_points.shape),\n )\n if input_boxes is not None and len(input_boxes.shape) != 3:\n raise ValueError(\n \"The input_points must be a 3D tensor. Of shape `batch_size`, `nb_boxes`, `4`.\",\n \" got {}.\".format(input_boxes.shape),\n )\n if input_points is not None and input_boxes is not None:\n point_batch_size = input_points.shape[1]\n box_batch_size = input_boxes.shape[1]\n if point_batch_size != box_batch_size:\n raise ValueError(\n \"You should provide as many bounding boxes as input points per box. Got {} and {}.\".format(\n point_batch_size, box_batch_size\n )\n )\n\n image_positional_embeddings = self.get_image_wide_positional_embeddings()\n # repeat with batch size\n batch_size = pixel_values.shape[0] if pixel_values is not None else image_embeddings.shape[0]\n image_positional_embeddings = image_positional_embeddings.repeat(batch_size, 1, 1, 1)\n\n vision_attentions = None\n vision_hidden_states = None\n\n if pixel_values is not None:\n vision_outputs = self.vision_encoder(\n pixel_values,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n image_embeddings = vision_outputs[0]\n\n if output_hidden_states:\n vision_hidden_states = vision_outputs[1]\n if output_attentions:\n vision_attentions = vision_outputs[-1]\n\n if input_points is not None and input_labels is None:\n input_labels = torch.ones_like(input_points[:, :, :, 0], dtype=torch.int, device=input_points.device)\n\n if input_points is not None and image_embeddings.shape[0] != input_points.shape[0]:\n raise ValueError(\n \"The batch size of the image embeddings and the input points must be the same. \",\n \"Got {} and {} respectively.\".format(image_embeddings.shape[0], input_points.shape[0]),\n \" if you want to pass multiple points for the same image, make sure that you passed \",\n \" input_points of shape (batch_size, point_batch_size, num_points_per_image, 3) and \",\n \" input_labels of shape (batch_size, point_batch_size, num_points_per_image)\",\n )\n\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n input_points=input_points,\n input_labels=input_labels,\n input_boxes=input_boxes,\n input_masks=input_masks,\n )\n\n # NOTE(xiaoke): Modified. 
We need to outputs one more tensor: `query_outputs`\n low_res_masks, iou_predictions, query_outputs, mask_decoder_attentions = self.mask_decoder(\n image_embeddings=image_embeddings,\n image_positional_embeddings=image_positional_embeddings,\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n attention_similarity=attention_similarity,\n target_embedding=target_embedding,\n output_attentions=output_attentions,\n )\n\n # low_res_masks: (batch_size, num_masks, num_output_heads, logits_height, logits_width)\n # iou_predictions: (batch_size, num_masks, num_output_heads)\n # query_outputs: (batch_size, num_masks, num_output_heads, num_caption_tokens, hidden_size)\n batch_size, num_masks, num_output_heads, num_caption_tokens, hidden_size = query_outputs.shape\n # NOTE(xiaoke): We use `expand` instead of `repeat` to avoid copying the tensor.\n # So now we need to `reshape` the tensor to the original shape due to the mismatched stride.\n query_outputs = query_outputs.reshape(\n -1, num_caption_tokens, hidden_size\n ) # (batch_size * num_masks * num_output_heads, num_caption_tokens, hidden_size)\n\n language_model_inputs = self.language_project(\n query_outputs\n ) # (batch_size * num_masks * num_output_heads, num_caption_tokens, hidden_size)\n language_model_attention_mask = torch.ones(\n language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device\n ) # (batch_size * num_masks * num_output_heads, 1)\n\n # NOTE(xiaoke): Handle the edge case. If in train mode, and one of the input_ids and attention_mask is None, we should set the labels to None explicitly.\n if mode == \"train\" and (input_ids is None or attention_mask is None):\n logger.info(\n \"In train mode, and one of the input_ids and attention_mask is None. Set them and labels to None.\"\n )\n input_ids = None\n attention_mask = None\n labels = None\n\n if mode == \"train\" and (input_ids is not None and attention_mask is not None):\n # input_ids: (batch_size, num_masks, PADDED_length)\n # attention_mask: (batch_size, num_masks, PADDED_length)\n # NOTE(xiaoke): Copy from ..sam_captioner.modeling_sam_captioner.SamCaptionerModel\n input_ids = input_ids.unsqueeze(-2).repeat_interleave(num_output_heads, dim=-2).flatten(0, 2)\n attention_mask = (\n attention_mask.unsqueeze(-2).repeat_interleave(num_output_heads, dim=-2).flatten(0, 2)\n ) # (batch_size * num_masks * num_output_heads, PADDED_length)\n\n # TODO(xiaoke): Now we repeat the labels num_output_heads times. 
Is this correct?\n # Shall we follow SAM to backpropagate the loss for the head with the lowest IoU?\n if labels is not None:\n labels = labels.unsqueeze(-2).repeat_interleave(num_output_heads, dim=-2).flatten(0, 2)\n\n inputs_embeds = self.language_model.get_input_embeddings()(input_ids)\n inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)\n\n if attention_mask is None:\n attention_mask = torch.ones_like(input_ids)\n expected_device = language_model_attention_mask.device\n attention_mask = torch.cat([language_model_attention_mask, attention_mask.to(expected_device)], dim=1)\n else:\n inputs_embeds = language_model_inputs\n attention_mask = language_model_attention_mask\n\n if self.config.use_decoder_only_language_model:\n if mode == \"train\":\n outputs = self.language_model(\n inputs_embeds=inputs_embeds,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n logits = outputs.logits if return_dict else outputs[0]\n loss = None\n # we compute the loss here since we need to take into account the sequence length of the query embeds\n if labels is not None:\n # TODO(xiaoke): Now we repeat the labels num_output_heads times. Is this correct?\n # Shall we follow SAM to backpropagate the loss for the head with the lowest IoU?\n labels = labels.to(logits.device)\n logits = logits[:, -labels.size(1) :, :]\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous().to(logits.device)\n\n # Flatten the tokens\n loss_fct = CrossEntropyLoss(reduction=\"mean\")\n\n loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1))\n else:\n for key in list(kwargs.keys()):\n # remove the keys that are not used by captioner.generate.\n # Or it will raise error in `transformers/generation/utils.py:_validate_model_kwargs`\n # they are used for post-processing\n if key in UNUSED_KEYS_IN_GENERATE:\n kwargs.pop(key)\n language_model_generate_ids = self.language_model.generate(\n inputs_embeds=inputs_embeds, attention_mask=attention_mask, **kwargs\n )\n sam_output = SamImageSegmentationOutput(iou_scores=iou_predictions, pred_masks=low_res_masks)\n language_model_generate_ids = language_model_generate_ids.view(\n batch_size, num_masks, num_output_heads, -1\n )\n query_outputs = query_outputs.view(batch_size, num_masks, num_output_heads, 1, -1)\n language_model_inputs = language_model_inputs.view(batch_size, num_masks, num_output_heads, 1, -1)\n return language_model_generate_ids, sam_output, query_outputs, language_model_inputs\n else:\n raise ValueError(\"Only decoder only language model is supported.\")\n\n if not return_dict:\n sam_output = (iou_predictions, low_res_masks)\n if output_hidden_states:\n sam_output = sam_output + (vision_hidden_states,)\n\n if output_attentions:\n sam_output = sam_output + (vision_attentions, mask_decoder_attentions)\n output = (loss, logits) + sam_output + outputs + (query_outputs, language_model_inputs)\n return output\n\n sam_output = SamImageSegmentationOutput(\n iou_scores=iou_predictions,\n pred_masks=low_res_masks,\n vision_hidden_states=vision_hidden_states,\n vision_attentions=vision_attentions,\n mask_decoder_attentions=mask_decoder_attentions,\n )\n return ScaForConditionalGnerationModelOutput(\n loss=loss,\n logits=logits,\n segmentation_outputs=sam_output,\n language_model_outputs=outputs,\n 
query_logits=query_outputs,\n projected_query_logits=language_model_inputs,\n )\n\n @classmethod\n def from_sam_text_pretrained(\n cls,\n sam_pretrained_model_name_or_path: str = None,\n text_pretrained_model_name_or_path: str = None,\n additional_num_hidden_layers: int = 2,\n num_caption_tokens: int = 1,\n **kwargs,\n ):\n sam_config = transformers.AutoConfig.from_pretrained(sam_pretrained_model_name_or_path, **kwargs)\n sam_architectures = sam_config.architectures\n if len(sam_architectures) != 1:\n logger.warning(f\"sam_architectures: {sam_architectures} has to be of length 1\")\n text_config = transformers.AutoConfig.from_pretrained(text_pretrained_model_name_or_path, **kwargs)\n config = ScaConfig.from_sam_text_configs(\n sam_config=sam_config,\n text_config=text_config,\n additional_num_hidden_layers=additional_num_hidden_layers,\n num_caption_tokens=num_caption_tokens,\n **kwargs,\n )\n language_model = AutoModelForCausalLM.from_pretrained(text_pretrained_model_name_or_path, **kwargs)\n sca_model = cls.from_pretrained(\n sam_pretrained_model_name_or_path, config=config, language_model=language_model, **kwargs\n )\n # NOTE(xiaoke): Validate the unloaded weights in the model by calling\n # `set([\".\".join(i.split(\".\")[0:2]) for i in unloaded_weights])`\n # There should be no weights left in the pretrained weights that are unloaded.\n return sca_model\n\n @torch.no_grad()\n def generate(self, *args, **kwargs):\n language_model_generate_ids, sam_output, query_outputs, language_model_inputs = self.forward(\n \"inference\", *args, **kwargs\n )\n return ScaForConditionalGnerationModelOutput(\n sequences=language_model_generate_ids,\n segmentation_outputs=sam_output,\n query_logits=query_outputs,\n projected_query_logits=language_model_inputs,\n iou_scores=sam_output.iou_scores,\n pred_masks=sam_output.pred_masks,\n )\n\n def config_parameters(self):\n # NOTE(xiaoke): By default we freeze all the parameters in the config.\n # HF transformers trainer use requires_grad=True to filter out the parameters that need to be optimized.\n for param in self.parameters():\n param.requires_grad = False\n\n # Turn on the parameters that need to be optimized.\n TO_BE_OPTIMIZED = [\n self.mask_decoder.additional_transformer,\n self.mask_decoder.caption_tokens,\n self.language_project,\n ]\n for module in TO_BE_OPTIMIZED:\n for param in module.parameters():\n param.requires_grad = True\n\n # NOTE: To support gradient checkpoint for LM: https://github.com/huggingface/transformers/pull/19990/files\n def _set_gradient_checkpointing(self, module, value=False):\n # NOTE: Most language models in HF supprots gradient checkpointing\n # e.g., OpenLLAMA: https://github.com/huggingface/transformers/blob/5a4f340df74b42b594aedf60199eea95cdb9bed0/src/transformers/models/deprecated/open_llama/modeling_open_llama.py#L464C9-L464C36\n # gpt2: https://github.com/huggingface/transformers/blob/5a4f340df74b42b594aedf60199eea95cdb9bed0/src/transformers/models/gpt2/modeling_gpt2.py#L483C9-L483C36\n self.language_model._set_gradient_checkpointing(module, value=value)\n\n # NOTE: SAM vision encoder supports gradient checkponit\n # https://github.com/huggingface/transformers/blob/5a4f340df74b42b594aedf60199eea95cdb9bed0/src/transformers/models/sam/modeling_sam.py#L1012C14-L1012C37\n self.vision_encoder.gradient_checkpointing = value"
}
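In `ScaModel.forward` above, the caption-query tokens produced by the mask decoder are mapped into the language model's embedding space by the single `language_project` linear layer, then prepended to the text token embeddings with a matching all-ones attention mask. A sketch of that gluing step with made-up dimensions (256-d decoder, 768-d LM, tiny vocabulary):

```python
import torch
import torch.nn as nn

decoder_hidden, lm_hidden, vocab = 256, 768, 1000
batch, num_query_tokens, seq_len = 2, 8, 5

language_project = nn.Linear(decoder_hidden, lm_hidden)
token_embedding = nn.Embedding(vocab, lm_hidden)       # stand-in for language_model.get_input_embeddings()

query_outputs = torch.randn(batch, num_query_tokens, decoder_hidden)   # from the mask/caption decoder
input_ids = torch.randint(0, vocab, (batch, seq_len))
attention_mask = torch.ones(batch, seq_len, dtype=torch.long)

query_embeds = language_project(query_outputs)                          # (2, 8, 768)
inputs_embeds = torch.cat([query_embeds, token_embedding(input_ids)], dim=1)
attention_mask = torch.cat(
    [torch.ones(batch, num_query_tokens, dtype=torch.long), attention_mask], dim=1
)
print(inputs_embeds.shape, attention_mask.shape)        # torch.Size([2, 13, 768]) torch.Size([2, 13])
```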
] | import sys
import pytest
import requests
import torch
import time
import numpy as np
import transformers
from PIL import Image
from src.models.sca import ScaConfig, ScaModel, ScaProcessor
from typing import Sequence
from torch.nn.utils.rnn import pad_sequence | 7,921 |
sys.path.append(".")
cache_dir = ".model.cache"
device = "cuda" if torch.cuda.is_available() else "cpu"
sam_model_name = "facebook/sam-vit-base"
text_model_name = "gpt2"
additional_num_hidden_layers = 2
@pytest.fixture
def model():
|
sys.path.append(".")
cache_dir = ".model.cache"
device = "cuda" if torch.cuda.is_available() else "cpu"
sam_model_name = "facebook/sam-vit-base"
text_model_name = "gpt2"
additional_num_hidden_layers = 2
@pytest.fixture
def model(): | model = ScaModel.from_sam_text_pretrained( | 2 | 2023-11-17 14:10:41+00:00 | 12k |
artwalker/EasyTranslator | easy_translator.py | [
{
"identifier": "CommandArgs",
"path": "command_args.py",
"snippet": "class CommandArgs:\r\n \"\"\"A class to read the arguments from command line .\"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"Initialize command arguments.\"\"\"\r\n # Use the argparse module in the Python standard library to parse command-line arguments.\r\n parser = argparse.ArgumentParser()\r\n # Receive the parameter of the file name to be translated in the command line.\r\n parser.add_argument(\"filename\", help=\"Name of the input file\")\r\n # Select if show the text and the translated text in the console.\r\n parser.add_argument(\"--show\", help=\"Show the text and the translated text in the console\", action=\"store_true\")\r\n # Select if use the api from Azure.\r\n parser.add_argument(\"--azure\", help=\"Use the api from Azure.\", action=\"store_true\")\r\n # The test mode: only translate the first 3 short texts\r\n parser.add_argument(\"--test\", help=\"Only translate the first 3 short texts\", action=\"store_true\")\r\n # If use the translated name table\r\n parser.add_argument(\"--tlist\", help=\"Use the translated name table\", action=\"store_true\")\r\n \r\n self.args = parser.parse_args()\r"
},
{
"identifier": "ParameterReader",
"path": "parameter_reader.py",
"snippet": "class ParameterReader:\n \"\"\"A class to read the parameters from the settings.cfg file and the .env file.\"\"\"\n\n def __init__(self, commandArgs):\n \"\"\"Read the parameters from the settings.cfg file and the .env file.\"\"\"\n # The command line arguments\n self.commandArgs = commandArgs\n self.filename = \"\"\n self.show = \"\"\n self.azure = \"\"\n self.test = \"\"\n self.tlist = \"\"\n self.base_filename = \"\"\n self.file_extension = \"\"\n self.new_filename = \"\"\n self.new_filenametxt = \"\"\n self.jsonfile = \"\"\n self.translated_dict = {}\n self.api_proxy_url = \"\"\n self.gpt_model = \"\"\n self.openai_api_engine_azure = \"\"\n self.openai_api_model_azure = \"\"\n self.client = \"\"\n self.non_azure_client = \"\"\n self.gpt_temperature = \"\"\n \n # The arguments from the settings.cfg file\n self.language = \"\"\n self.prompt_template = \"\"\n self.prompt = \"\"\n self.bilingual_output = \"\"\n self.language_code = \"\"\n self.api_proxy = \"\"\n self.startpage = \"\"\n self.endpage = \"\"\n self.transliteration_list_file = \"\"\n self.transliteration_word_capi_low = \"\"\n\n # 1. Set the parameters from the command line.\n self._set_args_from_command()\n # 3. Set the parameters from the settings.cfg file and the .env file.\n self._set_args_from_parameter_reader()\n # 2. Set the OpenAI API key.\n self._access_openai_key()\n # 4. Load the translated dictionary from the json file.\n self._load_tranlated_dict()\n\n def _access_openai_key(self):\n \"\"\"set the OpenAI API key.\"\"\"\n _ = load_dotenv(find_dotenv(), override=True)\n self.gpt_temperature = float(os.getenv('GPT_TEMPERATURE'))\n if self.azure:\n # imort the azure.identity package\n from openai import AzureOpenAI\n\n # Set the Azure OpenAI parameters\n self.client = AzureOpenAI(\n api_version=os.getenv('OPENAI_API_VERSION_AZURE'),\n azure_endpoint=os.getenv('OPENAI_API_ENDPOINT_AZURE'),\n api_key=os.getenv('OPENAI_API_KEY_AZURE'),\n )\n\n self.openai_api_model_azure = os.getenv('OPENAI_API_MODEL_AZURE')\n else:\n # Get the OpenAI API keys from the .env file\n key_sets = os.getenv('OPENAI_API_KEY')\n # If there are multiple keys, split them into an array\n key_array = key_sets.split(',')\n\n if len(self.api_proxy) == 0:\n # Set the OpenAI API key\n openai.api_key = random.choice(key_array)\n else:\n # Create an OpenAI client with proxy\n api_key = random.choice(key_array)\n self.api_proxy_url = self.api_proxy\n base_url = os.environ.get(\"OPENAI_API_URL\", self.api_proxy_url)\n self.non_azure_client = openai.OpenAI(api_key=api_key, base_url=base_url)\n print(\"-\" * 3)\n print(f\"\\033[1;32mUsing OpenAI API proxy, the proxy address is: {base_url}\\033[0m\")\n\n self.gpt_model = os.getenv('GPT_MODEL')\n\n def _set_args_from_parameter_reader(self):\n \"\"\"Get the settings from the settings.cfg file.\"\"\"\n with open('settings.cfg', 'rb') as f:\n content = f.read()\n self.encoding = chardet.detect(content)['encoding']\n\n with open('settings.cfg', encoding=self.encoding) as f:\n config_text = f.read()\n self.config = configparser.ConfigParser()\n self.config.read_string(config_text)\n \n # Get the settings from the settings.cfg file\n self.language = self.config.get('config', 'language')\n self.prompt_template = self.config.get('config', 'prompt')\n self.prompt = self.prompt_template.format(self.language)\n self.bilingual_output = self.config.get('config', 'bilingual-output')\n self.language_code = self.config.get('config', 'langcode')\n self.api_proxy=self.config.get('config', 'openai-proxy')\n # Get the 
start and end page of the PDF file\n self.startpage = self.config.getint('config', 'startpage', fallback=1)\n self.endpage = self.config.getint('config', 'endpage', fallback=-1)\n # Get the transliteration list file\n self.transliteration_list_file = self.config.get('config', 'transliteration-list')\n # Get the setting of case to determine whether to do transliteration\n self.transliteration_word_capi_low = self.config.get('config', 'transliteration-word-capi-low')\n\n def _set_args_from_command(self):\n \"\"\"Set arguments from the command line.\"\"\"\n self.filename = self.commandArgs.args.filename\n self.show = self.commandArgs.args.show\n self.test = self.commandArgs.args.test\n self.tlist = self.commandArgs.args.tlist\n self.azure = self.commandArgs.args.azure\n\n self.base_filename, self.file_extension = os.path.splitext(self.filename)\n self.new_filename = self.base_filename + \"_translated.epub\"\n self.new_filenametxt = self.base_filename + \"_translated.txt\"\n self.jsonfile = self.base_filename + \"_process.json\"\n\n def _load_tranlated_dict(self):\n \"\"\"\n Load the translated dictionary from the json file.\n Such as the translation stoped in the middle, \n and the translated dictionary is saved in the json file.\n So we can continue the translation from the last stop.\n \"\"\"\n try:\n if os.path.getsize(self.jsonfile) > 0:\n with open(self.jsonfile, \"r\", encoding=\"utf-8\") as f:\n self.translated_dict = json.load(f)\n except Exception as e:\n #print(e)\n pass"
},
{
"identifier": "ProcessFile",
"path": "process_file.py",
"snippet": "class ProcessFile:\r\n \"\"\"A class about according to the file extension, use the corresponding function to convert the file to text.\"\"\"\r\n\r\n def __init__(self, parameterReader):\r\n \"\"\"Initialize the title of filename and text which receives the contents of file.\"\"\"\r\n self.filename = \"\"\r\n self.start_page = 0\r\n self.end_page = 0\r\n self.total_pages = 0\r\n self.transliteration_list_file = \"\"\r\n self.transliteration_word_capi_low = \"\"\r\n self.bilingual_output = \"\"\r\n self.prompt = \"\"\r\n self.language_code = \"\"\r\n self.jsonfile = \"\"\r\n self.translated_dict = \"\"\r\n self.new_filename = \"\"\r\n self.new_filenametxt = \"\"\r\n self.show = \"\"\r\n self.azure = \"\"\r\n self.tlist = \"\"\r\n self.test = \"\"\r\n self.gpt_model = \"\"\r\n self.gpt_temperature = \"\"\r\n\r\n self.title = \"\"\r\n self.text = \"\"\r\n self.book = \"\"\r\n self.total_tokens = 0\r\n self.completion_tokens = 0\r\n self.prompt_tokens = 0\r\n self.short_text_list = \"\"\r\n self.translated_text = \"\"\r\n self.translated_short_text = \"\"\r\n self.count = 0\r\n self.messages = \"\"\r\n\r\n self.client = \"\"\r\n self.non_azure_client = \"\"\r\n\r\n self._set_args_from_parameterReader(parameterReader)\r\n\r\n def _set_args_from_parameterReader(self, parameterReader):\r\n \"\"\"Set args from parameterReader.\"\"\"\r\n self.filename = parameterReader.filename\r\n self.start_page = parameterReader.startpage\r\n self.end_page = parameterReader.endpage\r\n self.total_pages = 0\r\n self.transliteration_list_file = parameterReader.transliteration_list_file\r\n self.transliteration_word_capi_low = parameterReader.transliteration_word_capi_low\r\n self.bilingual_output = parameterReader.bilingual_output\r\n self.prompt = parameterReader.prompt\r\n self.language_code = parameterReader.language_code\r\n self.jsonfile = parameterReader.jsonfile\r\n self.translated_dict = parameterReader.translated_dict\r\n self.new_filename = parameterReader.new_filename\r\n self.new_filenametxt = parameterReader.new_filenametxt\r\n self.show = parameterReader.show\r\n self.tlist = parameterReader.tlist\r\n self.test = parameterReader.test\r\n self.gpt_model = parameterReader.gpt_model\r\n self.gpt_temperature = parameterReader.gpt_temperature\r\n self.api_proxy = parameterReader.api_proxy\r\n\r\n self.azure = parameterReader.azure\r\n if self.azure:\r\n self.client = parameterReader.client\r\n self.openai_api_model_azure = parameterReader.openai_api_model_azure\r\n \r\n if len(self.api_proxy) != 0:\r\n self.non_azure_client = parameterReader.non_azure_client\r\n\r\n\r\n def _get_pdf_total_pages(self):\r\n \"\"\"Get total pages.\"\"\"\r\n with open(self.filename, 'rb') as file:\r\n parser = PDFParser(file)\r\n document = PDFDocument(parser)\r\n self.total_pages = len(list(PDFPage.create_pages(document)))\r\n\r\n def _convert_pdf_to_text(self):\r\n \"\"\"Access the contents of the PDF file and convert it to text.\"\"\"\r\n print(\"\\033[1;32mINFO:Converting pdf to text.\\033[0m\")\r\n self.text = pdfminer.high_level.extract_text(self.filename, page_numbers=list(range(self.start_page - 1, self.end_page)))\r\n print(\"-\" * 3)\r\n if self.show:\r\n print(\"*\" * 3)\r\n print(self.text)\r\n print(\"*\" * 3)\r\n print(\"\\033[1;32mINFO:Finished converting pdf to text\\033[0m\")\r\n\r\n def _convert_mobi_to_text(self):\r\n \"\"\"Access the content fo mobi and then convert it to text.\"\"\"\r\n # Extract MOBI contents to a temporary directory\r\n with tempfile.TemporaryDirectory() as tempdir:\r\n 
tempdir, filepath = mobi.extract(self.filename)\r\n\r\n # Find the HTML file in the temporary directory\r\n for root, _, files in os.walk(tempdir):\r\n for file in files:\r\n if file.endswith(\".html\"):\r\n html_file = os.path.join(root, file)\r\n break\r\n else:\r\n continue\r\n break\r\n else:\r\n raise FileNotFoundError(\"ERROR:HTML file not found in the extracted MOBI contents\")\r\n\r\n # Parse the HTML file with BeautifulSoup to get the text\r\n with open(html_file, \"r\", encoding=\"utf-8\") as f:\r\n soup = BeautifulSoup(f.read(), \"html.parser\")\r\n self.text = soup.get_text()\r\n\r\n def _convert_docx_to_text(self):\r\n \"\"\"Access the content of docx and then convert it to text.\"\"\"\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Parsing the DOCX content.\\033[0m\")\r\n doc = docx.Document(self.filename)\r\n\r\n for paragraph in doc.paragraphs:\r\n self.text += paragraph.text + \"\\n\"\r\n\r\n def _convert_epub_to_text(self):\r\n \"\"\"Convert epub to text.\"\"\"\r\n # Access all contents\r\n for item in self.book.get_items():\r\n if item.get_type() == ebooklib.ITEM_DOCUMENT:\r\n # Use BeautifulSoup to extract the original text\r\n soup = BeautifulSoup(item.get_content(), 'html.parser')\r\n self.text += re.sub(r'\\n+', '\\n', soup.get_text().strip())\r\n\r\n def _text_replace(self):\r\n \"\"\"Replace the text according to the transliteration table.\"\"\"\r\n # Read the excel file and store the first column and the second column as two lists\r\n df = pd.read_excel(self.transliteration_list_file)\r\n old_words = df.iloc[:, 0].tolist()\r\n new_words = df.iloc[:, 1].tolist()\r\n # Order the old word list in descending order of length and synchronize the new word list\r\n old_words, new_words = zip(*sorted(zip(old_words, new_words), key=lambda x: len(x[0]), reverse=True))\r\n # Iterate through two lists and replace strings\r\n for i in range(len(old_words)):\r\n # If ingore the case, convert the string and the word to be replaced to lowercase\r\n if not self.transliteration_word_capi_low:\r\n lower_old_word = old_words[i].lower()\r\n # Use the regular expression to replace, note that the original string case is retained\r\n self.text = re.sub(r\"\\b\" + lower_old_word + r\"\\b\", new_words[i], self.text, flags=re.IGNORECASE)\r\n else:\r\n # If care about the case, just use the regular expression to replace\r\n self.text = re.sub(r\"\\b\" + old_words[i] + r\"\\b\", new_words[i], self.text)\r\n\r\n def _text_replace_reverse(self, text):\r\n \"\"\"Replace the text according to the transliteration table in reverse order.\"\"\"\r\n # Read the excel file and store the first column and the second column as two lists\r\n df = pd.read_excel(self.transliteration_list_file)\r\n old_words = df.iloc[:, 0].tolist() # Swapped\r\n new_words = df.iloc[:, 1].tolist() # Swapped\r\n # Order the new word list in descending order of length and synchronize the old word list\r\n new_words, old_words = zip(*sorted(zip(new_words, old_words), key=lambda x: len(x[0]), reverse=True))\r\n # Iterate through two lists and replace strings\r\n for i in range(len(new_words)):\r\n # If ignore the case, convert the string and the word to be replaced to lowercase\r\n if not self.transliteration_word_capi_low:\r\n lower_new_word = new_words[i].lower()\r\n # Use the regular expression to replace, note that the original string case is retained\r\n text = re.sub(r\"\\b\" + lower_new_word + r\"\\b\", old_words[i], text, flags=re.IGNORECASE)\r\n else:\r\n # If care about the case, just use the regular expression to 
replace\r\n text = re.sub(r\"\\b\" + new_words[i] + r\"\\b\", old_words[i], text)\r\n\r\n return text\r\n\r\n def _reverse_text_replace_reverse(self, text):\r\n \"\"\"Reverse the text according to the transliteration table in reverse order.\"\"\"\r\n # Read the excel file and store the first column and the second column as two lists\r\n df = pd.read_excel(self.transliteration_list_file)\r\n new_words = df.iloc[:, 0].tolist() # Swapped\r\n old_words = df.iloc[:, 1].tolist() # Swapped\r\n # Order the new word list in descending order of length and synchronize the old word list\r\n new_words, old_words = zip(*sorted(zip(new_words, old_words), key=lambda x: len(x[0]), reverse=True))\r\n # Iterate through two lists and replace strings\r\n for i in range(len(new_words)):\r\n # If ignore the case, convert the string and the word to be replaced to lowercase\r\n if not self.transliteration_word_capi_low:\r\n lower_new_word = new_words[i].lower()\r\n # Use the regular expression to replace, note that the original string case is retained\r\n text = re.sub(r\"\\b\" + lower_new_word + r\"\\b\", old_words[i], text, flags=re.IGNORECASE)\r\n else:\r\n # If care about the case, just use the regular expression to replace\r\n text = re.sub(r\"\\b\" + new_words[i] + r\"\\b\", old_words[i], text)\r\n\r\n return text\r\n\r\n def _split_text(self):\r\n \"\"\"Divide the text into a list of short texts with no more than 1024 characters.\"\"\"\r\n # Use the regular expression to split the text into a list of sentences\r\n sentence_list = re.findall(r'.+?[。!?!?.]', self.text)\r\n # Initialize the short text list\r\n self.short_text_list = []\r\n # Initialize the current short text\r\n short_text = \"\"\r\n # Iterate through the sentence list\r\n for s in sentence_list:\r\n # If the current short plus the length of the new sentence is not greater than 1024, add the new sentence to the current short\r\n if len(short_text + s) <= 1024:\r\n short_text += s\r\n # If the current short plus the length of the new sentence is greater than 1024, add the current short to the short text list and reset the current short to the new sentence\r\n else:\r\n self.short_text_list.append(short_text)\r\n short_text = s\r\n # Add the last short text to the short text list\r\n self.short_text_list.append(short_text)\r\n\r\n def _replace_sign(self, text):\r\n \"\"\"Replace the period with a period plus line break.\"\"\"\r\n text = text.replace(\". 
\", \".\\n\")\r\n text = text.replace(\"。\", \"。\\n\")\r\n text = text.replace(\"?\", \"?\\n\")\r\n text = text.replace(\"?\", \"?\\n\")\r\n text = text.replace(\"!\", \"!\\n\")\r\n text = text.replace(\"。\\n”\", \"。”\\n\")\r\n text = text.replace(\"!\\n”\", \"!”\\n\")\r\n text = text.replace(\"?\\n”\", \"?”\\n\")\r\n return text\r\n\r\n def _get_completion_from_messages(self):\r\n \"\"\"Get completion from messages.\"\"\"\r\n if len(self.api_proxy) == 0:\r\n response = openai.chat.completions.create(\r\n model=self.gpt_model,\r\n messages=self.messages,\r\n temperature=self.gpt_temperature,\r\n )\r\n else:\r\n response = self.non_azure_client.chat.completions.create(\r\n model=self.gpt_model,\r\n messages=self.messages,\r\n temperature=self.gpt_temperature,\r\n )\r\n\r\n content = response.choices[0].message.content\r\n\r\n token_dict = {\r\n 'prompt_tokens':response.usage.prompt_tokens,\r\n 'completion_tokens':response.usage.completion_tokens,\r\n 'total_tokens':response.usage.total_tokens,\r\n }\r\n\r\n return content, token_dict\r\n\r\n def _get_completion_from_messages_by_azure(self):\r\n \"\"\"Get completion from messages by azure.\"\"\"\r\n response = self.client.chat.completions.create(\r\n model=self.openai_api_model_azure,\r\n messages=self.messages,\r\n temperature=self.gpt_temperature, \r\n )\r\n\r\n #print(str(response.choices[0].message))\r\n content = response.choices[0].message.content\r\n\r\n token_dict = {\r\n 'prompt_tokens':response.usage.prompt_tokens,\r\n 'completion_tokens':response.usage.completion_tokens,\r\n 'total_tokens':response.usage.total_tokens,\r\n }\r\n return content, token_dict\r\n\r\n def _comletion_tokens(self):\r\n \"\"\"Get comletion and tokens.\"\"\"\r\n if self.azure:\r\n completion, token_dict = self._get_completion_from_messages_by_azure()\r\n else:\r\n completion, token_dict = self._get_completion_from_messages()\r\n self.translated_short_text = (\r\n completion\r\n .encode(\"utf8\")\r\n .decode()\r\n )\r\n # Get the token usage from the API response\r\n self.total_tokens += token_dict['total_tokens']\r\n self.completion_tokens += token_dict['completion_tokens']\r\n self.prompt_tokens += token_dict['prompt_tokens']\r\n\r\n def _translate_text(self, content):\r\n \"\"\"Translate the text.\"\"\"\r\n # Call the OpenAI API for translation\r\n try:\r\n self.messages = [\r\n {'role':'system', \r\n 'content': f\"You are a translation assistant.Your task is to translate the content given to you by the user.{self.prompt}\"},\r\n {'role': 'user',\r\n 'content': f\"{content}\\n\"},\r\n ]\r\n self._comletion_tokens()\r\n except Exception as e:\r\n # Time to wait for limitation of ChatGPT\r\n sleep_time = 60 * 3 + 5\r\n print(e, \"\\n\"+f\"Sleep {sleep_time} seconds.\")\r\n time.sleep(sleep_time)\r\n self._comletion_tokens()\r\n\r\n def _translate_and_store(self, text):\r\n \"\"\"Tranlate and store text.\"\"\"\r\n if self.tlist:\r\n # Revert the replacement so that it can be judged whether the text has been translated\r\n text = self._text_replace_reverse(text)\r\n # If the text has been translated, return the translation result directly\r\n if text in self.translated_dict:\r\n self.translated_short_text = self.translated_dict[text]\r\n else:\r\n # Before translation, replace the text according to the transliteration table\r\n text = self._reverse_text_replace_reverse(text)\r\n # Else, call the translate_text function to translate and store the result in the dictionary\r\n self._translate_text(text)\r\n # Reverse the replacement of the transliteration table 
so than the text keeps the original content\r\n text = self._text_replace_reverse(text)\r\n self.translated_dict[text] = self.translated_short_text\r\n # Save the dictionary as a JSON file\r\n with open(self.jsonfile, \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.translated_dict, f, ensure_ascii=False, indent=4)\r\n else:\r\n # If the text has been translated, return the translation result directly\r\n if text in self.translated_dict:\r\n self.translated_short_text = self.translated_dict[text]\r\n else:\r\n # Else, call the translate_text function to translate and store the result in the dictionary\r\n self._translate_text(text)\r\n self.translated_dict[text] = self.translated_short_text\r\n # Save the dictionary as a JSON file\r\n with open(self.jsonfile, \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.translated_dict, f, ensure_ascii=False, indent=4)\r\n\r\n def _process_text(self):\r\n \"\"\"Process the text.\"\"\"\r\n # Replace all line breaks with spaces\r\n self.text = self.text.replace(\"\\n\", \" \")\r\n # Replace multiple spaces with one space\r\n self.text = re.sub(r\"\\s+\", \" \", self.text)\r\n # If the transliteration table replacement is set, replace the text before translation\r\n if self.tlist:\r\n self._text_replace()\r\n # Split the text into short texts of no more than 1024 characters\r\n self._split_text()\r\n # If the test mode is turned on, only translate the first 3 short texts\r\n if self.test:\r\n self.short_text_list = self.short_text_list[:3]\r\n # Iterate through the short text list and translate each short text in turn\r\n for short_text in self.short_text_list:\r\n self.count += 1\r\n # Translate the current short text\r\n time.sleep(0.5)\r\n self._translate_and_store(short_text)\r\n short_text = self._replace_sign(short_text)\r\n self.translated_short_text = self._replace_sign(self.translated_short_text)\r\n short_text = self._text_replace_reverse(short_text)\r\n # Add the current short text and the translated text to the total text\r\n if self.bilingual_output.lower() == 'true':\r\n self.translated_text += f\"{short_text}<br>\\n{self.translated_short_text}<br>\\n\"\r\n else:\r\n self.translated_text += f\"{self.translated_short_text}<br>\\n\"\r\n if self.show:\r\n print(\"*\" * 3)\r\n print(short_text)\r\n print(\"*\" * 1)\r\n print(self.translated_short_text)\r\n print(\"*\" * 3)\r\n\r\n def _text_to_epub(self):\r\n \"\"\"Write the translated text to the epub file.\"\"\"\r\n text = self.translated_text.replace('\\n', '<br>').replace(\"\\n\", \"<br>\")\r\n # Create an epub book object\r\n book = epub.EpubBook()\r\n # Set the metadata\r\n book.set_identifier(str(random.randint(100000, 999999)))\r\n book.set_title(self.title)\r\n book.set_language(self.language_code)\r\n # Create a chapter object\r\n c = epub.EpubHtml(title='Chapter 1', file_name='chap_1.xhtml', lang=self.language_code)\r\n c.content = text\r\n # Add the chapter to the book\r\n book.add_item(c)\r\n # Add the table of contents\r\n book.toc = (epub.Link('chap_1.xhtml', 'Chapter 1', 'chap_1'),)\r\n # Set spine order\r\n book.spine = ['nav', c]\r\n # Add navigation files\r\n book.add_item(epub.EpubNcx())\r\n book.add_item(epub.EpubNav())\r\n # Write the content to the epub book\r\n #print(\"\\n\" + text)\r\n try:\r\n epub.write_epub(self.new_filename, book, {})\r\n except Exception as e:\r\n print(f\"Failed to write EPUB: {e}\")\r\n\r\n def _get_title_of_md(self):\r\n \"\"\"Get title of the md.\"\"\"\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Parsing the md title.\\033[0m\")\r\n 
with open(self.filename, 'r', encoding='utf-8') as file:\r\n for line in file:\r\n if line.startswith('#'):\r\n self.title = line.replace('#', '').strip()\r\n break\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Finished parsing the md title.\\033[0m\")\r\n\r\n def _get_title_of_txt(self):\r\n \"\"\"Get title of the txt.\"\"\"\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Parsing the txt title.\\033[0m\")\r\n title_extension = os.path.basename(self.filename)\r\n self.title = os.path.splitext(title_extension)[0]\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Finished parsing the txt title.\\033[0m\")\r\n\r\n def _get_title_of_docx(self):\r\n \"\"\"Get title of the docx.\"\"\"\r\n try:\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Parsing the docx file.\\033[0m\")\r\n with zipfile.ZipFile(self.filename) as zf:\r\n core_properties = etree.fromstring(zf.read(\"docProps/core.xml\"))\r\n\r\n ns = {\"cp\": \"http://schemas.openxmlformats.org/package/2006/metadata/core-properties\",\r\n \"dc\": \"http://purl.org/dc/elements/1.1/\",\r\n \"dcterms\": \"http://purl.org/dc/terms/\",\r\n \"dcmitype\": \"http://purl.org/dc/dcmitype/\",\r\n \"xsi\": \"http://www.w3.org/2001/XMLSchema-instance\"}\r\n \r\n title_elements = core_properties.findall(\"dc:title\", ns)\r\n if title_elements:\r\n self.title = title_elements[0].text\r\n else:\r\n self.title = \"INFO:Unknown title.\"\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Finished parsing the docx title.\\033[0m\")\r\n except Exception as e:\r\n print(f\"An error occurred: {e}\")\r\n print(\"*\" * 6)\r\n print(\"\\033[91mERROR:Parsing the DOCX file.\\033[0m\")\r\n print(\"*\" * 6)\r\n\r\n def _get_title_of_pdf(self):\r\n \"\"\"Get title of the pdf.\"\"\"\r\n try:\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Parsing the pdf title.\\033[0m\") \r\n with open(self.filename, 'rb') as file:\r\n parser = PDFParser(file)\r\n document = PDFDocument(parser)\r\n if 'Title' in document.info:\r\n self.title = document.info['Title']\r\n else:\r\n text = pdfminer.high_level.extract_text(file)\r\n match = re.search(r'(?<=\\n)([^\\n]+)(?=\\n)', text)\r\n if match:\r\n self.title = match.group(1)\r\n else:\r\n self.title = \"INFO:Unknown title.\"\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Finished parsing the pdf title.\\033[0m\") \r\n except Exception as e:\r\n print(f\"An error occurred: {e}\")\r\n print(\"*\" * 6)\r\n print(\"\\033[91mERROR:Parsing the pdf title.\\033[0m\")\r\n print(\"*\" * 6)\r\n\r\n # step 1\r\n def get_title(self):\r\n \"\"\"Get the title of file.\"\"\"\r\n if self.filename.endswith('.pdf'):\r\n self._get_title_of_pdf()\r\n self._get_pdf_total_pages()\r\n elif self.filename.endswith('.txt'):\r\n self._get_title_of_txt()\r\n elif self.filename.endswith('.docx'):\r\n self._get_title_of_docx()\r\n elif self.filename.endswith('.mobi'):\r\n pass\r\n elif self.filename.endswith('.epub'):\r\n self.book = epub.read_epub(self.filename)\r\n elif self.filename.endswith('.md'):\r\n self._get_title_of_md()\r\n else:\r\n print(\"-\" * 3)\r\n print(\"\\033[91mINFO:Unsupported file type right now.\\033[0m\")\r\n print(\"-\" * 3)\r\n sys.exit(0)\r\n\r\n def _get_md_content(self):\r\n \"\"\"Get md content.\"\"\"\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Parsing the md content.\\033[0m\")\r\n with open(self.filename, 'r', encoding='utf-8') as file:\r\n self.text = file.read()\r\n\r\n def _get_txt_content(self):\r\n \"\"\"Get txt content.\"\"\"\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Parsing the txt content.\\033[0m\")\r\n with 
open(self.filename, 'r', encoding='utf-8') as file:\r\n self.text = file.read()\r\n\r\n def _get_pdf_content(self):\r\n \"\"\"Get pdf content.\"\"\"\r\n try:\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Parsing the pdf content.\\033[0m\")\r\n print(\"-\" * 3)\r\n print(f\"\\033[1;32mINFO:Total pages of the pdf: {self.total_pages}\\033[0m\") \r\n if self.end_page == -1:\r\n self.end_page = self.total_pages\r\n print(\"-\" * 3)\r\n print(f\"\\033[1;32mINFO:Converting pdf from: Page {self.start_page} to Page {self.end_page}.\\033[0m\") \r\n print(\"-\" * 3)\r\n self._convert_pdf_to_text()\r\n except Exception as e:\r\n print(f\"An error occurred: {e}\")\r\n print(\"*\" * 6)\r\n print(\"\\033[91mERROR:Parsing the pdf content.\\033[0m\")\r\n print(\"*\" * 6)\r\n\r\n def _get_mobi_content(self):\r\n \"\"\"Get mobi content.\"\"\"\r\n try:\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Parsing the mobi content.\\033[0m\")\r\n self._convert_mobi_to_text()\r\n except Exception as e:\r\n print(f\"An error occurred: {e}\")\r\n print(\"*\" * 6)\r\n print(\"\\033[91mERROR:Parsing the MOBI content.\\033[0m\")\r\n print(\"*\" * 6)\r\n\r\n def _get_epub_content(self):\r\n \"\"\"Get mobi content.\"\"\"\r\n try:\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Parsing the EPUB content.\\033[0m\")\r\n self._convert_epub_to_text()\r\n except Exception as e:\r\n print(f\"An error occurred: {e}\")\r\n print(\"*\" * 6)\r\n print(\"\\033[91mERROR:Parsing the EPUB content.\\033[0m\")\r\n print(\"*\" * 6)\r\n\r\n # step 2\r\n def convert_text(self):\r\n \"\"\"Convert the file ending with differnt types to text.\"\"\"\r\n if self.filename.endswith('.pdf'):\r\n self._get_pdf_content()\r\n elif self.filename.endswith('.txt'):\r\n self._get_txt_content()\r\n elif self.filename.endswith('.mobi'):\r\n self._get_mobi_content()\r\n elif self.filename.endswith('.docx'):\r\n self._convert_docx_to_text()\r\n elif self.filename.endswith('.epub'):\r\n self._get_epub_content()\r\n elif self.filename.endswith('.md'):\r\n self._get_md_content()\r\n else:\r\n print(\"\\033[91mINFO:Unsupported to access the content of this file type right now.\\033[0m\")\r\n\r\n # step 3\r\n def tranlate_file(self):\r\n \"\"\"Translate the file.\"\"\"\r\n if self.filename.endswith('.epub'):\r\n # Access all chapters of the epub file\r\n items = self.book.get_items()\r\n # Iterate through all chapters\r\n translated_all = ''\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Translating the file content.\\033[0m\")\r\n for item in items:\r\n # If the chapter type is a document type, it needs to be translated\r\n if item.get_type() == ebooklib.ITEM_DOCUMENT:\r\n # Use BeautifulSoup to extract the original text\r\n soup = BeautifulSoup(item.get_content(), 'html.parser')\r\n self.text = soup.get_text().strip()\r\n img_html = ''\r\n img_tags = soup.find_all('img')\r\n for img_tag in img_tags:\r\n img_html += str(img_tag) + '<br>'\r\n # If the text is empty, skip this chapter\r\n if not self.text:\r\n continue\r\n self._process_text()\r\n # Replace the original chapter content with the translated text\r\n item.set_content((img_html + self.translated_text.replace('\\n', '<br>')).encode('utf-8'))\r\n translated_all += self.translated_text\r\n # If the test mode is turned on, only translate the first 3 chapters\r\n if self.test and self.count >= 3:\r\n break\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Finished parsing and translating the file.\\033[0m\")\r\n # Write content to the epub file\r\n epub.write_epub(self.new_filename, self.book, {})\r\n # 
Write the translated text to the txt file\r\n with open(self.new_filenametxt, \"w\", encoding=\"utf-8\") as f:\r\n f.write(translated_all.replace('<br>', ''))\r\n else:\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Translating the file content.\\033[0m\")\r\n self._process_text()\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Finished parsing and translating the file.\\033[0m\")\r\n print(\"-\" * 3)\r\n # Write the translated text to the epub file\r\n print(\"\\033[1;32mINFO:Writing the translated text to epub.\\033[0m\") # 输出绿色的 \"DEBUG\"\r\n self._text_to_epub()\r\n # Write the translated text to the txt file\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Writing the translated text to the txt file.\\033[0m\")\r\n with open(self.new_filenametxt, \"w\", encoding=\"utf-8\") as f:\r\n f.write(self.translated_text.replace('<br>', ''))\r\n\r\n # step 4\r\n def caculate_tokens_costs(self):\r\n \"\"\"Caculate the tokens.\"\"\"\r\n cost = self.completion_tokens / 1000 * 0.002 + self.prompt_tokens / 1000 * 0.001\r\n print(\"-\" * 3)\r\n print(f\"\\033[1;32mINFO:Use completion tokens: {self.completion_tokens}.\\033[0m\")\r\n print(\"-\" * 3)\r\n print(f\"\\033[1;32mINFO:Use prompt tokens: {self.prompt_tokens}.\\033[0m\")\r\n print(\"-\" * 3)\r\n print(f\"\\033[1;32mINFO:Use total tokens: {self.total_tokens}.\\033[0m\")\r\n print(\"-\" * 3)\r\n print(f\"\\033[1;32mINFO:Total approximate cost: ${cost}.\\033[0m\")\r\n print(\"-\" * 3)\r\n print(f\"\\033[1;34mINFO:Translation completed.\\033[0m\")\r\n print(\"-\" * 3)\r\n\r\n # step 5\r\n def remove_jsonfile(self):\r\n \"\"\"Remove the jsonfile.\"\"\"\r\n try:\r\n os.remove(self.jsonfile)\r\n print(f\"\\033[1;34mFile '{self.jsonfile}' has been deleted.\\033[0m\")\r\n print(\"-\" * 3)\r\n except Exception as e:\r\n print(f\"An error occurred: {e}\")\r\n print(\"*\" * 6)\r\n print(f\"\\033[91mERROR:File '{self.jsonfile}' not found. No file was deleted.\\033[0m\")\r\n print(\"*\" * 6)\r"
}
] | from command_args import CommandArgs
from parameter_reader import ParameterReader
from process_file import ProcessFile
| 9,077 |
class TranslateText:
"""Overall class to manage text translation."""
def __init__(self):
"""Initialize parameters about the text translation."""
# 1. Read the command line arguments.
self.commandArgs = CommandArgs()
# 2. Read the parameters from the settings.cfg file and the .env file.
# and process the parameters.
self.parameterReader = ParameterReader(self.commandArgs)
# 3. Prepare to translate the text.
|
class TranslateText:
"""Overall class to manage text translation."""
def __init__(self):
"""Initialize parameters about the text translation."""
# 1. Read the command line arguments.
self.commandArgs = CommandArgs()
# 2. Read the parameters from the settings.cfg file and the .env file.
# and process the parameters.
self.parameterReader = ParameterReader(self.commandArgs)
# 3. Prepare to translate the text.
| self.processFile = ProcessFile(self.parameterReader)
| 2 | 2023-11-10 15:56:06+00:00 | 12k |
ShipBit/wingman-ai | main.py | [
{
"identifier": "AudioRecorder",
"path": "services/audio_recorder.py",
"snippet": "class AudioRecorder(FileCreator):\n def __init__(\n self,\n app_root_dir: str,\n samplerate: int = 44100,\n channels: int = 1,\n ):\n super().__init__(app_root_dir, RECORDING_PATH)\n self.file_path = self.get_full_file_path(RECORDING_FILE)\n\n self.samplerate = samplerate\n self.is_recording = False\n self.recording = None\n\n self.recstream = sounddevice.InputStream(\n callback=self.__handle_input_stream,\n channels=channels,\n samplerate=samplerate,\n )\n\n def __handle_input_stream(self, indata, _frames, _time, _status):\n if self.is_recording:\n if self.recording is None:\n self.recording = indata.copy()\n else:\n self.recording = numpy.concatenate((self.recording, indata.copy()))\n\n def start_recording(self):\n if self.is_recording:\n return\n\n self.recstream.start()\n self.is_recording = True\n printr.print(\"Recording started\", tags=\"grey\")\n\n def stop_recording(self) -> None | str:\n self.recstream.stop()\n self.is_recording = False\n printr.print(\"Recording stopped\", tags=\"grey\")\n\n if self.recording is None:\n printr.print(\"Ignored empty recording\", tags=\"warn\")\n return None\n if (len(self.recording) / self.samplerate) < 0.15:\n printr.print(\"Recording was too short to be handled by the AI\", tags=\"warn\")\n return None\n\n try:\n soundfile.write(self.file_path, self.recording, self.samplerate)\n self.recording = None\n return self.file_path\n except IndexError:\n printr.print(\"Ignored empty recording\", tags=\"warn\")\n return None"
},
{
"identifier": "SecretKeeper",
"path": "services/secret_keeper.py",
"snippet": "class SecretKeeper:\n def __init__(self, app_root_path: str):\n self.printr = Printr()\n self.system_config_path: str = os.path.join(app_root_path, SYSTEM_CONFIG_PATH)\n self.config_file = os.path.join(self.system_config_path, SECRETS_FILE)\n self.secrets = self.__load()\n if not self.secrets:\n self.secrets = {}\n\n def __load(self) -> dict[str, any]: # type: ignore\n parsed_config = None\n\n if os.path.exists(self.config_file) and os.path.isfile(self.config_file):\n with open(self.config_file, \"r\", encoding=\"UTF-8\") as stream:\n try:\n parsed_config = yaml.safe_load(stream)\n except yaml.YAMLError as e:\n self.printr.print_err(\n f\"Could not load ({SECRETS_FILE})\\n{str(e)}\", True\n )\n\n return parsed_config\n\n def save(self):\n \"\"\"Write all secrets to the file\"\"\"\n with open(self.config_file, \"w\", encoding=\"UTF-8\") as stream:\n try:\n yaml.dump(self.secrets, stream)\n return True\n except yaml.YAMLError as e:\n self.printr.print_err(\n f\"Could not write ({SECRETS_FILE})\\n{str(e)}\", True\n )\n return False\n\n def retrieve(\n self,\n requester: str,\n key: str,\n friendly_key_name: str,\n prompt_if_missing: bool = True,\n ) -> str:\n \"\"\"Retrieve secret a secret and optionally prompt user for it if missing\"\"\"\n\n secret = self.secrets.get(key, None)\n if not secret and prompt_if_missing:\n # Prompt user for key\n dialog = ctk.CTkInputDialog(\n text=f\"Please enter '{friendly_key_name}':\",\n title=f\"{requester} needs to know a secret\",\n )\n secret = dialog.get_input()\n if secret:\n secret = secret.strip().replace(\"\\n\", \"\")\n self.secrets[key] = secret\n self.save()\n\n return secret"
},
{
"identifier": "Tower",
"path": "services/tower.py",
"snippet": "class Tower:\n def __init__(self, config: dict[str, any], secret_keeper: SecretKeeper, app_root_dir: str): # type: ignore\n self.config = config\n self.app_root_dir = app_root_dir\n self.secret_keeper = secret_keeper\n self.key_wingman_dict: dict[str, Wingman] = {}\n self.broken_wingmen = []\n\n self.wingmen = self.__instantiate_wingmen()\n self.key_wingman_dict: dict[str, Wingman] = {}\n for wingman in self.wingmen:\n self.key_wingman_dict[wingman.get_record_key()] = wingman\n\n def __instantiate_wingmen(self) -> list[Wingman]:\n wingmen = []\n for wingman_name, wingman_config in self.config[\"wingmen\"].items():\n if wingman_config.get(\"disabled\") is True:\n continue\n\n global_config = {\n \"sound\": self.config.get(\"sound\", {}),\n \"openai\": self.config.get(\"openai\", {}),\n \"features\": self.config.get(\"features\", {}),\n \"edge_tts\": self.config.get(\"edge_tts\", {}),\n \"commands\": self.config.get(\"commands\", {}),\n \"elevenlabs\": self.config.get(\"elevenlabs\", {}),\n \"azure\": self.config.get(\"azure\", {}),\n }\n merged_config = self.__merge_configs(global_config, wingman_config)\n class_config = merged_config.get(\"class\")\n\n wingman = None\n # it's a custom Wingman\n try:\n if class_config:\n kwargs = class_config.get(\"args\", {})\n wingman = Wingman.create_dynamically(\n name=wingman_name,\n config=merged_config,\n secret_keeper=self.secret_keeper,\n module_path=class_config.get(\"module\"),\n class_name=class_config.get(\"name\"),\n app_root_dir=self.app_root_dir,\n **kwargs\n )\n else:\n wingman = OpenAiWingman(\n name=wingman_name,\n config=merged_config,\n secret_keeper=self.secret_keeper,\n app_root_dir=self.app_root_dir,\n )\n except MissingApiKeyException:\n self.broken_wingmen.append(\n {\n \"name\": wingman_name,\n \"error\": \"Missing API key. 
Please check your key config.\",\n }\n )\n except Exception as e: # pylint: disable=broad-except\n # just in case we missed something\n msg = str(e).strip()\n if not msg:\n msg = type(e).__name__\n self.broken_wingmen.append({\"name\": wingman_name, \"error\": msg})\n else:\n # additional validation check if no exception was raised\n errors = wingman.validate()\n if not errors or len(errors) == 0:\n wingman.prepare()\n wingmen.append(wingman)\n else:\n self.broken_wingmen.append(\n {\"name\": wingman_name, \"error\": \", \".join(errors)}\n )\n\n return wingmen\n\n def get_wingman_from_key(self, key: any) -> Wingman | None: # type: ignore\n if hasattr(key, \"char\"):\n wingman = self.key_wingman_dict.get(key.char, None)\n else:\n wingman = self.key_wingman_dict.get(key.name, None)\n return wingman\n\n def get_wingmen(self):\n return self.wingmen\n\n def get_broken_wingmen(self):\n return self.broken_wingmen\n\n def get_config(self):\n return self.config\n\n def __deep_merge(self, source, updates):\n \"\"\"Recursively merges updates into source.\"\"\"\n for key, value in updates.items():\n if isinstance(value, dict):\n node = source.setdefault(key, {})\n self.__deep_merge(node, value)\n else:\n source[key] = value\n return source\n\n def __merge_command_lists(self, general_commands, wingman_commands):\n \"\"\"Merge two lists of commands, where wingman-specific commands override or get added based on the 'name' key.\"\"\"\n # Use a dictionary to ensure unique names and allow easy overrides\n merged_commands = {cmd[\"name\"]: cmd for cmd in general_commands}\n for cmd in wingman_commands:\n merged_commands[\n cmd[\"name\"]\n ] = cmd # Will override or add the wingman-specific command\n # Convert merged commands back to a list since that's the expected format\n return list(merged_commands.values())\n\n def __merge_configs(self, general, wingman):\n \"\"\"Merge general settings with a specific wingman's overrides, including commands.\"\"\"\n # Start with a copy of the wingman's specific config to keep it intact.\n merged = wingman.copy()\n # Update 'openai', 'features', and 'edge_tts' sections from general config into wingman's config.\n for key in [\"sound\", \"openai\", \"features\", \"edge_tts\", \"elevenlabs\", \"azure\"]:\n if key in general:\n # Use copy.deepcopy to ensure a full deep copy is made and original is untouched.\n merged[key] = self.__deep_merge(\n copy.deepcopy(general[key]), wingman.get(key, {})\n )\n\n # Special handling for merging the commands lists\n if \"commands\" in general and \"commands\" in wingman:\n merged[\"commands\"] = self.__merge_command_lists(\n general[\"commands\"], wingman[\"commands\"]\n )\n elif \"commands\" in general:\n # If the wingman config does not have commands, use the general ones\n merged[\"commands\"] = general[\"commands\"]\n # No else needed; if 'commands' is not in general, we simply don't set it\n\n return merged"
},
{
"identifier": "Printr",
"path": "services/printr.py",
"snippet": "class Printr(object):\n _instance = None\n\n LILA = \"\\033[95m\"\n BLUE = \"\\033[94m\"\n CYAN = \"\\033[96m\"\n GREEN = \"\\033[92m\"\n YELLOW = \"\\033[93m\"\n RED = \"\\033[91m\"\n CLEAR = \"\\033[0m\"\n BOLD = \"\\033[1m\"\n FAINT = \"\\033[2m\"\n NORMAL_WEIGHT = \"\\033[22m\"\n UNDERLINE = \"\\033[4m\"\n END_UNDERLINE = \"\\033[24m\"\n OVERLINE = \"\\033[53m\"\n END_OVERLINE = \"\\033[55m\"\n FRAMED = \"\\033[51m\"\n ENCIRCLED = \"\\033[52m\"\n DELETE_LINE = \"\\033[2K\\033[1G\"\n PREVIOUS_LINE = \"\\033[2F\"\n\n tags = [\n # {\"tagName\": \"bold\", \"font\": \"TkTextFont bold\"},\n {\"tagName\": \"info\", \"foreground\": \"#6699ff\"},\n {\"tagName\": \"warn\", \"foreground\": \"orange\"},\n {\"tagName\": \"err\", \"foreground\": \"red\"},\n\n {\"tagName\": \"green\", \"foreground\": \"#33cc33\"},\n {\"tagName\": \"blue\", \"foreground\": \"#6699ff\"},\n {\"tagName\": \"violet\", \"foreground\": \"#aa33dd\"},\n {\"tagName\": \"grey\", \"foreground\": \"grey\"}\n ]\n\n CHANNEL = Literal[\"main\", \"error\", \"warning\", \"info\"]\n OUTPUT_TYPES = None | ctk.StringVar | ctk.CTkTextbox\n\n _message_stacks: dict[CHANNEL, list] = dict(\n main=[],\n error=[],\n warning=[],\n info=[]\n )\n\n # NOTE this is a singleton class\n def __new__(cls):\n if cls._instance is None:\n cls._instance = super(Printr, cls).__new__(cls)\n\n cls.out: dict[Printr.CHANNEL, Printr.OUTPUT_TYPES ] = dict(\n main=None,\n error=None,\n warning=None,\n info=None\n )\n return cls._instance\n\n\n def set_output(self, output_channel: CHANNEL, output_element: OUTPUT_TYPES):\n if isinstance(output_element, ctk.CTkTextbox):\n for tag in self.tags:\n output_element.tag_config(**tag)\n\n self.out[output_channel] = output_element\n\n msg_stack = self._message_stacks.get(output_channel, [])\n if len(msg_stack) > 0:\n msg = \"\\n\".join(msg_stack)\n self.print(msg, output_channel)\n # TODO: clear stack?\n for _ in range(len(msg_stack)):\n msg_stack.pop()\n\n\n\n def print(self, text, output_channel: CHANNEL = \"main\", tags=None, wait_for_gui=False, console_only=False):\n channel = self.out.get(output_channel, None)\n if channel and not console_only:\n if isinstance(channel, ctk.CTkTextbox):\n channel.configure(state=\"normal\")\n channel.insert(\"end\", f\"{text}\\n\", tags=tags)\n channel.see(\"end\")\n channel.configure(state=\"disabled\")\n else:\n # output type -> StringVar\n channel.set(text)\n elif wait_for_gui and not console_only:\n # message should only be shown in GUI\n # so add it to the queue to wait for GUI initialization\n self._message_stacks.get(output_channel, []).append(text)\n else:\n # no special output type -> terminal output\n print(text)\n\n\n def print_err(self, text, wait_for_gui=True):\n self.print(text, output_channel=\"error\", wait_for_gui=wait_for_gui)\n\n def print_warn(self, text, wait_for_gui=True):\n self.print(text, output_channel=\"warning\", wait_for_gui=wait_for_gui)\n\n def print_info(self, text, wait_for_gui=True):\n self.print(text, output_channel=\"info\", wait_for_gui=wait_for_gui)\n\n\n @staticmethod\n def clr(text, color_format):\n return f\"{color_format}{text}{Printr.CLEAR}\"\n\n @staticmethod\n def clr_print(text, color_format):\n print(Printr.clr(text, color_format))\n\n @staticmethod\n def sys_print(text, headline=\"\", color=RED, first_message=True):\n if first_message:\n print(\"\")\n if headline.strip():\n print(\n Printr.clr(f\"{Printr.BOLD}{headline}{Printr.NORMAL_WEIGHT}\", color)\n )\n else:\n print(Printr.PREVIOUS_LINE)\n print(Printr.clr(f\"⎢ {text}\", 
color))\n print(\"\")\n\n @staticmethod\n def err_print(text, first_message=True):\n Printr.sys_print(text, \"Something went wrong!\", first_message=first_message)\n\n @staticmethod\n def warn_print(text, first_message=True):\n Printr.sys_print(text, \"Please note:\", Printr.YELLOW, first_message)\n\n @staticmethod\n def info_print(text, first_message=True):\n Printr.sys_print(text, \"\", Printr.BLUE, first_message)\n\n @staticmethod\n def hl_print(text, first_message=True):\n Printr.sys_print(text, \"\", Printr.CYAN, first_message)\n\n @staticmethod\n def override_print(text):\n print(f\"{Printr.DELETE_LINE}{text}\")\n\n @staticmethod\n def box_start():\n print(\n f\"{Printr.CYAN}⎡{Printr.OVERLINE}⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊{Printr.END_OVERLINE}⎤\"\n )\n print(f\"⎢{Printr.CLEAR}\")\n\n @staticmethod\n def box_end():\n print(f\"{Printr.CYAN}⎢\")\n print(\n f\"⎣{Printr.UNDERLINE}⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊{Printr.END_UNDERLINE}⎦{Printr.CLEAR}\"\n )\n\n @staticmethod\n def box_print(text):\n print(f\"{Printr.CYAN}⎜{Printr.CLEAR} {text}\")"
},
{
"identifier": "ConfigManager",
"path": "services/config_manager.py",
"snippet": "class ConfigManager:\n def __init__(self, app_root_path: str, app_is_bundled: bool):\n self.printr = Printr()\n self.gui_config = {}\n self.contexts = [\"\"]\n self.context_config_path: str = os.path.join(\n app_root_path,\n CONTEXT_CONFIG_PATH_BUNDLED if app_is_bundled else CONTEXT_CONFIG_PATH,\n )\n if not os.path.exists(self.context_config_path):\n os.makedirs(self.context_config_path)\n self.system_config_path: str = os.path.join(app_root_path, SYSTEM_CONFIG_PATH)\n self.load_gui_config()\n self.load_context_config_names()\n\n def __read_config_file(self, config_name, is_system_config=True) -> dict[str, any]: # type: ignore\n parsed_config = {}\n\n path = self.system_config_path if is_system_config else self.context_config_path\n config_file = os.path.join(path, config_name)\n if os.path.exists(config_file) and os.path.isfile(config_file):\n with open(config_file, \"r\", encoding=\"UTF-8\") as stream:\n try:\n parsed_config = yaml.safe_load(stream)\n except yaml.YAMLError as e:\n self.printr.print_err(\n f\"Could not load config ({config_name})!\\n{str(e)}\", True\n )\n\n return parsed_config\n\n def __write_config_file(self, config_name, content, is_system_config=True) -> bool: # type: ignore\n path = self.system_config_path if is_system_config else self.context_config_path\n config_file = os.path.join(path, config_name)\n with open(config_file, \"w\", encoding=\"UTF-8\") as stream:\n try:\n yaml.dump(content, stream)\n except yaml.YAMLError as e:\n self.printr.print_err(\n f\"Could not write config ({config_name})!\\n{str(e)}\", True\n )\n return False\n\n return True\n\n def load_gui_config(self):\n \"\"\"Fetch GUI config from file and store it for future use\"\"\"\n self.gui_config = self.__read_config_file(GUI_CONFIG)\n return self.gui_config\n\n def save_gui_config(self):\n \"\"\"Write GUI config to file\"\"\"\n return self.__write_config_file(GUI_CONFIG, self.gui_config)\n\n def load_context_config_names(self):\n default_found = False\n file_prefix, file_ending = DEFAULT_CONTEXT_CONFIG.split(\".\")\n\n # Dynamically load all user configuration files from the provided directory\n for file in os.listdir(self.context_config_path):\n # Filter out all non-yaml files\n if file.endswith(f\".{file_ending}\") and file.startswith(f\"{file_prefix}.\"):\n if file == DEFAULT_CONTEXT_CONFIG:\n default_found = True\n else:\n config_name = file.replace(f\"{file_prefix}.\", \"\").replace(\n f\".{file_ending}\", \"\"\n )\n self.contexts.append(config_name)\n\n if not default_found:\n # create default context from the systems example context config\n example_context: str = os.path.join(\n self.system_config_path, EXAMPLE_CONTEXT_CONFIG\n )\n default_context: str = os.path.join(\n self.context_config_path, DEFAULT_CONTEXT_CONFIG\n )\n if os.path.exists(example_context) and os.path.isfile(example_context):\n shutil.copyfile(example_context, default_context)\n\n def get_context_config(self, context=\"\") -> dict[str, any]: # type: ignore\n # default name -> 'config.yaml'\n # context config -> 'config.{context}.yaml'\n file_name = f\"config.{f'{context}.' if context else ''}yaml\"\n config = self.__read_config_file(file_name, False)\n return config"
},
{
"identifier": "WingmanUI",
"path": "gui/root.py",
"snippet": "class WingmanUI(ctk.CTk):\n VIEWS = Literal[\"context\", \"settings\", \"about\"]\n _views: dict[VIEWS, ctk.CTkFrame | None] = dict(\n context=None, settings=None, about=None\n )\n\n def __init__(self, core):\n super().__init__()\n self.core = core\n\n self.about_window = None\n\n ctk.set_appearance_mode(\n self.core.config_manager.gui_config.get(\"appearance\", \"system\")\n )\n # TODO: add themes\n # ctk.set_default_color_theme(path.join(self.core.app_root_dir, \"assets\", \"themes\", \"wingman-ai.json\"))\n\n self.title(\"Wingman AI\")\n self.geometry(\"1024x800+200+150\")\n self.minsize(400, 150)\n # no way to set this on MacOS\n self.iconbitmap(path.join(self.core.app_root_dir, \"assets\", \"wingman-ai.ico\"))\n\n if platform == \"darwin\":\n mac_dock_icon = tk.Image(\n \"photo\",\n file=path.join(\n self.core.app_root_dir, \"assets\", \"icons\", \"wingman-ai.png\"\n ),\n )\n self.iconphoto(True, mac_dock_icon)\n self.menubar = tk.Menu(self)\n self.system_menu = tk.Menu(self.menubar, name=\"apple\")\n self.system_menu.add_command(label=\"Exit Wingman AI\", command=self.quit)\n self.menubar.add_cascade(label=\"System\", menu=self.system_menu)\n self.help_menu = tk.Menu(self.menubar, tearoff=0)\n self.help_menu.add_command(\n label=\"About Wingman AI\", command=lambda: self.show_view(\"about\")\n )\n self.menubar.add_cascade(label=\"Help\", menu=self.help_menu)\n self.config(menu=self.menubar)\n\n self.grid_columnconfigure(0, weight=1)\n self.grid_rowconfigure(1, weight=1)\n\n self.header = Header(self, height=74, corner_radius=0)\n self.header.grid(row=0, column=0, sticky=\"we\")\n\n view_grid = {\"row\": 1, \"column\": 0, \"sticky\": \"nesw\"}\n self._views[\"about\"] = AboutView(self, corner_radius=0, fg_color=\"transparent\")\n self._views[\"about\"].grid(**view_grid)\n\n self._views[\"settings\"] = SettingsView(\n self, corner_radius=0, fg_color=\"transparent\"\n )\n self._views[\"settings\"].grid(**view_grid)\n\n self._views[\"context\"] = ContextView(\n self, corner_radius=0, fg_color=\"transparent\"\n )\n self._views[\"context\"].grid(**view_grid)\n\n self.notification_banner = NotificationBanner(self, corner_radius=0)\n self.notification_banner.set_grid_position(row=2, column=0)\n\n def switch_view(self, view: VIEWS, show=True):\n toggle_view = self._views.get(view)\n if isinstance(toggle_view, ctk.CTkFrame):\n if show:\n toggle_view.tkraise()\n else:\n toggle_view.lower()\n\n def show_view(self, view: VIEWS):\n self.switch_view(view, show=True)\n\n def hide_view(self, view: VIEWS):\n self.switch_view(view, show=False)"
},
{
"identifier": "Wingman",
"path": "wingmen/wingman.py",
"snippet": "class Wingman(FileCreator):\n \"\"\"The \"highest\" Wingman base class in the chain. It does some very basic things but is meant to be 'virtual', and so are most its methods, so you'll probably never instantiate it directly.\n\n Instead, you'll create a custom wingman that inherits from this (or a another subclass of it) and override its methods if needed.\n \"\"\"\n\n def __init__(\n self,\n name: str,\n config: dict[str, Any],\n secret_keeper: SecretKeeper,\n app_root_dir: str,\n ):\n \"\"\"The constructor of the Wingman class. You can override it in your custom wingman.\n\n Args:\n name (str): The name of the wingman. This is the key you gave it in the config, e.g. \"atc\"\n config (dict[str, any]): All \"general\" config entries merged with the specific Wingman config settings. The Wingman takes precedence and overrides the general config. You can just add new keys to the config and they will be available here.\n app_root_dir (str): The path to the root directory of the app. This is where the Wingman executable lives.\n \"\"\"\n\n super().__init__(app_root_dir=app_root_dir, subdir=\"wingman_data\")\n\n self.config = config\n \"\"\"All \"general\" config entries merged with the specific Wingman config settings. The Wingman takes precedence and overrides the general config. You can just add new keys to the config and they will be available here.\"\"\"\n\n self.secret_keeper = secret_keeper\n \"\"\"A service that allows you to store and retrieve secrets like API keys. It can prompt the user for secrets if necessary.\"\"\"\n\n self.name = name\n \"\"\"The name of the wingman. This is the key you gave it in the config, e.g. \"atc\".\"\"\"\n\n self.audio_player = AudioPlayer()\n \"\"\"A service that allows you to play audio files and add sound effects to them.\"\"\"\n\n self.execution_start: None | float = None\n \"\"\"Used for benchmarking executon times. The timer is (re-)started whenever the process function starts.\"\"\"\n\n self.debug: bool = self.config[\"features\"].get(\"debug_mode\", False)\n \"\"\"If enabled, the Wingman will skip executing any keypresses. It will also print more debug messages and benchmark results.\"\"\"\n\n self.tts_provider = self.config[\"features\"].get(\"tts_provider\")\n \"\"\"The name of the TTS provider you configured in the config.yaml\"\"\"\n\n self.app_root_dir = app_root_dir\n \"\"\"The path to the root directory of the app. This is where the Wingman executable lives.\"\"\"\n\n @staticmethod\n def create_dynamically(\n module_path: str,\n class_name: str,\n name: str,\n config: dict[str, Any],\n secret_keeper: SecretKeeper,\n app_root_dir: str,\n **kwargs,\n ):\n \"\"\"Dynamically creates a Wingman instance from a module path and class name\n\n Args:\n module_path (str): The module path, e.g. wingmen.open_ai_wingman. It's like the filepath from root to your custom-wingman.py but with dots instead of slashes and without the .py extension. Case-sensitive!\n class_name (str): The name of the class inside your custom-wingman.py, e.g. OpenAiWingman. Case-sensitive!\n name (str): The name of the wingman. This is the key you gave it in the config, e.g. \"atc\"\n config (dict[str, any]): All \"general\" config entries merged with the specific Wingman config settings. The Wingman takes precedence and overrides the general config. 
You can just add new keys to the config and they will be available here.\n \"\"\"\n\n module = import_module(module_path)\n DerivedWingmanClass = getattr(module, class_name)\n instance = DerivedWingmanClass(\n name=name,\n config=config,\n secret_keeper=secret_keeper,\n app_root_dir=app_root_dir,\n **kwargs,\n )\n return instance\n\n def get_record_key(self) -> str:\n \"\"\"Returns the activation or \"push-to-talk\" key for this Wingman.\"\"\"\n return self.config.get(\"record_key\", None)\n\n def print_execution_time(self, reset_timer=False):\n \"\"\"Prints the current time since the execution started (in seconds).\"\"\"\n if self.execution_start:\n execution_stop = time.perf_counter()\n elapsed_seconds = execution_stop - self.execution_start\n printr.print(f\"...took {elapsed_seconds:.2f}s\", tags=\"info\")\n if reset_timer:\n self.start_execution_benchmark()\n\n def start_execution_benchmark(self):\n \"\"\"Starts the execution benchmark timer.\"\"\"\n self.execution_start = time.perf_counter()\n\n # ──────────────────────────────────── Hooks ─────────────────────────────────── #\n\n def validate(self) -> list[str]:\n \"\"\"Use this function to validate params and config before the Wingman is started.\n If you add new config sections or entries to your custom wingman, you should validate them here.\n\n It's a good idea to collect all errors from the base class and not to swallow them first.\n\n If you return errors, your Wingman will be disabled by Tower and not be loaded.\n\n Returns:\n list[str]: A list of error messages or an empty list if everything is okay.\n \"\"\"\n return []\n\n # TODO: this should be async\n def prepare(self):\n \"\"\"This method is called only once when the Wingman is instantiated by Tower.\n It is run AFTER validate() so you can access validated params safely here.\n\n You can override it if you need to load async data from an API or file.\"\"\"\n pass\n\n def reset_conversation_history(self):\n \"\"\"This function is called when the user triggers the ResetConversationHistory command.\n It's a global command that should be implemented by every Wingman that keeps a message history.\n \"\"\"\n\n # ──────────────────────────── The main processing loop ──────────────────────────── #\n\n async def process(self, audio_input_wav: str):\n \"\"\"The main method that gets called when the wingman is activated. This method controls what your wingman actually does and you can override it if you want to.\n\n The base implementation here triggers the transcription and processing of the given audio input.\n If you don't need even transcription, you can just override this entire process method. If you want transcription but then do something in addition, you can override the listed hooks.\n\n Async so you can do async processing, e.g. send a request to an API.\n\n Args:\n audio_input_wav (str): The path to the audio file that contains the user's speech. This is a recording of what you you said.\n\n Hooks:\n - async _transcribe: transcribe the audio to text\n - async _get_response_for_transcript: process the transcript and return a text response\n - async _play_to_user: do something with the response, e.g. 
play it as audio\n \"\"\"\n\n self.start_execution_benchmark()\n\n process_result = None\n\n if self.debug:\n printr.print(\"Starting transcription...\", tags=\"info\")\n\n # transcribe the audio.\n transcript, locale = await self._transcribe(audio_input_wav)\n\n if self.debug:\n self.print_execution_time(reset_timer=True)\n\n if transcript:\n printr.print(f\">> (You): {transcript}\", tags=\"violet\")\n\n if self.debug:\n printr.print(\"Getting response for transcript...\", tags=\"info\")\n\n # process the transcript further. This is where you can do your magic. Return a string that is the \"answer\" to your passed transcript.\n process_result, instant_response = await self._get_response_for_transcript(\n transcript, locale\n )\n\n if self.debug:\n self.print_execution_time(reset_timer=True)\n\n actual_response = instant_response or process_result\n printr.print(f\"<< ({self.name}): {actual_response}\", tags=\"green\")\n\n if self.debug:\n printr.print(\"Playing response back to user...\", tags=\"info\")\n\n # the last step in the chain. You'll probably want to play the response to the user as audio using a TTS provider or mechanism of your choice.\n await self._play_to_user(str(process_result))\n\n if self.debug:\n self.print_execution_time()\n\n # ───────────────── virtual methods / hooks ───────────────── #\n\n async def _transcribe(self, audio_input_wav: str) -> tuple[str | None, str | None]:\n \"\"\"Transcribes the audio to text. You can override this method if you want to use a different transcription service.\n\n Args:\n audio_input_wav (str): The path to the audio file that contains the user's speech. This is a recording of what you you said.\n\n Returns:\n tuple[str | None, str | None]: The transcript of the audio file and the detected language as locale (if determined).\n \"\"\"\n return None, None\n\n async def _get_response_for_transcript(\n self, transcript: str, locale: str | None\n ) -> tuple[str, str]:\n \"\"\"Processes the transcript and return a response as text. This where you'll do most of your work.\n Pass the transcript to AI providers and build a conversation. Call commands or APIs. Play temporary results to the user etc.\n\n\n Args:\n transcript (str): The user's spoken text transcribed as text.\n locale (str | None): The language that was detected to be used in the transcript, e.g. \"de-DE\".\n\n Returns:\n A tuple of strings representing the response to a function call and/or an instant response.\n \"\"\"\n return (\"\", \"\")\n\n async def _play_to_user(self, text: str):\n \"\"\"You'll probably want to play the response to the user as audio using a TTS provider or mechanism of your choice.\n\n Args:\n text (str): The response of your _get_response_for_transcript. This is usually the \"response\" from conversation with the AI.\n \"\"\"\n pass\n\n # ───────────────────────────────── Commands ─────────────────────────────── #\n\n def _get_command(self, command_name: str) -> dict | None:\n \"\"\"Extracts the command with the given name\n\n Args:\n command_name (str): the name of the command you used in the config\n\n Returns:\n {}: The command object from the config\n \"\"\"\n\n command = next(\n (\n item\n for item in self.config.get(\"commands\", [])\n if item[\"name\"] == command_name\n ),\n None,\n )\n return command\n\n def _select_command_response(self, command: dict) -> str | None:\n \"\"\"Returns one of the configured responses of the command. 
This base implementation returns a random one.\n\n Args:\n command (dict): The command object from the config\n\n Returns:\n str: A random response from the command's responses list in the config.\n \"\"\"\n command_responses = command.get(\"responses\", None)\n if (command_responses is None) or (len(command_responses) == 0):\n return None\n\n return random.choice(command_responses)\n\n def _execute_instant_activation_command(self, transcript: str) -> dict | None:\n \"\"\"Uses a fuzzy string matching algorithm to match the transcript to a configured instant_activation command and executes it immediately.\n\n Args:\n transcript (text): What the user said, transcripted to text. Needs to be similar to one of the defined instant_activation phrases to work.\n\n Returns:\n {} | None: The executed instant_activation command.\n \"\"\"\n\n instant_activation_commands = [\n command\n for command in self.config.get(\"commands\", [])\n if command.get(\"instant_activation\")\n ]\n\n # check if transcript matches any instant activation command. Each command has a list of possible phrases\n for command in instant_activation_commands:\n for phrase in command.get(\"instant_activation\"):\n ratio = SequenceMatcher(\n None,\n transcript.lower(),\n phrase.lower(),\n ).ratio()\n if (\n ratio > 0.8\n ): # if the ratio is higher than 0.8, we assume that the command was spoken\n self._execute_command(command)\n\n if command.get(\"responses\"):\n return command\n return None\n return None\n\n def _execute_command(self, command: dict) -> str:\n \"\"\"Triggers the execution of a command. This base implementation executes the keypresses defined in the command.\n\n Args:\n command (dict): The command object from the config to execute\n\n Returns:\n str: the selected response from the command's responses list in the config. \"Ok\" if there are none.\n \"\"\"\n\n if not command:\n return \"Command not found\"\n\n printr.print(f\"❖ Executing command: {command.get('name')}\", tags=\"info\")\n\n if self.debug:\n printr.print(\n \"Skipping actual keypress execution in debug_mode...\", tags=\"warn\"\n )\n\n if len(command.get(\"keys\", [])) > 0 and not self.debug:\n self.execute_keypress(command)\n # TODO: we could do mouse_events here, too...\n\n # handle the global special commands:\n if command.get(\"name\", None) == \"ResetConversationHistory\":\n self.reset_conversation_history()\n\n if not self.debug:\n # in debug mode we already printed the separate execution times\n self.print_execution_time()\n\n return self._select_command_response(command) or \"Ok\"\n\n def execute_keypress(self, command: dict):\n \"\"\"Executes the keypresses defined in the command in order.\n\n pydirectinput uses SIGEVENTS to send keypresses to the OS. This lib seems to be the only way to send keypresses to games reliably.\n\n It only works on Windows. For MacOS, we fall back to PyAutoGUI (which has the exact same API as pydirectinput is built on top of it).\n\n Args:\n command (dict): The command object from the config to execute\n \"\"\"\n\n for entry in command.get(\"keys\", []):\n if entry.get(\"modifier\"):\n key_module.keyDown(entry[\"modifier\"])\n\n if entry.get(\"hold\"):\n key_module.keyDown(entry[\"key\"])\n time.sleep(entry[\"hold\"])\n key_module.keyUp(entry[\"key\"])\n else:\n key_module.press(entry[\"key\"])\n\n if entry.get(\"modifier\"):\n key_module.keyUp(entry[\"modifier\"])\n\n if entry.get(\"wait\"):\n time.sleep(entry[\"wait\"])"
}
] | from os import path
from pynput import keyboard
from services.audio_recorder import AudioRecorder
from services.secret_keeper import SecretKeeper
from services.tower import Tower
from services.printr import Printr
from services.config_manager import ConfigManager
from gui.root import WingmanUI
from wingmen.wingman import Wingman
import sys
import asyncio
import threading | 10,051 |
printr = Printr()
def get_application_root(is_bundled: bool):
if is_bundled:
application_path = sys._MEIPASS
else:
application_path = path.dirname(path.abspath(__file__))
return application_path
class WingmanAI:
def __init__(self):
# pyinstaller things...
self.app_is_bundled = getattr(sys, "frozen", False)
self.app_root_dir = get_application_root(self.app_is_bundled)
self.active = False
self.active_recording = {"key": "", "wingman": None}
self.tower = None
self.config_manager = ConfigManager(self.app_root_dir, self.app_is_bundled)
self.secret_keeper = SecretKeeper(self.app_root_dir)
self.audio_recorder = AudioRecorder(self.app_root_dir)
def load_context(self, context=""):
self.active = False
try:
if self.config_manager:
config = self.config_manager.get_context_config(context)
self.tower = Tower(
config=config,
secret_keeper=self.secret_keeper,
app_root_dir=self.app_root_dir,
)
except FileNotFoundError:
printr.print_err(f"Could not find context.{context}.yaml", True)
except Exception as e:
# Everything else...
printr.print_err(str(e), True)
def activate(self):
if self.tower:
self.active = True
def deactivate(self):
self.active = False
def on_press(self, key):
if self.active and self.tower and self.active_recording["key"] == "":
wingman = self.tower.get_wingman_from_key(key)
if wingman:
self.active_recording = dict(key=key, wingman=wingman)
self.audio_recorder.start_recording()
def on_release(self, key):
if self.active and self.active_recording["key"] == key:
wingman = self.active_recording["wingman"]
recorded_audio_wav = self.audio_recorder.stop_recording()
self.active_recording = dict(key="", wingman=None)
def run_async_process():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
|
printr = Printr()
def get_application_root(is_bundled: bool):
if is_bundled:
application_path = sys._MEIPASS
else:
application_path = path.dirname(path.abspath(__file__))
return application_path
class WingmanAI:
def __init__(self):
# pyinstaller things...
self.app_is_bundled = getattr(sys, "frozen", False)
self.app_root_dir = get_application_root(self.app_is_bundled)
self.active = False
self.active_recording = {"key": "", "wingman": None}
self.tower = None
self.config_manager = ConfigManager(self.app_root_dir, self.app_is_bundled)
self.secret_keeper = SecretKeeper(self.app_root_dir)
self.audio_recorder = AudioRecorder(self.app_root_dir)
def load_context(self, context=""):
self.active = False
try:
if self.config_manager:
config = self.config_manager.get_context_config(context)
self.tower = Tower(
config=config,
secret_keeper=self.secret_keeper,
app_root_dir=self.app_root_dir,
)
except FileNotFoundError:
printr.print_err(f"Could not find context.{context}.yaml", True)
except Exception as e:
# Everything else...
printr.print_err(str(e), True)
def activate(self):
if self.tower:
self.active = True
def deactivate(self):
self.active = False
def on_press(self, key):
if self.active and self.tower and self.active_recording["key"] == "":
wingman = self.tower.get_wingman_from_key(key)
if wingman:
self.active_recording = dict(key=key, wingman=wingman)
self.audio_recorder.start_recording()
def on_release(self, key):
if self.active and self.active_recording["key"] == key:
wingman = self.active_recording["wingman"]
recorded_audio_wav = self.audio_recorder.stop_recording()
self.active_recording = dict(key="", wingman=None)
def run_async_process():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try: | if isinstance(wingman, Wingman): | 6 | 2023-11-15 09:36:06+00:00 | 12k |
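The Wingman base class in this entry's context is explicitly meant to be subclassed: the constructor receives name, config, secret_keeper and app_root_dir, and the async hooks _transcribe, _get_response_for_transcript and _play_to_user are the intended override points. The sketch below only illustrates that contract; EchoWingman, its canned transcript and the wingmen.echo_wingman module path are invented for this example and do not appear in the repository.

from wingmen.wingman import Wingman


class EchoWingman(Wingman):
    """Illustrative subclass: skips real speech-to-text and simply echoes a canned transcript."""

    def validate(self) -> list[str]:
        # Collect base-class errors first; returning a non-empty list makes Tower disable this Wingman.
        errors = super().validate()
        if not self.config.get("commands"):
            errors.append(f"Wingman '{self.name}' has no commands configured.")
        return errors

    async def _transcribe(self, audio_input_wav: str) -> tuple[str | None, str | None]:
        # A real implementation would send audio_input_wav to a speech-to-text provider here.
        return "hello wingman", None

    async def _get_response_for_transcript(self, transcript: str, locale: str | None) -> tuple[str, str]:
        # Reuse the base class' fuzzy instant-activation matching before falling back to an echo.
        command = self._execute_instant_activation_command(transcript)
        if command:
            return self._select_command_response(command) or "Ok", ""
        return f"You said: {transcript}", ""

    async def _play_to_user(self, text: str):
        # A real implementation would synthesize `text` with a TTS provider and play it back.
        print(f"({self.name}): {text}")

Such a subclass could then be instantiated via Wingman.create_dynamically(module_path="wingmen.echo_wingman", class_name="EchoWingman", ...), provided the module path matches where the file actually lives.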
derkalle4/python3-idotmatrix-client | core/cmd.py | [
{
"identifier": "Bluetooth",
"path": "core/bluetooth.py",
"snippet": "class Bluetooth:\n address = None\n client = None\n logging = logging.getLogger(\"idotmatrix.\" + __name__)\n mtu_size = None\n\n def __init__(self, address):\n self.logging.debug(\"initialize bluetooth for {}\".format(address))\n self.address = address\n\n async def response_handler(self, sender, data):\n \"\"\"Simple response handler which prints the data received.\"\"\"\n self.logging.debug(\"device feedback: {}\".format(list(data)))\n\n async def connect(self):\n self.logging.info(\"connecting to device\")\n try:\n # create client\n self.client = BleakClient(self.address)\n # connect client\n await self.client.connect()\n # get mtu size\n gatt_characteristic = self.client.services.get_characteristic(\n UUID_WRITE_DATA\n )\n self.mtu_size = gatt_characteristic.max_write_without_response_size\n # Initialise Response Message Handler\n await self.client.start_notify(UUID_READ_DATA, self.response_handler)\n except Exception as e:\n self.logging.error(e)\n if self.client.is_connected:\n self.disconnect()\n return False\n return True\n\n async def disconnect(self):\n self.logging.info(\"disconnecting from device\")\n if self.client is not None:\n await self.client.stop_notify(UUID_READ_DATA)\n await self.client.disconnect()\n\n def splitIntoMultipleLists(self, data):\n \"\"\"\n Returns a list containing lists with the elements from `data`.\n It is ensured that the lists have a maximum length of `max_elems_per_list`.\n\n Derived from `private List<byte[]> getSendData4096(byte[] bArr)`\n in `com/tech/idotmatrix/core/data/ImageAgreement1.java:259`.\n \"\"\"\n chunks = []\n len_ = len(data)\n for start in range(0, len_, self.mtu_size):\n end = start + min(len_ - start, self.mtu_size)\n chunks.append(data[start:end])\n return chunks\n\n async def send(self, message):\n # check if connected\n if self.client is None or not self.client.is_connected:\n if not await self.connect():\n return False\n self.logging.debug(\"sending message(s) to device\")\n for data in self.splitIntoMultipleLists(message):\n self.logging.debug(\"trying to send {}\".format(data))\n await self.client.write_gatt_char(\n UUID_WRITE_DATA,\n data,\n )\n time.sleep(0.01)\n return True"
},
{
"identifier": "Chronograph",
"path": "core/idotmatrix/chronograph.py",
"snippet": "class Chronograph:\n def setChronograph(self, mode):\n \"\"\"Starts/Stops the Chronograph.\n\n Args:\n mode (int): 0 = reset, 1 = (re)start, 2 = pause, 3 = continue after pause\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 5,\n 0,\n 9,\n 128,\n int(mode) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not set the chronograph: {}\".format(error))"
},
{
"identifier": "Clock",
"path": "core/idotmatrix/clock.py",
"snippet": "class Clock:\n \"\"\"This class contains the management of the iDotMatrix clock.\n Based on the BleProtocolN.java file of the iDotMatrix Android App.\n \"\"\"\n\n def setTimeIndicator(self, enabled=True):\n \"\"\"Sets the time indicator of the clock. Does not seem to work currently (maybe in a future update?).\n It is inside the source code of BleProtocolN.java, but not referenced anywhere.\n\n Args:\n enabled (bool, optional): Whether or not to show the time indicator of the clock. Defaults to True.\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 5,\n 0,\n 7,\n 128,\n 1 if enabled else 0,\n ]\n )\n except BaseException as error:\n logging.error(\"could not set the time indicator: {}\".format(error))\n\n def setClockMode(self, style, visibleDate, hour24, r=255, g=255, b=255):\n \"\"\"set the clock mode of the device\n\n Args:\n style (int): style of the clock\n 0 = default\n 1 = christmas\n 2 = racing\n 3 = inverted full screen\n 4 = animated hourglass\n 5 = frame 1\n 6 = frame 2\n 7 = frame 3\n visibleDate (bool): whether the date should be shown ornot\n hour24 (bool): 12 or 24 hour format\n r (int, optional): color red. Defaults to 255.\n g (int, optional): color green. Defaults to 255.\n b (int, optional): color blue. Defaults to 255.\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 8,\n 0,\n 6,\n 1,\n (style | (128 if visibleDate else 0)) | (64 if hour24 else 0),\n int(r) % 256,\n int(g) % 256,\n int(b) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not set the clock mode: {}\".format(error))"
},
{
"identifier": "Common",
"path": "core/idotmatrix/common.py",
"snippet": "class Common:\n \"\"\"This class contains generic bluetooth functions for the iDotMatrix.\n Based on the BleProtocolN.java file of the iDotMatrix Android App.\n \"\"\"\n\n def toggleScreenFreeze(self):\n \"\"\"Freezes or unfreezes the screen.\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n return bytearray([4, 0, 3, 0])\n\n def rotate180degrees(self, type=0):\n \"\"\"rotates the screen 180 dregrees\n\n Args:\n type (int): 0 = normal, 1 = rotated. Defaults to 0.\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 5,\n 0,\n 6,\n 128,\n int(type) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not rotate the screen of the device: {}\".format(error))\n\n def set_screen_brightness(self, brightness_percent: int) -> None:\n \"\"\"Set screen brightness. Range 5-100 (%)\n\n Args:\n brightness_percent (int): set the brightness in percent\n\n Returns:\n None\n \"\"\"\n try:\n return bytearray(\n [\n 5,\n 0,\n 4,\n 128,\n int(brightness_percent) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not set the brightness of the screen: {}\".format(error))\n \n def setSpeed(self, speed):\n \"\"\"Sets the speed of ? - not referenced anyhwere in the iDotrMatrix Android App.\n\n Args:\n speed (int): set the speed\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 5,\n 0,\n 3,\n 1,\n int(speed) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not change the speed of the device: {}\".format(error))\n\n def setTime(self, year, month, day, hour, minute, second):\n \"\"\"Sets the date and time of the device.\n\n Args:\n year (int): year (4 digits)\n month (int): month\n day (int): day\n hour (int): hour\n minute (int): minute\n second (int): second\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n date = datetime(year, month, day, hour, minute, second)\n return bytearray(\n [\n 11,\n 0,\n 1,\n 128,\n int(year) % 256,\n int(month) % 256,\n int(day) % 256,\n int(int(date.weekday()) + 1) % 256,\n int(hour) % 256,\n int(minute) % 256,\n int(second) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not set the time of the device: {}\".format(error))\n\n def setJoint(self, mode):\n \"\"\"Currently no Idea what this is doing.\n\n Args:\n mode (int): set the joint mode\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 5,\n 0,\n 12,\n 128,\n int(mode) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not change the device joint: {}\".format(error))\n \n def set_password(self, password: int) -> None:\n \"\"\"Setting password: 6 digits in range 000000..999999. Reset device to clear\n\n Args:\n password (int): password\n Returns:\n None\n \"\"\"\n \n pwd_high = password // 10000\n pwd_mid = password % 10000 // 100\n pwd_low = password % 100\n \n try:\n return bytearray(\n [\n 8,\n 0,\n 4,\n 2,\n 1,\n pwd_high,\n pwd_mid,\n pwd_low,\n ]\n )\n except BaseException as error:\n logging.error(\"could not set the password: {}\".format(error))"
},
{
"identifier": "Countdown",
"path": "core/idotmatrix/countdown.py",
"snippet": "class Countdown:\n \"\"\"This class contains the management of the Countdown of the iDotMatrix device.\"\"\"\n\n def setCountdown(self, mode, minutes, seconds):\n \"\"\"Sets the countdown (and activates or disables it)\n\n Args:\n mode (int): mode of the countdown. 0 = disable, 1 = start, 2 = pause, 3 = restart\n minutes (int): minutes to count down from\n seconds (int): seconds to count down from\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 7,\n 0,\n 8,\n 128,\n int(mode) % 256,\n int(minutes) % 256,\n int(seconds) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not set the countdown: {}\".format(error))"
},
{
"identifier": "Gif",
"path": "core/idotmatrix/gif.py",
"snippet": "class Gif:\n logging = logging.getLogger(\"idotmatrix.\" + __name__)\n\n def load_gif(self, file_path):\n \"\"\"Load a gif file into a byte buffer.\n\n Args:\n file_path (str): path to file\n\n Returns:\n file: returns the file contents\n \"\"\"\n with open(file_path, \"rb\") as file:\n return file.read()\n\n def split_into_chunks(self, data, chunk_size):\n \"\"\"Split the data into chunks of specified size.\n\n Args:\n data (bytearray): data to split into chunks\n chunk_size (int): size of the chunks\n\n Returns:\n list: returns list with chunks of given data input\n \"\"\"\n return [data[i : i + chunk_size] for i in range(0, len(data), chunk_size)]\n\n def create_payloads(self, gif_data):\n \"\"\"Creates payloads from a GIF file.\n\n Args:\n gif_data (bytearray): data of the gif file\n\n Returns:\n bytearray: returns bytearray payload\n \"\"\"\n # TODO: make this function look more nicely :)\n # Calculate CRC of the GIF data\n crc = zlib.crc32(gif_data)\n # header for gif\n header = bytearray(\n [\n 255,\n 255,\n 1,\n 0,\n 0,\n 255,\n 255,\n 255,\n 255,\n 255,\n 255,\n 255,\n 255,\n 5,\n 0,\n 13,\n ]\n )\n # set length\n header[5:9] = int(len(gif_data) + len(header)).to_bytes(4, byteorder=\"little\")\n # add crc\n header[9:13] = crc.to_bytes(4, byteorder=\"little\")\n # Split the GIF data into 4096-byte chunks\n gif_chunks = self.split_into_chunks(gif_data, 4096)\n # build data\n payloads = bytearray()\n for i, chunk in enumerate(gif_chunks):\n header[4] = 2 if i > 0 else 0\n chunk_len = len(chunk) + len(header)\n header[0:2] = chunk_len.to_bytes(2, byteorder=\"little\")\n payloads.extend(header + chunk)\n return payloads\n\n def upload_unprocessed(self, file_path):\n \"\"\"uploads an image without further checks and resizes.\n\n Args:\n file_path (str): path to the image file\n\n Returns:\n bytearray: returns bytearray payload\n \"\"\"\n gif_data = self.load_gif(file_path)\n return self.create_payloads(gif_data)\n\n def upload_processed(self, file_path, pixel_size=32):\n \"\"\"uploads a file processed to make sure everything is correct before uploading to the device.\n\n Args:\n file_path (str): path to the image file\n pixel_size (int, optional): amount of pixels (either 16 or 32 makes sense). Defaults to 32.\n\n Returns:\n bytearray: returns bytearray payload\n \"\"\"\n try:\n # Open the gif file\n with PilImage.open(file_path) as img:\n # resize each frame of the gif\n frames = []\n try:\n while True:\n frame = img.copy()\n if frame.size != (pixel_size, pixel_size):\n # Resize the current frame\n frame = frame.resize(\n (pixel_size, pixel_size), PilImage.Resampling.NEAREST\n )\n # Copy the frame and append it to the list\n frames.append(frame.copy())\n # Move to the next frame\n img.seek(img.tell() + 1)\n except EOFError:\n pass # End of sequence\n # Create a BytesIO object to hold the GIF data\n gif_buffer = io.BytesIO()\n # Save the resized image as GIF to the BytesIO object\n frames[0].save(\n gif_buffer,\n format=\"GIF\",\n save_all=True,\n append_images=frames[1:],\n loop=1,\n duration=img.info[\"duration\"],\n disposal=2,\n )\n # Seek to the start of the GIF buffer\n gif_buffer.seek(0)\n # Return the GIF data\n return self.create_payloads(gif_buffer.getvalue())\n except IOError as e:\n self.logging.error(\"could not process gif: {}\".format(e))\n quit()"
},
{
"identifier": "Image",
"path": "core/idotmatrix/image.py",
"snippet": "class Image:\n logging = logging.getLogger(\"idotmatrix.\" + __name__)\n\n def show(self, mode=1):\n \"\"\"Enter the DIY draw mode of the iDotMatrix device.\n\n Args:\n mode (int): 0 = disable DIY, 1 = enable DIY, 2 = ?, 3 = ?. Defaults to 1.\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 5,\n 0,\n 4,\n 1,\n int(mode) % 256,\n ]\n )\n except BaseException as error:\n self.logging.error(\"could not enter image mode :(\")\n quit()\n\n def load_png(self, file_path):\n \"\"\"Load a PNG file into a byte buffer.\n\n Args:\n file_path (str): path to file\n\n Returns:\n file: returns the file contents\n \"\"\"\n with open(file_path, \"rb\") as file:\n return file.read()\n\n def split_into_chunks(self, data, chunk_size):\n \"\"\"Split the data into chunks of specified size.\n\n Args:\n data (bytearray): data to split into chunks\n chunk_size (int): size of the chunks\n\n Returns:\n list: returns list with chunks of given data input\n \"\"\"\n return [data[i : i + chunk_size] for i in range(0, len(data), chunk_size)]\n\n def create_payloads(self, png_data):\n \"\"\"Creates payloads from a PNG file.\n\n Args:\n png_data (bytearray): data of the png file\n\n Returns:\n bytearray: returns bytearray payload\n \"\"\"\n # Split the PNG data into 4096-byte chunks\n png_chunks = self.split_into_chunks(png_data, 4096)\n # Calculate the arbitrary metadata number\n idk = len(png_data) + len(png_chunks)\n idk_bytes = struct.pack(\"h\", idk) # convert to 16bit signed int\n png_len_bytes = struct.pack(\"i\", len(png_data))\n # build data\n payloads = bytearray()\n for i, chunk in enumerate(png_chunks):\n payload = (\n idk_bytes\n + bytearray(\n [\n 0,\n 0,\n 2 if i > 0 else 0,\n ]\n )\n + png_len_bytes\n + chunk\n )\n payloads.extend(payload)\n return payloads\n\n def upload_unprocessed(self, file_path):\n \"\"\"uploads an image without further checks and resizes.\n\n Args:\n file_path (str): path to the image file\n\n Returns:\n bytearray: returns bytearray payload\n \"\"\"\n png_data = self.load_png(file_path)\n return self.create_payloads(png_data)\n\n def upload_processed(self, file_path, pixel_size=32):\n \"\"\"uploads a file processed and makes sure everything is correct.\n\n Args:\n file_path (str): path to the image file\n pixel_size (int, optional): amount of pixels (either 16 or 32 makes sense). Defaults to 32.\n\n Returns:\n bytearray: returns bytearray payload\n \"\"\"\n try:\n # Open the image file\n with PilImage.open(file_path) as img:\n # Resize the image\n if img.size != (pixel_size, pixel_size):\n img = img.resize(\n (pixel_size, pixel_size), PilImage.Resampling.LANCZOS\n )\n # Create a BytesIO object to hold the PNG data\n png_buffer = io.BytesIO()\n # Save the resized image as PNG to the BytesIO object\n img.save(png_buffer, format=\"PNG\")\n # Seek to the start of the PNG buffer\n png_buffer.seek(0)\n # Return the PNG data\n return self.create_payloads(png_buffer.getvalue())\n except IOError as e:\n self.logging.error(\"could not process image: {}\".format(e))\n quit()"
},
{
"identifier": "FullscreenColor",
"path": "core/idotmatrix/fullscreenColor.py",
"snippet": "class FullscreenColor:\n \"\"\"This class contains the management of the iDotMatrix fullscreen color mode.\n Based on the BleProtocolN.java file of the iDotMatrix Android App.\n \"\"\"\n\n def setColor(self, r=0, g=0, b=0):\n \"\"\"Sets the fullscreen color of the screen of the device\n\n Args:\n r (int, optional): color red. Defaults to 0.\n g (int, optional): color green. Defaults to 0.\n b (int, optional): color blue. Defaults to 0.\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 7,\n 0,\n 2,\n 2,\n int(r) % 256,\n int(g) % 256,\n int(b) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not set the color: {}\".format(error))"
},
{
"identifier": "MusicSync",
"path": "core/idotmatrix/musicSync.py",
"snippet": "class MusicSync:\n def setMicType(self, type):\n \"\"\"Set the microphone type. Not referenced anywhere in the iDotMatrix Android App. So not used atm.\n\n Args:\n type (int): type of the Microphone. Unknown what values can be used.\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 6,\n 0,\n 11,\n 128,\n int(type) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not set the microphone type: {}\".format(error))\n\n def sendImageRythm(self, value1):\n \"\"\"Set the image rythm. Not referenced anywhere in the iDotMatrix Android App. When used (tested with values up to 10)\n it displays a stick figure which dances if the value1 gets changed often enough to a different one.\n\n Args:\n value1 (int): type of the rythm? Unknown what values can be used.\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 6,\n 0,\n 0,\n 2,\n int(value1) % 256,\n 1,\n ]\n )\n except BaseException as error:\n logging.error(\"could not set the image rythm: {}\".format(error))\n\n def sendRhythm(self, mode, byteArray):\n \"\"\"Used to send synchronized Microphone sound data to the device and visualizing it. Is handled in MicrophoneActivity.java of the\n iDotMatrix Android App. Will not be implemented here because I have no plans to support the computer microphone. The device\n has an integrated microphone which is able to react to sound.\n\n Args:\n mode (int): mode of the rythm.\n byteArray (byteArray): actual microphone sound data for the visualization.\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return byteArray\n except BaseException as error:\n logging.error(\"could not set the rythm: {}\".format(error))\n\n def stopRythm(self):\n \"\"\"Stops the Microhpone Rythm on the iDotMatrix device.\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n return bytearray([6, 0, 0, 2, 0, 0])"
},
{
"identifier": "Scoreboard",
"path": "core/idotmatrix/scoreboard.py",
"snippet": "class Scoreboard:\n \"\"\"This class contains the Scorboard management of the iDotMatrix device.\"\"\"\n\n def setScoreboard(self, count1, count2):\n \"\"\"Set the scoreboard of the device.\n\n Args:\n count1 (int): first counter, max: 999 (buffer overflow, if more! -> maybe RCE? :D)\n count2 (int): second counter, max: 999 (buffer overflow, if more! -> maybe RCE? :D)\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n bytearray_count1 = struct.pack(\"!h\", int(count1))\n bytearray_count2 = struct.pack(\"!h\", int(count2))\n return bytearray(\n [\n 8,\n 0,\n 10,\n 128,\n int(bytearray_count1[1]) % 256,\n int(bytearray_count1[0]) % 256,\n int(bytearray_count2[1]) % 256,\n int(bytearray_count2[0]) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not update the scoreboard: {}\".format(error))"
},
{
"identifier": "Graffiti",
"path": "core/idotmatrix/graffiti.py",
"snippet": "class Graffiti:\n \"\"\"This class contains the Graffiti controls for the iDotMatrix device.\"\"\"\n\n def setPixelColor(self, r, g, b, x, y):\n \"\"\"Set the scoreboard of the device.\n\n Args:\n r (int): color red value\n g (int): color green value\n b (int): color blue value\n x (int): pixel x position\n y (int): pixel y position\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n ###START COMMAND####\n 10,\n 0,\n 5,\n 1,\n 0,\n ###END COMMAND####\n r, ###COLOR R\n g, ###COLOR G\n b, ###COLOR B\n x, ###PIXEL X\n y, ###PIXEL Y\n ]\n )\n except BaseException as error:\n logging.error(\"could not update the Graffiti Board: {}\".format(error))"
}
] | from datetime import datetime
from PIL import Image
from .bluetooth import Bluetooth
from .idotmatrix.chronograph import Chronograph
from .idotmatrix.clock import Clock
from .idotmatrix.common import Common
from .idotmatrix.countdown import Countdown
from .idotmatrix.gif import Gif
from .idotmatrix.image import Image
from .idotmatrix.fullscreenColor import FullscreenColor
from .idotmatrix.musicSync import MusicSync
from .idotmatrix.scoreboard import Scoreboard
from .idotmatrix.graffiti import Graffiti
import logging
import os
import time | 8,275 | "--pixel-color",
action="append",
help="sets a pixel to a specific color. Could be used multiple times. Format: <PIXEL-X>-<PIXEL-Y>-<R0-255>-<G0-255>-<B0-255> (example: 0-0-255-255-255)",
nargs="+",
)
# scoreboard
parser.add_argument(
"--scoreboard",
action="store",
help="shows the scoreboard with the given scores. Format: <0-999>-<0-999>",
)
# image upload
parser.add_argument(
"--image",
action="store",
help="enables or disables the image mode (true = enable, false = disable)",
)
parser.add_argument(
"--set-image",
action="store",
help="uploads a given image file (fastest is png, max. pixel depending on your display). Format: ./path/to/image.png",
)
parser.add_argument(
"--process-image",
action="store",
help="processes the image instead of sending it raw (useful when the size does not match or it is not a png). Format: <AMOUNT_PIXEL>",
)
# gif upload
parser.add_argument(
"--set-gif",
action="store",
help="uploads a given gif file (pixel depending on your display). Format: ./path/to/image.gif",
)
parser.add_argument(
"--process-gif",
action="store",
help="processes the gif instead of sending it raw (useful when the size does not match). Format: <AMOUNT_PIXEL>",
)
async def run(self, args):
self.logging.info("initializing command line")
address = None
if args.address:
self.logging.debug("using --address")
address = args.address
elif "IDOTMATRIX_ADDRESS" in os.environ:
self.logging.debug("using IDOTMATRIX_ADDRESS")
address = os.environ["IDOTMATRIX_ADDRESS"]
if address is None:
self.logging.error("no device address given")
quit()
else:
self.bluetooth = Bluetooth(address)
# arguments which can be run in parallel
if args.sync_time:
await self.sync_time(args.set_time)
if args.rotate180degrees:
await self.rotate180degrees(args.rotate180degrees)
if args.togglescreen:
await self.togglescreen()
if args.set_brightness:
await self.set_brightness(args.set_brightness)
if args.set_password:
await self.set_password(args.set_password)
# arguments which cannot run in parallel
if args.test:
await self.test()
elif args.chronograph:
await self.chronograph(args.chronograph)
elif args.clock:
await self.clock(args)
elif args.countdown:
await self.countdown(args)
elif args.fullscreen_color:
await self.fullscreenColor(args.fullscreen_color)
elif args.pixel_color:
await self.pixelColor(args.pixel_color)
elif args.scoreboard:
await self.scoreboard(args.scoreboard)
elif args.image:
await self.image(args)
elif args.set_gif:
await self.gif(args)
async def test(self):
"""Tests all available options for the device"""
self.logging.info("starting test of device")
## chronograph
await self.bluetooth.send(Chronograph().setChronograph(1))
time.sleep(5)
await self.bluetooth.send(Chronograph().setChronograph(0))
time.sleep(1)
## clock
await self.bluetooth.send(Clock().setTimeIndicator(True))
await self.bluetooth.send(Clock().setClockMode(0, True, True))
time.sleep(5)
## countdown
await self.bluetooth.send(Countdown().setCountdown(1, 0, 5))
await self.bluetooth.send(Countdown().setCountdown(0, 0, 5))
time.sleep(5)
## fullscreen color
await self.bluetooth.send(FullscreenColor().setColor(255, 0, 0))
time.sleep(5)
## scoreboard
await self.bluetooth.send(Scoreboard().setScoreboard(1, 0))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(1, 1))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(1, 2))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(2, 2))
## graffiti
# load graffiti board and color pixel 0,0 red
await self.bluetooth.send(Graffiti().setPixelColor(255, 0, 0, 0, 0))
        # load graffiti board and color pixel 1,1 green
await self.bluetooth.send(Graffiti().setPixelColor(0, 255, 0, 1, 1))
        # load graffiti board and color pixel 2,2 blue
await self.bluetooth.send(Graffiti().setPixelColor(0, 0, 255, 2, 2))
time.sleep(5)
## diy image (png)
| # python imports
# idotmatrix imports
class CMD:
bluetooth = None
logging = logging.getLogger("idotmatrix." + __name__)
def add_arguments(self, parser):
# test
parser.add_argument(
"--test",
action="store_true",
help="run the test function from the command line class",
)
# time sync
parser.add_argument(
"--sync-time",
action="store_true",
help="sync time to device",
)
parser.add_argument(
"--set-time",
action="store",
help="optionally set time to sync to device (use with --sync-time)",
default=datetime.now().strftime("%d-%m-%Y-%H:%M:%S"),
)
# device screen rotation
parser.add_argument(
"--rotate180degrees",
action="store",
help="enable 180 degree device rotation (true = enable, false = disable)",
)
# screen toggle
parser.add_argument(
"--togglescreen",
action="store_true",
help="toggles the screen on or off",
)
# brightness
parser.add_argument(
"--set-brightness",
action="store",
help="sets the brightness of the screen in percent: range 5..100",
)
# password
parser.add_argument(
"--set-password",
action="store",
help="sets password",
)
# chronograph
parser.add_argument(
"--chronograph",
action="store",
help="sets the chronograph mode: 0 = reset, 1 = (re)start, 2 = pause, 3 = continue after pause",
)
# clock
parser.add_argument(
"--clock",
action="store",
help="sets the clock mode: 0 = default, 1 = christmas, 2 = racing, 3 = inverted full screen, 4 = animated hourglass, 5 = frame 1, 6 = frame 2, 7 = frame 3",
)
parser.add_argument(
"--clock-with-date",
action="store_true",
help="shows the current date in addition to the current time.",
)
parser.add_argument(
"--clock-24h",
action="store_true",
help="shows the current time in 24h format.",
)
parser.add_argument(
"--clock-color",
action="store",
help="sets the color of the clock. Format: <R0-255>-<G0-255>-<B0-255> (example: 255-255-255)",
default="255-255-255",
)
# countdown
parser.add_argument(
"--countdown",
action="store",
help="sets the countdown mode: 0 = disable, 1 = start, 2 = pause, 3 = restart",
)
parser.add_argument(
"--countdown-time",
action="store",
help="sets the countdown mode: <MINUTES>-<SECONDS> (example: 10-30)",
default="5-0",
)
# fullscreen color
parser.add_argument(
"--fullscreen-color",
action="store",
help="sets a fullscreen color. Format: <R0-255>-<G0-255>-<B0-255> (example: 255-255-255)",
)
# pixel color
parser.add_argument(
"--pixel-color",
action="append",
help="sets a pixel to a specific color. Could be used multiple times. Format: <PIXEL-X>-<PIXEL-Y>-<R0-255>-<G0-255>-<B0-255> (example: 0-0-255-255-255)",
nargs="+",
)
# scoreboard
parser.add_argument(
"--scoreboard",
action="store",
help="shows the scoreboard with the given scores. Format: <0-999>-<0-999>",
)
# image upload
parser.add_argument(
"--image",
action="store",
help="enables or disables the image mode (true = enable, false = disable)",
)
parser.add_argument(
"--set-image",
action="store",
help="uploads a given image file (fastest is png, max. pixel depending on your display). Format: ./path/to/image.png",
)
parser.add_argument(
"--process-image",
action="store",
help="processes the image instead of sending it raw (useful when the size does not match or it is not a png). Format: <AMOUNT_PIXEL>",
)
# gif upload
parser.add_argument(
"--set-gif",
action="store",
help="uploads a given gif file (pixel depending on your display). Format: ./path/to/image.gif",
)
parser.add_argument(
"--process-gif",
action="store",
help="processes the gif instead of sending it raw (useful when the size does not match). Format: <AMOUNT_PIXEL>",
)
async def run(self, args):
self.logging.info("initializing command line")
address = None
if args.address:
self.logging.debug("using --address")
address = args.address
elif "IDOTMATRIX_ADDRESS" in os.environ:
self.logging.debug("using IDOTMATRIX_ADDRESS")
address = os.environ["IDOTMATRIX_ADDRESS"]
if address is None:
self.logging.error("no device address given")
quit()
else:
self.bluetooth = Bluetooth(address)
# arguments which can be run in parallel
if args.sync_time:
await self.sync_time(args.set_time)
if args.rotate180degrees:
await self.rotate180degrees(args.rotate180degrees)
if args.togglescreen:
await self.togglescreen()
if args.set_brightness:
await self.set_brightness(args.set_brightness)
if args.set_password:
await self.set_password(args.set_password)
# arguments which cannot run in parallel
if args.test:
await self.test()
elif args.chronograph:
await self.chronograph(args.chronograph)
elif args.clock:
await self.clock(args)
elif args.countdown:
await self.countdown(args)
elif args.fullscreen_color:
await self.fullscreenColor(args.fullscreen_color)
elif args.pixel_color:
await self.pixelColor(args.pixel_color)
elif args.scoreboard:
await self.scoreboard(args.scoreboard)
elif args.image:
await self.image(args)
elif args.set_gif:
await self.gif(args)
async def test(self):
"""Tests all available options for the device"""
self.logging.info("starting test of device")
## chronograph
await self.bluetooth.send(Chronograph().setChronograph(1))
time.sleep(5)
await self.bluetooth.send(Chronograph().setChronograph(0))
time.sleep(1)
## clock
await self.bluetooth.send(Clock().setTimeIndicator(True))
await self.bluetooth.send(Clock().setClockMode(0, True, True))
time.sleep(5)
## countdown
await self.bluetooth.send(Countdown().setCountdown(1, 0, 5))
await self.bluetooth.send(Countdown().setCountdown(0, 0, 5))
time.sleep(5)
## fullscreen color
await self.bluetooth.send(FullscreenColor().setColor(255, 0, 0))
time.sleep(5)
## scoreboard
await self.bluetooth.send(Scoreboard().setScoreboard(1, 0))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(1, 1))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(1, 2))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(2, 2))
## graffiti
# load graffiti board and color pixel 0,0 red
await self.bluetooth.send(Graffiti().setPixelColor(255, 0, 0, 0, 0))
        # load graffiti board and color pixel 1,1 green
await self.bluetooth.send(Graffiti().setPixelColor(0, 255, 0, 1, 1))
        # load graffiti board and color pixel 2,2 blue
await self.bluetooth.send(Graffiti().setPixelColor(0, 0, 255, 2, 2))
time.sleep(5)
## diy image (png) | await self.bluetooth.send(Image().show(1)) | 6 | 2023-11-13 14:04:21+00:00 | 12k |
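The helper classes in this entry's context (Clock, Common, FullscreenColor, Scoreboard, ...) only build command byte arrays; nothing reaches the device until a payload is passed to Bluetooth.send(), which connects on demand and writes the data in MTU-sized chunks, as CMD.test() does above. The sketch below shows that pattern outside the CMD class, assuming the package is importable as core.* from the repository root; the device address is a placeholder and the clock style and colour values are arbitrary.

import asyncio
from datetime import datetime

from core.bluetooth import Bluetooth
from core.idotmatrix.clock import Clock
from core.idotmatrix.common import Common


async def show_red_clock(address: str):
    """Sync the device time, then switch to the default clock face in red (illustrative only)."""
    device = Bluetooth(address)
    now = datetime.now()
    # Common.setTime builds the 11-byte "set time" command; send() connects and chunks it to the GATT MTU.
    await device.send(
        Common().setTime(now.year, now.month, now.day, now.hour, now.minute, now.second)
    )
    # Style 0 = default clock face, show the date, 24-hour format, pure red.
    await device.send(Clock().setClockMode(0, True, True, r=255, g=0, b=0))
    await device.disconnect()


# asyncio.run(show_red_clock("00:11:22:33:44:55"))  # placeholder MAC address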
AdmTal/music-graphs | src/generate_music_graph.py | [
{
"identifier": "AnimationFrames",
"path": "src/animation_stuff.py",
"snippet": "class AnimationFrames:\n \"\"\"\n Helper object to organize layered frames in order to produce an animation.\n self._data is a Dict. The Keys are \"layer_ids\", and the values are Lists of \"images\".\n They are not really images, they are instead closures that can be executed to create an image.\n \"\"\"\n\n def __init__(self):\n self._data = {}\n\n def __len__(self):\n if not len(self._data):\n return 0\n first_layer = list(self._data.keys())[0]\n return len(self._data[first_layer])\n\n def items(self):\n return self._data.items()\n\n def _ensure_layer_length(self, length):\n for layer_id in self._data:\n self._data[layer_id].extend([None] * (length - len(self._data[layer_id])))\n\n def add_frames_to_layer(self, layer_id, frame_index, images):\n # Find the new length, which is the longer of the current max length or the new frame_index + number of images\n new_length = max(\n frame_index + len(images),\n max((len(frames) for frames in self._data.values()), default=0),\n )\n\n # Extend all layers to the new length\n self._ensure_layer_length(new_length)\n\n # Add new layer if it doesn't exist\n if layer_id not in self._data:\n self._data[layer_id] = [None] * new_length\n\n # Set images at the correct frame index\n for i, image in enumerate(images):\n self._data[layer_id][frame_index + i] = image\n\n def __str__(self):\n return str(self._data)"
},
{
"identifier": "cleanup_cache_dir",
"path": "src/cache_stuff.py",
"snippet": "def cleanup_cache_dir(cache_dir):\n shutil.rmtree(cache_dir)"
},
{
"identifier": "get_cache_dir",
"path": "src/cache_stuff.py",
"snippet": "def get_cache_dir():\n global _cache_dir_created, _cache_dir\n if not _cache_dir_created:\n _cache_dir = f\".cache/{uuid4()}\"\n os.makedirs(_cache_dir, exist_ok=True)\n _cache_dir_created = True\n return _cache_dir"
},
{
"identifier": "animate_bezier_point",
"path": "src/graph_stuff.py",
"snippet": "def animate_bezier_point(\n base_image,\n offsets,\n theme,\n track,\n points,\n frame_number,\n animation_length_in_frames,\n):\n overlay_image = Image.new(\n \"RGBA\",\n base_image.size,\n color=None,\n )\n draw = ImageDraw.Draw(overlay_image)\n\n x_offset, y_offset = offsets\n\n t = frame_number / animation_length_in_frames\n point = bezier_point(t, [points[i : i + 2] for i in range(0, len(points), 2)])\n point_center = (x_offset + point[0], y_offset + point[1])\n\n # Draw the 3D-looking circle\n for i in range(theme.ball_radius(track) // 2):\n # Calculate the color gradient based on the specified ball color\n draw.ellipse(\n [\n (point_center[0] - i, point_center[1] - i),\n (point_center[0] + i, point_center[1] + i),\n ],\n fill=theme.ball_color(track),\n outline=hex_to_rgb(theme.ball_stroke_color(track)),\n width=theme.ball_stroke_width(track),\n )\n\n blur_max = theme.ball_g_blur_max(track)\n if blur_max:\n blur_radius = min(\n animation_length_in_frames - frame_number,\n theme.ball_g_blur_max(track) / (frame_number + 1),\n )\n overlay_image = overlay_image.filter(\n ImageFilter.GaussianBlur(radius=blur_radius)\n )\n\n # Composite the transparent overlay onto the base image\n return Image.alpha_composite(\n base_image.convert(\"RGBA\"),\n overlay_image,\n )"
},
{
"identifier": "animate_ellipsis_blur",
"path": "src/graph_stuff.py",
"snippet": "def animate_ellipsis_blur(\n base_image,\n points,\n frame_number,\n offsets,\n theme,\n track,\n animation_len,\n velocity,\n):\n image = base_image.copy()\n draw = ImageDraw.Draw(image)\n\n x_offset, y_offset = offsets\n x0, y0, w, h = points\n x0 += x_offset\n y0 += y_offset\n\n # Calculate the increase in size\n w_increase = w * theme.note_increase_size(track) * (velocity / 127)\n h_increase = h * theme.note_increase_size(track) * (velocity / 127)\n\n # Define the bounding box with the increased size\n bounding_box = [\n x0 - w - w_increase / 2,\n y0 - h - h_increase / 2,\n x0 + w + w_increase / 2,\n y0 + h + h_increase / 2,\n ]\n\n # Draw the initial ellipse\n draw.ellipse(\n bounding_box,\n outline=hex_to_rgb(theme.note_color(track)),\n width=theme.note_stroke_width(track),\n )\n\n # Determine the blur radius for this frame\n blur_strength = (frame_number / animation_len) * velocity\n blur_radius = max(1, blur_strength)\n\n # Create a mask for the ellipse to constrain the blur effect\n mask = Image.new(\"L\", image.size, 0)\n mask_draw = ImageDraw.Draw(mask)\n mask_draw.ellipse(bounding_box, fill=255)\n\n # Apply the blur effect on the mask\n mask_blurred = mask.filter(ImageFilter.GaussianBlur(blur_radius))\n\n # Create a solid image for the blur color\n ellipse = Image.new(\"RGBA\", image.size, hex_to_rgb(theme.note_color(track)))\n\n # Composite the blurred mask with the ellipse onto the base image\n image.paste(ellipse, mask=mask_blurred)\n\n return image"
},
{
"identifier": "draw_fading_bezier_curve",
"path": "src/graph_stuff.py",
"snippet": "def draw_fading_bezier_curve(\n base_image,\n offsets,\n theme,\n points,\n frame_number,\n track,\n animation_len,\n):\n # Create a new transparent image to draw the Bézier curve\n overlay_image = Image.new(\"RGBA\", base_image.size, (255, 255, 255, 0))\n draw = ImageDraw.Draw(overlay_image)\n\n # Calculate alpha value for current frame\n alpha = calculate_alpha(frame_number, animation_len)\n\n # Split the points into pairs and apply offsets\n points = [points[i : i + 2] for i in range(0, len(points), 2)]\n points = [(c[0] + offsets[0], c[1] + offsets[1]) for c in points]\n\n # Split the curve into segments\n segments = 300 * len(points)\n curve = [bezier_point(t / segments, points) for t in range(segments + 1)]\n\n # Draw the border/shadow\n border_width = theme.chord_line_width(track) * 2\n border_rgba_color = hex_to_rgba(theme.chord_line_border_color(track), alpha)\n for i in range(segments):\n draw.line(\n (\n curve[i],\n curve[i + 1],\n ),\n fill=border_rgba_color,\n width=border_width,\n )\n overlay_image = overlay_image.filter(ImageFilter.GaussianBlur(radius=5))\n draw = ImageDraw.Draw(overlay_image)\n\n # Convert hex color to RGBA with alpha for main line\n rgba_color = hex_to_rgba(theme.chord_line_color(track), alpha)\n\n # Draw the main line with fading effect\n for i in range(segments):\n draw.line(\n (\n curve[i],\n curve[i + 1],\n ),\n fill=rgba_color,\n width=theme.chord_line_width(track),\n )\n\n # Composite the transparent overlay onto the base image\n return Image.alpha_composite(\n base_image.convert(\"RGBA\"),\n overlay_image,\n )"
},
{
"identifier": "parse_graph",
"path": "src/graph_stuff.py",
"snippet": "def parse_graph(\n graph,\n theme: Theme,\n):\n temp_filename = f\"{get_cache_dir()}/graph.gv\"\n graph.filename = temp_filename\n graph.render(view=False)\n file_contents = open(f\"{temp_filename}.xdot\").read()\n\n lines = compact_dot_format(file_contents).split(\"\\n\")\n\n nodes = {}\n edges = defaultdict(dict)\n\n nodes_to_draw = []\n text_to_draw = []\n\n for line in lines[1:-1]:\n line_id, attributes = line.split(\"[\")\n line_id = line_id.strip().replace('\"', \"\")\n attributes = attributes.replace(\"];\", \"\")\n\n attrs_dict = split_attributes(attributes)\n\n if line_id == \"graph\":\n draw = parse_draw(attrs_dict[\"_draw_\"], theme.dpi)\n host_image = Image.new(\n \"RGBA\",\n (\n theme.width,\n theme.height,\n ),\n color=None,\n )\n\n # offsets\n host_width, host_height = host_image.size\n width, height = get_dimensions(draw.p_points)\n x = (host_width - width) // 2\n y = (host_height - height) // 2\n offsets = (x, y)\n\n graph_image = Image.new(\n \"RGBA\",\n (\n theme.width,\n theme.height,\n ),\n color=(0, 0, 0, 0),\n )\n\n if theme.background_image:\n bg_image = Image.open(theme.background_image).convert(\"RGBA\")\n bg_image = bg_image.resize(\n (theme.width, theme.height),\n Image.Resampling.LANCZOS,\n )\n host_image.paste(bg_image, (0, 0))\n else:\n # Create a new white image if background_image is false\n bg_image = Image.new(\n \"RGBA\",\n (theme.width, theme.height),\n hex_to_rgb(theme.background_color),\n )\n host_image.paste(bg_image, (0, 0))\n\n if \"_draw_\" in attrs_dict:\n draw = parse_draw(attrs_dict[\"_draw_\"], theme.dpi)\n if draw.e_points:\n nodes_to_draw.append(\n [\n draw.e_points,\n theme.node_outline_color,\n theme.node_fill_color,\n theme.node_shadow_color,\n theme.node_shadow_size,\n theme.graph_line_width,\n ]\n )\n nodes[line_id] = draw\n\n if draw.b_points:\n a, b = line_id.split(\" -- \")\n edges[a][b] = draw\n edges[b][a] = draw\n if theme.show_lines:\n draw_bezier_curve(\n offsets,\n graph_image,\n draw.b_points,\n theme.graph_line_color,\n theme.graph_line_width,\n theme.graph_line_blur,\n )\n\n if \"_ldraw_\" in attrs_dict and not theme.hide_letters:\n ldraw = parse_ldraw(attrs_dict[\"_ldraw_\"], theme.dpi)\n\n if len(ldraw.text) == 2:\n dx = theme.text_location_offsets.len_2.x\n dy = theme.text_location_offsets.len_2.y\n else:\n dx = theme.text_location_offsets.len_1.x\n dy = theme.text_location_offsets.len_1.y\n\n text_to_draw.append(\n [\n ldraw.text,\n ldraw.text_x + dx,\n ldraw.text_y + dy,\n theme.font,\n theme.font_size,\n theme.node_text_color,\n theme.node_text_outline_color,\n theme.node_text_stroke_width,\n ]\n )\n\n for args in nodes_to_draw:\n draw_ellipse(offsets, graph_image, *args)\n\n for args in text_to_draw:\n draw_centered_text(offsets, graph_image, *args)\n\n paste_center(host_image, graph_image)\n\n return host_image, nodes, edges, offsets"
},
{
"identifier": "get_node_positions",
"path": "src/graph_stuff.py",
"snippet": "def get_node_positions(graph):\n \"\"\"Draw a graph to a file, load it, then parse it's `node[pos] values, and return them\"\"\"\n temp_filename = f\"{get_cache_dir()}/graph_order\"\n graph.filename = temp_filename\n graph.render(view=False)\n file_contents = open(f\"{temp_filename}.plain\").read()\n lines = file_contents.split(\"\\n\")\n\n nodes = {}\n\n for line in lines:\n tokens = line.split(\" \")\n if tokens[0] != \"node\":\n continue\n _, node, x, y = tokens[0], tokens[1], tokens[2], tokens[3]\n node = node.replace('\"', \"\")\n nodes[node] = f\"{x},{y}!\"\n\n return nodes"
},
{
"identifier": "get_note_start_times_in_frames",
"path": "src/midi_stuff.py",
"snippet": "def get_note_start_times_in_frames(\n midi_file_path,\n fps,\n squash_tracks=False,\n group_notes_by_track=False,\n):\n # Load the MIDI file\n midi_data = pretty_midi.PrettyMIDI(midi_file_path)\n\n track_events_frames = defaultdict(lambda: defaultdict(list))\n\n for i, instrument in enumerate(midi_data.instruments, start=1):\n for note in instrument.notes:\n # Calculate the start time of the note in seconds and convert to frames\n start_time = note.start\n end_time = note.end\n frame = int(start_time * fps)\n note_length_in_frames = int((end_time - start_time) * fps)\n\n track_name = 0\n if group_notes_by_track:\n track_name = i + 1\n note_value = get_note(track_name, note.pitch)\n\n note_tuple = (\n note_value,\n note.velocity,\n note_length_in_frames,\n )\n if squash_tracks:\n track_events_frames[f\"track_2\"][frame].append(note_tuple)\n else:\n track_events_frames[f\"track_{track_name}\"][frame].append(note_tuple)\n\n return track_events_frames"
},
{
"identifier": "TRACK_NOTE_DELIMITER",
"path": "src/midi_stuff.py",
"snippet": "TRACK_NOTE_DELIMITER = \"#\""
},
{
"identifier": "Theme",
"path": "src/theme_stuff.py",
"snippet": "class Theme:\n def __init__(\n self,\n theme_file,\n defaults_file,\n ):\n with open(theme_file, \"r\") as stream:\n self._theme = AttributeDict(**yaml.safe_load(stream))\n\n with open(defaults_file, \"r\") as stream:\n try:\n self._defaults = AttributeDict(**yaml.safe_load(stream))\n except:\n self._defaults = AttributeDict(**{})\n\n def _get_value(self, path, default_path=\"\"):\n value = self._theme.get_path(path)\n if value is not None:\n return value\n if default_path:\n theme_default = self._theme.get_path(default_path)\n if theme_default:\n return theme_default\n return self._defaults.get_path(default_path)\n\n @property\n def debug_show_base_image(self):\n path = \"debug.show_base_image\"\n return self._get_value(path, path)\n\n @property\n def debug_max_frames(self):\n path = \"debug.max_frames\"\n return self._get_value(path, path)\n\n @property\n def frame_rate(self):\n path = \"frame_rate\"\n return self._get_value(path, path)\n\n @property\n def graphviz_engine(self):\n path = \"graphviz_engine\"\n return self._get_value(path, path)\n\n @property\n def squash_tracks(self):\n path = \"squash_tracks\"\n return self._get_value(path, path)\n\n def skip_track(self, track):\n if track == \"track_1\":\n return True\n return self._get_value(\n f\"tracks.{track}.skip\",\n default_path=f\"tracks.default.skip\",\n )\n\n def pulses_only(self, track):\n return self._get_value(\n f\"tracks.{track}.pulses_only\",\n default_path=f\"tracks.default.pulses_only\",\n )\n\n def allow_self_notes(self, track):\n return self._get_value(\n f\"tracks.{track}.allow_self_notes\",\n default_path=f\"tracks.default.skip\",\n )\n\n @property\n def graphviz_edge_attrs(self):\n path = \"graphviz_edge_attrs\"\n return self._get_value(path, path)\n\n @property\n def graphviz_node_attrs(self):\n path = \"graphviz_node_attrs\"\n return self._get_value(path, path)\n\n @property\n def graphviz_graph_attrs(self):\n path = \"graphviz_graph_attrs\"\n return self._get_value(path, path)\n\n @property\n def nodes_sorted(self):\n path = \"nodes_sorted\"\n return self._get_value(path, path)\n\n @property\n def background_image(self):\n path = \"background_image\"\n return self._get_value(path, path)\n\n @property\n def background_color(self):\n path = \"background_color\"\n return self._get_value(path, path)\n\n @property\n def font(self):\n path = \"font\"\n return self._get_value(path, path)\n\n @property\n def hide_letters(self):\n path = \"hide_letters\"\n return self._get_value(path, path)\n\n @property\n def group_notes_by_track(self):\n path = \"group_notes_by_track\"\n return self._get_value(path, path)\n\n @property\n def width(self):\n path = \"width\"\n return self._get_value(path, path)\n\n @property\n def height(self):\n path = \"height\"\n return self._get_value(path, path)\n\n @property\n def show_lines(self):\n path = \"show_graph_lines\"\n return self._get_value(path, path)\n\n @property\n def graph_line_width(self):\n path = \"graph_line_width\"\n return self._get_value(path, path)\n\n @property\n def graph_line_blur(self):\n path = \"graph_line_blur\"\n return self._get_value(path, path)\n\n @property\n def graph_line_color(self):\n path = \"graph_line_color\"\n return self._get_value(path, path)\n\n @property\n def font_size(self):\n path = \"font_size\"\n return self._get_value(path, path)\n\n @property\n def node_outline_color(self):\n path = \"node.outline_color\"\n return self._get_value(path, path)\n\n @property\n def node_fill_color(self):\n path = \"node.fill_color\"\n return 
self._get_value(path, path)\n\n @property\n def node_text_color(self):\n path = \"node.text.color\"\n return self._get_value(path, path)\n\n @property\n def node_text_outline_color(self):\n path = \"node.text.stroke_color\"\n return self._get_value(path, path)\n\n @property\n def node_text_stroke_width(self):\n path = \"node.text.stroke_width\"\n return self._get_value(path, path)\n\n @property\n def dpi(self):\n path = \"dpi\"\n return self._get_value(path, path)\n\n @property\n def text_location_offsets(self):\n path = \"text_location_offsets\"\n return self._get_value(path, path)\n\n @property\n def node_shadow_color(self):\n path = \"node.shadow_color\"\n return self._get_value(path, path)\n\n @property\n def node_shadow_size(self):\n path = \"node.shadow_size\"\n return self._get_value(path, path) / 100\n\n @property\n def tracks(self):\n return list(self._theme.tracks.keys())\n\n def note_num_frames(self, track):\n a = self._get_value(\n f\"tracks.{track}.note.num_frames\",\n default_path=f\"tracks.default.note.num_frames\",\n )\n return a\n\n def note_color(self, track):\n return self._get_value(\n f\"tracks.{track}.note.color\",\n default_path=f\"tracks.default.note.color\",\n )\n\n def note_stroke_width(self, track):\n return self._get_value(\n f\"tracks.{track}.note.stroke_width\",\n default_path=f\"tracks.default.note.stroke_width\",\n )\n\n def note_increase_size(self, track):\n return (\n self._get_value(\n f\"tracks.{track}.note.increase_size\",\n default_path=f\"tracks.default.note.increase_size\",\n )\n / 100\n )\n\n def chord_line_width(self, track):\n return self._get_value(\n f\"tracks.{track}.chord_line.width\",\n default_path=f\"tracks.default.chord_line.width\",\n )\n\n def chord_line_border_color(self, track):\n return self._get_value(\n f\"tracks.{track}.chord_line.border_color\",\n default_path=f\"tracks.default.chord_line.border_color\",\n )\n\n def chord_line_color(self, track):\n return self._get_value(\n f\"tracks.{track}.chord_line.color\",\n default_path=f\"tracks.default.chord_line.color\",\n )\n\n def ball_radius(self, track):\n return self._get_value(\n f\"tracks.{track}.ball.radius\",\n default_path=f\"tracks.default.ball.radius\",\n )\n\n def ball_g_blur_max(self, track):\n return self._get_value(\n f\"tracks.{track}.ball.g_blur_max\",\n default_path=f\"tracks.default.ball.g_blur_max\",\n )\n\n def ball_color(self, track):\n return self._get_value(\n f\"tracks.{track}.ball.color\",\n default_path=f\"tracks.default.ball.color\",\n )\n\n def ball_stroke_color(self, track):\n return self._get_value(\n f\"tracks.{track}.ball.stroke_color\",\n default_path=f\"tracks.default.ball.stroke_color\",\n )\n\n def ball_stroke_width(self, track):\n return self._get_value(\n f\"tracks.{track}.ball.stroke_width\",\n default_path=f\"tracks.default.ball.stroke_width\",\n )"
},
{
"identifier": "add_frame_to_video",
"path": "src/video_stuff.py",
"snippet": "def add_frame_to_video(writer, frame):\n writer.append_data(np.array(frame))"
},
{
"identifier": "finalize_video_with_music",
"path": "src/video_stuff.py",
"snippet": "def finalize_video_with_music(\n writer,\n video_file_path,\n output_file_name,\n midi_file_path,\n frame_rate,\n soundfont_file,\n frames_written,\n):\n writer.close() # Ensure the writer is closed\n\n # Audio processing\n temp_music_file = os.path.join(get_cache_dir(), \"temp_music.wav\")\n open(temp_music_file, \"ab\").close()\n click.echo(\"Converting midi to wave...\")\n convert_midi_to_wav(\n midi_file_path,\n temp_music_file,\n soundfont_file,\n )\n audio_clip = AudioSegment.from_file(temp_music_file)\n\n audio_duration = int(\n (frames_written / frame_rate) * 1000\n ) # Duration in milliseconds\n audio_clip = audio_clip[:audio_duration] # Truncate the audio\n\n temp_audio = f\"{get_cache_dir()}/music.wav\"\n audio_clip.export(temp_audio, format=\"wav\")\n\n final_video = VideoFileClip(video_file_path)\n final_video_audio = AudioFileClip(temp_audio)\n final_video = final_video.set_audio(final_video_audio)\n\n timestamp = int(time.time())\n final_output_path = f\"{output_file_name}_{timestamp}.mp4\"\n final_video.write_videofile(final_output_path, codec=\"libx264\", audio_codec=\"aac\")\n\n cleanup_cache_dir(get_cache_dir())\n\n return final_output_path"
},
{
"identifier": "initialize_video_writer",
"path": "src/video_stuff.py",
"snippet": "@contextmanager\ndef initialize_video_writer(frame_rate):\n video_file_path = f\"{get_cache_dir()}/video.mp4\"\n writer = imageio.get_writer(video_file_path, fps=frame_rate)\n try:\n yield writer, video_file_path\n finally:\n writer.close()"
}
] | import os
import click
import psutil
from graphviz import Graph
from hurry.filesize import size
from concurrent.futures import ThreadPoolExecutor, as_completed
from src.animation_stuff import AnimationFrames
from src.cache_stuff import (
cleanup_cache_dir,
get_cache_dir,
)
from src.graph_stuff import (
animate_bezier_point,
animate_ellipsis_blur,
draw_fading_bezier_curve,
parse_graph,
get_node_positions,
)
from src.midi_stuff import (
get_note_start_times_in_frames,
TRACK_NOTE_DELIMITER,
)
from src.theme_stuff import Theme
from src.video_stuff import (
add_frame_to_video,
finalize_video_with_music,
initialize_video_writer,
) | 8,024 | # For each individual chord, draw the lines
for frame_len, all_notes in notes_in_cords.items():
                    # The chord lines should not overlap, so sort them according to the sort order
if theme.nodes_sorted:
if isinstance(theme.nodes_sorted, bool):
all_notes = sorted(
all_notes,
key=lambda i: int(i.split(TRACK_NOTE_DELIMITER)[1]),
)
else:
all_notes = filter_and_order_custom(
theme.nodes_sorted, all_notes
)
# Use `overlapping_pairs` to make the notes connect as a circle
pairs = overlapping_pairs(all_notes)
for a, b in pairs:
frames = []
for i in range(frame_len):
if b not in edges[a]:
continue
frames.append(
[
draw_fading_bezier_curve,
{
"track": track,
"points": edges[a][b].b_points,
"frame_number": i,
"animation_len": frame_len,
},
]
)
FRAMES.add_frames_to_layer(
f"l1-{track}-{a}-{b}-line", curr_frame, frames
)
curr_notes = [curr_note_tuple[0] for curr_note_tuple in curr_note_tuples]
# Animate the "next note" balls
if prev_notes:
animation_length_in_frames = curr_frame - prev_notes_frame
drawn_to = set()
source_usage = {note: 0 for note in prev_notes}
                # If there are more destination notes than source notes, allow each source to be used twice so more destinations receive a ball
max_usage = 2 if len(curr_notes) > len(prev_notes) else 1
if animation_length_in_frames / theme.frame_rate <= 10:
for a in prev_notes:
for b in curr_notes:
if (
b in drawn_to
or (a == b and not theme.allow_self_notes(track))
or source_usage[a] >= max_usage
or b not in edges[a]
):
continue
frames = []
for i in range(animation_length_in_frames):
frame = [
animate_bezier_point,
{
"track": track,
"points": edges[a][b].b_points,
"frame_number": i,
"animation_length_in_frames": animation_length_in_frames,
},
]
frames.append(frame)
FRAMES.add_frames_to_layer(
f"l3-{track}-{a}-{b}-balls", prev_notes_frame, frames
)
drawn_to.add(b)
source_usage[a] += 1
prev_notes = curr_notes
prev_notes_frame = curr_frame
num_frames = len(FRAMES)
if theme.debug_max_frames:
num_frames = theme.debug_max_frames
writer_context = initialize_video_writer(theme.frame_rate)
frames_written = 0
click.echo("\nDrawing frames, writing videos...")
NUM_WORKERS = os.cpu_count()
with writer_context as (writer, video_file_path):
while frames_written < num_frames:
with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
future_to_frame = {
executor.submit(
process_frame,
current_frame=i,
base_image=base_image.copy(),
theme=theme,
offsets=offsets,
FRAMES=FRAMES,
): i
for i in range(
frames_written, min(frames_written + NUM_WORKERS, num_frames)
)
}
results = []
for future in as_completed(future_to_frame):
frame_index = future_to_frame[future]
frame_image = future.result()
results.append((frame_index, frame_image))
for frame_index, frame_image in sorted(results, key=lambda x: x[0]):
add_frame_to_video(writer, frame_image)
frames_written += 1
usage = size(psutil.Process().memory_info().rss)
click.echo(
f"\rProcessed {frames_written} of {num_frames}... (memory usage={usage})",
nl=False,
)
|
def midi_note_to_pitch_class(midi_note):
_, note = midi_note.split(TRACK_NOTE_DELIMITER)
midi_note = int(note)
note_names = ["C", "Db", "D", "Eb", "E", "F", "F#", "G", "Ab", "A", "Bb", "B"]
return note_names[(midi_note - 1) % 12]
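# Maps a "track<DELIM>note" string to a pitch-class label for a graph node.
# With the `- 1` offset, note number 61 maps to index 60 % 12 == 0 -> "C";
# whether the numbers arriving here are standard MIDI values (60 = middle C)
# or the 1-based numbering this offset suggests is decided in midi_stuff, not here.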
def overlapping_pairs(lst):
return list(zip(lst, lst[1:])) + [(lst[-1], lst[0])] if len(lst) > 1 else []
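# For illustration: overlapping_pairs pairs each item with its successor and
# wraps the last item back to the first, which is what closes chord lines and
# ball paths into a circle, e.g.
#   overlapping_pairs(["a", "b", "c"])  ->  [("a", "b"), ("b", "c"), ("c", "a")]
#   overlapping_pairs(["solo"])         ->  []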
def create_graphviz_default_sort(theme, track_events_frames):
"""Create a Graphviz without a specified order"""
song_graph = Graph(
"G",
engine=theme.graphviz_engine,
format="xdot",
strict=True,
node_attr=theme.graphviz_node_attrs,
graph_attr=theme.graphviz_graph_attrs,
edge_attr=theme.graphviz_edge_attrs,
)
for track, note_tuples in track_events_frames.items():
if theme.skip_track(track):
continue
notes = [
[note_tuple[0] for note_tuple in list_of_note_tuples]
for frame_num, list_of_note_tuples in note_tuples.items()
]
# Create Nodes
for note in notes:
n = note[0]
song_graph.node(n, label=midi_note_to_pitch_class(n))
# Create Edges
melody_pairs = overlapping_pairs(notes)
for a_notes, b_notes in melody_pairs:
for a in a_notes:
for b in b_notes:
song_graph.node(b, label=midi_note_to_pitch_class(b))
song_graph.edge(a, b)
return song_graph
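# Note: the graph is declared strict, so Graphviz collapses duplicate edges
# between the same pair of notes, and DOT merges repeated node statements,
# which is why re-declaring nodes inside the edge loop above is harmless.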
def filter_and_order_custom(reference_list, input_list):
# Extract the numbers from the input strings and convert them to integers
input_numbers = [int(item.split(TRACK_NOTE_DELIMITER)[1]) for item in input_list]
# Create a mapping of number to original string for reconstruction later
number_to_string = dict(zip(input_numbers, input_list))
# Filter and order the input list based on the reference list
ordered_list = [
number_to_string[item] for item in reference_list if item in number_to_string
]
return ordered_list
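# For illustration: given inputs like "<track><DELIM>60" and "<track><DELIM>64"
# (DELIM being TRACK_NOTE_DELIMITER from midi_stuff), a reference list [64, 60]
# returns ["<track><DELIM>64", "<track><DELIM>60"]; numbers missing from the
# reference list are dropped entirely.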
def create_graphviz_sorted(theme, track_events_frames):
"""
This function implements a hack to force Graphviz node ordering.
Step 1: Create a bare-bones CIRCO graph with nodes added in order
Step 2: Save that graph to a file, and extract its node positions
Step 3: Generate the final NEATO graph, using hard coded node positions
"""
if theme.graphviz_engine.lower() != "circo":
click.echo(
"ERROR: Node sorting only works when graphviz engine is circo", err=True
)
cleanup_cache_dir(get_cache_dir())
exit(1)
song_graph = Graph(
"G",
engine=theme.graphviz_engine,
format="plain",
strict=True,
node_attr=theme.graphviz_node_attrs,
graph_attr=theme.graphviz_graph_attrs,
edge_attr=theme.graphviz_edge_attrs,
)
all_notes = {}
for track, note_tuples in track_events_frames.items():
if theme.skip_track(track):
continue
notes = [
[note_tuple[0] for note_tuple in list_of_note_tuples]
for frame_num, list_of_note_tuples in note_tuples.items()
]
for note in notes:
all_notes[note[0]] = True
# Create Nodes - In order
prev_note = None
all_notes = list(all_notes.keys())
if theme.nodes_sorted:
if isinstance(theme.nodes_sorted, bool):
all_notes = sorted(
all_notes, key=lambda i: int(i.split(TRACK_NOTE_DELIMITER)[1])
)
else:
all_notes = filter_and_order_custom(theme.nodes_sorted, all_notes)
for n in all_notes + [all_notes[0]]: # tack on the first to make a circle
song_graph.node(n, label=midi_note_to_pitch_class(n))
if prev_note:
song_graph.edge(n, prev_note)
prev_note = n
node_positions = get_node_positions(song_graph)
song_graph = Graph(
"G",
engine="neato",
format="xdot",
strict=True,
node_attr=theme.graphviz_node_attrs,
graph_attr=theme.graphviz_graph_attrs,
edge_attr=theme.graphviz_edge_attrs,
)
for track, note_tuples in track_events_frames.items():
if theme.skip_track(track):
continue
notes = [
[note_tuple[0] for note_tuple in list_of_note_tuples]
for frame_num, list_of_note_tuples in note_tuples.items()
]
# Create Nodes
for note in notes:
n = note[0]
song_graph.node(
n,
label=midi_note_to_pitch_class(n),
_attributes={"pos": node_positions[n]},
)
# Create Edges
melody_pairs = overlapping_pairs(notes)
for a_notes, b_notes in melody_pairs:
for a in a_notes:
for b in b_notes:
song_graph.node(b, label=midi_note_to_pitch_class(b))
song_graph.edge(a, b)
return song_graph
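# The second pass feeds the coordinates harvested from the circo "plain" layout
# back in through each node's "pos" attribute; neato reads a supplied pos as the
# node's starting position, which is what preserves the hand-ordered circle
# (how firmly neato pins those positions is a Graphviz layout detail assumed
# here rather than enforced explicitly by this code).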
def create_graphviz(theme, track_events_frames):
if theme.nodes_sorted:
return create_graphviz_sorted(theme, track_events_frames)
return create_graphviz_default_sort(theme, track_events_frames)
def process_frame(current_frame, base_image, theme, offsets, FRAMES):
frame_result = base_image.copy()
for layer, layer_images in sorted(FRAMES.items()):
frame = layer_images[current_frame]
if frame:
draw_function, args = frame
frame_result = draw_function(
base_image=frame_result, # Use frame_result instead of base_image
theme=theme,
offsets=offsets,
**args,
)
return frame_result
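# Layers composite in sorted name order, so "l1-*-line" chord lines are drawn
# first, "l2-*" node pulses on top of them, and "l3-*-balls" travelling balls
# last; each stored frame is a (draw_function, kwargs) pair applied to the
# running frame_result together with the shared theme and offsets.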
def generate_music_graph(
midi_file_path,
default_theme_file_path,
theme_file_path,
output_path,
soundfont_file,
):
theme = Theme(theme_file_path, default_theme_file_path)
track_events_frames = get_note_start_times_in_frames(
midi_file_path,
theme.frame_rate,
squash_tracks=theme.squash_tracks,
group_notes_by_track=theme.group_notes_by_track,
)
song_graph = create_graphviz(theme, track_events_frames)
base_image, nodes, edges, offsets = parse_graph(song_graph, theme)
if theme.debug_show_base_image:
base_image.show()
cleanup_cache_dir(get_cache_dir())
exit()
FRAMES = AnimationFrames()
click.echo("Planning out frames...", nl=False)
for track in track_events_frames.keys():
if theme.skip_track(track):
continue
curr_track = track_events_frames[track]
curr_frame = min(curr_track) - 1
prev_notes = None
prev_notes_frame = None
num_notes_processed = 0
click.echo() # NL
max_notes = len(track_events_frames[track])
while num_notes_processed < max_notes and curr_frame <= max(curr_track):
curr_frame += 1
if curr_frame not in curr_track:
continue
usage = size(psutil.Process().memory_info().rss)
click.echo(
f"\r[{track}] Processing {num_notes_processed + 1} of {max_notes} notes... (memory usage={usage})",
nl=False,
)
num_notes_processed += 1
curr_note_tuples = curr_track[curr_frame]
# Animate the Node pulses
for (
current_note,
curr_note_velocity,
curr_note_frame_len,
) in curr_note_tuples:
frames = []
for i in range(curr_note_frame_len):
frame = [
animate_ellipsis_blur,
{
"track": track,
"points": nodes[current_note].e_points,
"frame_number": i,
"animation_len": curr_note_frame_len,
"velocity": curr_note_velocity,
},
]
frames.append(frame)
FRAMES.add_frames_to_layer(
f"l2-{track}-{current_note}", curr_frame, frames
)
            if theme.pulses_only(track):
continue
# Animate the Chord Lines
if len(curr_note_tuples) > 1:
                # Split the chord's notes up by frame length, because multiple chords might be playing at once
notes_in_cords = {}
for note, velocity, frame_len in curr_note_tuples:
if frame_len not in notes_in_cords:
notes_in_cords[frame_len] = []
notes_in_cords[frame_len].append(note)
# For each individual chord, draw the lines
for frame_len, all_notes in notes_in_cords.items():
                    # The chord lines should not overlap, so sort them according to the sort order
if theme.nodes_sorted:
if isinstance(theme.nodes_sorted, bool):
all_notes = sorted(
all_notes,
key=lambda i: int(i.split(TRACK_NOTE_DELIMITER)[1]),
)
else:
all_notes = filter_and_order_custom(
theme.nodes_sorted, all_notes
)
# Use `overlapping_pairs` to make the notes connect as a circle
pairs = overlapping_pairs(all_notes)
for a, b in pairs:
frames = []
for i in range(frame_len):
if b not in edges[a]:
continue
frames.append(
[
draw_fading_bezier_curve,
{
"track": track,
"points": edges[a][b].b_points,
"frame_number": i,
"animation_len": frame_len,
},
]
)
FRAMES.add_frames_to_layer(
f"l1-{track}-{a}-{b}-line", curr_frame, frames
)
curr_notes = [curr_note_tuple[0] for curr_note_tuple in curr_note_tuples]
# Animate the "next note" balls
if prev_notes:
animation_length_in_frames = curr_frame - prev_notes_frame
drawn_to = set()
source_usage = {note: 0 for note in prev_notes}
                # If there are more destination notes than source notes, allow each source to be used twice so more destinations receive a ball
max_usage = 2 if len(curr_notes) > len(prev_notes) else 1
if animation_length_in_frames / theme.frame_rate <= 10:
for a in prev_notes:
for b in curr_notes:
if (
b in drawn_to
or (a == b and not theme.allow_self_notes(track))
or source_usage[a] >= max_usage
or b not in edges[a]
):
continue
frames = []
for i in range(animation_length_in_frames):
frame = [
animate_bezier_point,
{
"track": track,
"points": edges[a][b].b_points,
"frame_number": i,
"animation_length_in_frames": animation_length_in_frames,
},
]
frames.append(frame)
FRAMES.add_frames_to_layer(
f"l3-{track}-{a}-{b}-balls", prev_notes_frame, frames
)
drawn_to.add(b)
source_usage[a] += 1
prev_notes = curr_notes
prev_notes_frame = curr_frame
num_frames = len(FRAMES)
if theme.debug_max_frames:
num_frames = theme.debug_max_frames
writer_context = initialize_video_writer(theme.frame_rate)
frames_written = 0
click.echo("\nDrawing frames, writing videos...")
NUM_WORKERS = os.cpu_count()
with writer_context as (writer, video_file_path):
while frames_written < num_frames:
with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
future_to_frame = {
executor.submit(
process_frame,
current_frame=i,
base_image=base_image.copy(),
theme=theme,
offsets=offsets,
FRAMES=FRAMES,
): i
for i in range(
frames_written, min(frames_written + NUM_WORKERS, num_frames)
)
}
results = []
for future in as_completed(future_to_frame):
frame_index = future_to_frame[future]
frame_image = future.result()
results.append((frame_index, frame_image))
for frame_index, frame_image in sorted(results, key=lambda x: x[0]):
add_frame_to_video(writer, frame_image)
frames_written += 1
usage = size(psutil.Process().memory_info().rss)
click.echo(
f"\rProcessed {frames_written} of {num_frames}... (memory usage={usage})",
nl=False,
)
| finalize_video_with_music( | 12 | 2023-11-17 17:56:04+00:00 | 12k |